Merge branch 'master' into compute_mop

# Conflicts:
#	cmake/CMakeLists.txt
Axel Kohlmeyer 2018-09-06 21:13:42 -04:00
commit 947f574503
226 changed files with 30979 additions and 690 deletions

.github/CODEOWNERS

@ -17,6 +17,7 @@ src/GPU/* @ndtrung81
src/KOKKOS/* @stanmoore1
src/KIM/* @ellio167
src/LATTE/* @cnegre
src/MESSAGE/* @sjplimp
src/SPIN/* @julient31
src/USER-CGDNA/* @ohenrich
src/USER-CGSDK/* @akohlmey
@ -32,16 +33,82 @@ src/USER-PHONON/* @lingtikong
src/USER-OMP/* @akohlmey
src/USER-QMMM/* @akohlmey
src/USER-REAXC/* @hasanmetin
src/USER-SCAFACOS/* @rhalver
src/USER-TALLY/* @akohlmey
src/USER-UEF/* @danicholson
src/USER-VTK/* @rbberger
# individual files in packages
src/GPU/pair_vashishta_gpu.* @andeplane
src/KOKKOS/pair_vashishta_kokkos.* @andeplane
src/MANYBODY/pair_vashishta_table.* @andeplane
src/MANYBODY/pair_atm.* @sergeylishchuk
src/USER-MISC/fix_bond_react.* @jrgissing
src/USER-MISC/*_grem.* @dstelter92
src/USER-MISC/compute_stress_mop*.* @RomainVermorel
# core LAMMPS classes
src/lammps.* @sjplimp
src/pointers.h @sjplimp
src/atom.* @sjplimp
src/atom_vec.* @sjplimp
src/angle.* @sjplimp
src/bond.* @sjplimp
src/comm*.* @sjplimp
src/compute.* @sjplimp
src/dihedral.* @sjplimp
src/domain.* @sjplimp
src/dump*.* @sjplimp
src/error.* @sjplimp
src/finish.* @sjplimp
src/fix.* @sjplimp
src/force.* @sjplimp
src/group.* @sjplimp
src/improper.* @sjplimp
src/kspace.* @sjplimp
src/lmptyp.h @sjplimp
src/library.* @sjplimp
src/main.cpp @sjplimp
src/memory.* @sjplimp
src/modify.* @sjplimp
src/molecule.* @sjplimp
src/my_page.h @sjplimp
src/my_pool_chunk.h @sjplimp
src/npair*.* @sjplimp
src/ntopo*.* @sjplimp
src/nstencil*.* @sjplimp
src/neighbor.* @sjplimp
src/nbin*.* @sjplimp
src/neigh_*.* @sjplimp
src/output.* @sjplimp
src/pair.* @sjplimp
src/rcb.* @sjplimp
src/random_*.* @sjplimp
src/region*.* @sjplimp
src/read*.* @sjplimp
src/rerun.* @sjplimp
src/run.* @sjplimp
src/respa.* @sjplimp
src/set.* @sjplimp
src/special.* @sjplimp
src/suffix.h @sjplimp
src/thermo.* @sjplimp
src/universe.* @sjplimp
src/update.* @sjplimp
src/variable.* @sjplimp
src/verlet.* @sjplimp
src/velocity.* @sjplimp
src/write_data.* @sjplimp
src/write_restart.* @sjplimp
# overrides for specific files
src/dump_movie.* @akohlmey
src/exceptions.h @rbberger
src/fix_nh.* @athomps
src/info.* @akohlmey @rbberger
src/timer.* @akohlmey
# tools
tools/msi2lmp/* @akohlmey


@ -136,6 +136,7 @@ if(BUILD_EXE)
if(LAMMPS_MACHINE)
set(LAMMPS_MACHINE "_${LAMMPS_MACHINE}")
endif()
set(LAMMPS_BINARY lmp${LAMMPS_MACHINE})
endif()
option(BUILD_LIB "Build LAMMPS library" OFF)
@ -162,6 +163,34 @@ set(LAMMPS_LINK_LIBS)
set(LAMMPS_DEPS)
set(LAMMPS_API_DEFINES)
set(DEFAULT_PACKAGES ASPHERE BODY CLASS2 COLLOID COMPRESS DIPOLE GRANULAR
KSPACE MANYBODY MC MEAM MESSAGE MISC MOLECULE PERI REAX REPLICA RIGID SHOCK SPIN SNAP
SRD KIM PYTHON MSCG MPIIO VORONOI POEMS LATTE USER-ATC USER-AWPMD USER-BOCS
USER-CGDNA USER-MESO USER-CGSDK USER-COLVARS USER-DIFFRACTION USER-DPD USER-DRUDE
USER-EFF USER-FEP USER-H5MD USER-LB USER-MANIFOLD USER-MEAMC USER-MGPT USER-MISC
USER-MOFFF USER-MOLFILE USER-NETCDF USER-PHONON USER-QTB USER-REAXC USER-SCAFACOS
USER-SMD USER-SMTBQ USER-SPH USER-TALLY USER-UEF USER-VTK USER-QUIP USER-QMMM)
set(ACCEL_PACKAGES USER-OMP KOKKOS OPT USER-INTEL GPU)
set(OTHER_PACKAGES CORESHELL QEQ)
foreach(PKG ${DEFAULT_PACKAGES})
option(PKG_${PKG} "Build ${PKG} Package" OFF)
endforeach()
foreach(PKG ${ACCEL_PACKAGES} ${OTHER_PACKAGES})
option(PKG_${PKG} "Build ${PKG} Package" OFF)
endforeach()
######################################################
# packages with special compiler needs or external libs
######################################################
if(PKG_REAX OR PKG_MEAM OR PKG_USER-QUIP OR PKG_USER-QMMM OR PKG_LATTE OR PKG_USER-SCAFACOS)
enable_language(Fortran)
endif()
if(PKG_MEAM OR PKG_USER-H5MD OR PKG_USER-QMMM OR PKG_USER-SCAFACOS)
enable_language(C)
endif()
# do MPI detection after language activation, if MPI for these languages is required
find_package(MPI QUIET)
option(BUILD_MPI "Build MPI version" ${MPI_FOUND})
if(BUILD_MPI)
@ -206,25 +235,52 @@ endif()
option(CMAKE_VERBOSE_MAKEFILE "Verbose makefile" OFF)
option(ENABLE_TESTING "Enable testing" OFF)
if(ENABLE_TESTING)
if(ENABLE_TESTING AND BUILD_EXE)
enable_testing()
endif(ENABLE_TESTING)
option(LAMMPS_TESTING_SOURCE_DIR "Location of lammps-testing source directory" "")
option(LAMMPS_TESTING_GIT_TAG "Git tag of lammps-testing" "master")
mark_as_advanced(LAMMPS_TESTING_SOURCE_DIR LAMMPS_TESTING_GIT_TAG)
set(DEFAULT_PACKAGES ASPHERE BODY CLASS2 COLLOID COMPRESS DIPOLE GRANULAR
KSPACE MANYBODY MC MEAM MISC MOLECULE PERI REAX REPLICA RIGID SHOCK SPIN SNAP
SRD KIM PYTHON MSCG MPIIO VORONOI POEMS LATTE USER-ATC USER-AWPMD USER-BOCS
USER-CGDNA USER-MESO USER-CGSDK USER-COLVARS USER-DIFFRACTION USER-DPD USER-DRUDE
USER-EFF USER-FEP USER-H5MD USER-LB USER-MANIFOLD USER-MEAMC USER-MGPT USER-MISC
USER-MOFFF USER-MOLFILE USER-NETCDF USER-PHONON USER-QTB USER-REAXC
USER-SMD USER-SMTBQ USER-SPH USER-TALLY USER-UEF USER-VTK USER-QUIP USER-QMMM)
set(ACCEL_PACKAGES USER-OMP KOKKOS OPT USER-INTEL GPU)
set(OTHER_PACKAGES CORESHELL QEQ)
foreach(PKG ${DEFAULT_PACKAGES})
option(PKG_${PKG} "Build ${PKG} Package" OFF)
endforeach()
foreach(PKG ${ACCEL_PACKAGES} ${OTHER_PACKAGES})
option(PKG_${PKG} "Build ${PKG} Package" OFF)
endforeach()
if (CMAKE_VERSION VERSION_GREATER "3.10.3" AND NOT LAMMPS_TESTING_SOURCE_DIR)
include(FetchContent)
FetchContent_Declare(lammps-testing
GIT_REPOSITORY https://github.com/lammps/lammps-testing.git
GIT_TAG ${LAMMPS_TESTING_GIT_TAG}
)
FetchContent_GetProperties(lammps-testing)
if(NOT lammps-testing_POPULATED)
message(STATUS "Downloading tests...")
FetchContent_Populate(lammps-testing)
endif()
set(LAMMPS_TESTING_SOURCE_DIR ${lammps-testing_SOURCE_DIR})
elseif(NOT LAMMPS_TESTING_SOURCE_DIR)
message(WARNING "Full test-suite requires CMake >= 3.11 or copy of\n"
"https://github.com/lammps/lammps-testing in LAMMPS_TESTING_SOURCE_DIR")
endif()
if(EXISTS ${LAMMPS_TESTING_SOURCE_DIR})
message(STATUS "Running test discovery...")
file(GLOB_RECURSE TEST_SCRIPTS ${LAMMPS_TESTING_SOURCE_DIR}/tests/core/*/in.*)
foreach(script_path ${TEST_SCRIPTS})
get_filename_component(TEST_NAME ${script_path} EXT)
get_filename_component(SCRIPT_NAME ${script_path} NAME)
get_filename_component(PARENT_DIR ${script_path} DIRECTORY)
string(SUBSTRING ${TEST_NAME} 1 -1 TEST_NAME)
string(REPLACE "-" "_" TEST_NAME ${TEST_NAME})
string(REPLACE "+" "_" TEST_NAME ${TEST_NAME})
set(TEST_NAME "test_core_${TEST_NAME}_serial")
add_test(${TEST_NAME} ${CMAKE_BINARY_DIR}/${LAMMPS_BINARY} -in ${SCRIPT_NAME})
set_tests_properties(${TEST_NAME} PROPERTIES WORKING_DIRECTORY ${PARENT_DIR})
endforeach()
list(LENGTH TEST_SCRIPTS NUM_TESTS)
message(STATUS "Found ${NUM_TESTS} tests.")
endif()
endif()
macro(pkg_depends PKG1 PKG2)
if(PKG_${PKG1} AND NOT (PKG_${PKG2} OR BUILD_${PKG2}))
@ -238,17 +294,7 @@ pkg_depends(MPIIO MPI)
pkg_depends(USER-ATC MANYBODY)
pkg_depends(USER-LB MPI)
pkg_depends(USER-PHONON KSPACE)
######################################################
# packages with special compiler needs or external libs
######################################################
if(PKG_REAX OR PKG_MEAM OR PKG_USER-QUIP OR PKG_USER-QMMM OR PKG_LATTE)
enable_language(Fortran)
endif()
if(PKG_MEAM OR PKG_USER-H5MD OR PKG_USER-QMMM)
enable_language(C)
endif()
pkg_depends(USER-SCAFACOS MPI)
find_package(OpenMP QUIET)
option(BUILD_OMP "Build with OpenMP support" ${OpenMP_FOUND})
@ -426,6 +472,57 @@ if(PKG_LATTE)
list(APPEND LAMMPS_LINK_LIBS ${LATTE_LIBRARIES} ${LAPACK_LIBRARIES})
endif()
if(PKG_USER-SCAFACOS)
find_package(GSL REQUIRED)
option(DOWNLOAD_SCAFACOS "Download ScaFaCoS (instead of using the system's one)" OFF)
if(DOWNLOAD_SCAFACOS)
include(ExternalProject)
ExternalProject_Add(scafacos_build
URL https://github.com/scafacos/scafacos/releases/download/v1.0.1/scafacos-1.0.1.tar.gz
URL_MD5 bd46d74e3296bd8a444d731bb10c1738
CONFIGURE_COMMAND <SOURCE_DIR>/configure --prefix=<INSTALL_DIR>
--disable-doc
--enable-fcs-solvers=fmm,p2nfft,direct,ewald,p3m
--with-internal-fftw
--with-internal-pfft
--with-internal-pnfft
$<$<BOOL:${BUILD_SHARED_LIBS}>:--with-pic>
FC=${CMAKE_MPI_Fortran_COMPILER}
CXX=${CMAKE_MPI_CXX_COMPILER}
CC=${CMAKE_MPI_C_COMPILER}
F77=
)
ExternalProject_get_property(scafacos_build INSTALL_DIR)
set(SCAFACOS_BUILD_DIR ${INSTALL_DIR})
set(SCAFACOS_INCLUDE_DIRS ${SCAFACOS_BUILD_DIR}/include)
list(APPEND LAMMPS_DEPS scafacos_build)
# list and order from pkg_config file of ScaFaCoS build
list(APPEND LAMMPS_LINK_LIBS ${SCAFACOS_BUILD_DIR}/lib/libfcs.a)
list(APPEND LAMMPS_LINK_LIBS ${SCAFACOS_BUILD_DIR}/lib/libfcs_direct.a)
list(APPEND LAMMPS_LINK_LIBS ${SCAFACOS_BUILD_DIR}/lib/libfcs_ewald.a)
list(APPEND LAMMPS_LINK_LIBS ${SCAFACOS_BUILD_DIR}/lib/libfcs_fmm.a)
list(APPEND LAMMPS_LINK_LIBS ${SCAFACOS_BUILD_DIR}/lib/libfcs_p2nfft.a)
list(APPEND LAMMPS_LINK_LIBS ${SCAFACOS_BUILD_DIR}/lib/libfcs_p3m.a)
list(APPEND LAMMPS_LINK_LIBS ${GSL_LIBRARIES})
list(APPEND LAMMPS_LINK_LIBS ${SCAFACOS_BUILD_DIR}/lib/libfcs_near.a)
list(APPEND LAMMPS_LINK_LIBS ${SCAFACOS_BUILD_DIR}/lib/libfcs_gridsort.a)
list(APPEND LAMMPS_LINK_LIBS ${SCAFACOS_BUILD_DIR}/lib/libfcs_resort.a)
list(APPEND LAMMPS_LINK_LIBS ${SCAFACOS_BUILD_DIR}/lib/libfcs_redist.a)
list(APPEND LAMMPS_LINK_LIBS ${SCAFACOS_BUILD_DIR}/lib/libfcs_common.a)
list(APPEND LAMMPS_LINK_LIBS ${SCAFACOS_BUILD_DIR}/lib/libfcs_pnfft.a)
list(APPEND LAMMPS_LINK_LIBS ${SCAFACOS_BUILD_DIR}/lib/libfcs_pfft.a)
list(APPEND LAMMPS_LINK_LIBS ${SCAFACOS_BUILD_DIR}/lib/libfcs_fftw3_mpi.a)
list(APPEND LAMMPS_LINK_LIBS ${SCAFACOS_BUILD_DIR}/lib/libfcs_fftw3.a)
list(APPEND LAMMPS_LINK_LIBS ${MPI_Fortran_LIBRARIES})
list(APPEND LAMMPS_LINK_LIBS ${MPI_C_LIBRARIES})
else()
FIND_PACKAGE(PkgConfig REQUIRED)
PKG_CHECK_MODULES(SCAFACOS scafacos REQUIRED)
list(APPEND LAMMPS_LINK_LIBS ${SCAFACOS_LDFLAGS})
endif()
include_directories(${SCAFACOS_INCLUDE_DIRS})
endif()
if(PKG_USER-MOLFILE)
add_library(molfile INTERFACE)
target_include_directories(molfile INTERFACE ${LAMMPS_LIB_SOURCE_DIR}/molfile)
@ -435,8 +532,8 @@ endif()
if(PKG_USER-NETCDF)
find_package(NetCDF REQUIRED)
include_directories(NETCDF_INCLUDE_DIR)
list(APPEND LAMMPS_LINK_LIBS ${NETCDF_LIBRARY})
include_directories(${NETCDF_INCLUDE_DIRS})
list(APPEND LAMMPS_LINK_LIBS ${NETCDF_LIBRARIES})
add_definitions(-DLMP_HAS_NETCDF -DNC_64BIT_DATA=0x0020)
endif()
@ -504,6 +601,39 @@ if(PKG_KIM)
include_directories(${KIM_INCLUDE_DIRS})
endif()
if(PKG_MESSAGE)
option(MESSAGE_ZMQ "Use ZeroMQ in MESSAGE package" OFF)
file(GLOB_RECURSE cslib_SOURCES ${LAMMPS_LIB_SOURCE_DIR}/message/cslib/*.F
${LAMMPS_LIB_SOURCE_DIR}/message/cslib/*.c ${LAMMPS_LIB_SOURCE_DIR}/message/cslib/*.cpp)
if(BUILD_SHARED_LIBS)
add_library(cslib SHARED ${cslib_SOURCES})
else()
add_library(cslib STATIC ${cslib_SOURCES})
endif()
if(BUILD_MPI)
target_compile_definitions(cslib PRIVATE -DMPI_YES)
set_target_properties(cslib PROPERTIES OUTPUT_NAME "csmpi")
else()
target_compile_definitions(cslib PRIVATE -DMPI_NO)
set_target_properties(cslib PROPERTIES OUTPUT_NAME "csnompi")
endif()
if(MESSAGE_ZMQ)
target_compile_definitions(cslib PRIVATE -DZMQ_YES)
find_package(ZMQ REQUIRED)
target_include_directories(cslib PRIVATE ${ZMQ_INCLUDE_DIRS})
target_link_libraries(cslib PUBLIC ${ZMQ_LIBRARIES})
else()
target_compile_definitions(cslib PRIVATE -DZMQ_NO)
target_include_directories(cslib PRIVATE ${LAMMPS_LIB_SOURCE_DIR}/message/cslib/src/STUBS_ZMQ)
endif()
list(APPEND LAMMPS_LINK_LIBS cslib)
include_directories(${LAMMPS_LIB_SOURCE_DIR}/message/cslib/src)
endif()
if(PKG_MSCG)
find_package(GSL REQUIRED)
option(DOWNLOAD_MSCG "Download MS-CG (instead of using the system's one)" OFF)
@ -700,6 +830,7 @@ if(PKG_USER-OMP)
set(USER-OMP_SOURCES_DIR ${LAMMPS_SOURCE_DIR}/USER-OMP)
set(USER-OMP_SOURCES ${USER-OMP_SOURCES_DIR}/thr_data.cpp
${USER-OMP_SOURCES_DIR}/thr_omp.cpp
${USER-OMP_SOURCES_DIR}/fix_omp.cpp
${USER-OMP_SOURCES_DIR}/fix_nh_omp.cpp
${USER-OMP_SOURCES_DIR}/fix_nh_sphere_omp.cpp
${USER-OMP_SOURCES_DIR}/domain_omp.cpp)
@ -708,7 +839,7 @@ if(PKG_USER-OMP)
# detects styles which have USER-OMP version
RegisterStylesExt(${USER-OMP_SOURCES_DIR} omp OMP_SOURCES)
RegisterFixStyle("${USER-OMP_SOURCES_DIR}/fix_omp.h")
get_property(USER-OMP_SOURCES GLOBAL PROPERTY OMP_SOURCES)
@ -1086,11 +1217,11 @@ if(BUILD_EXE)
endif()
endif()
set_target_properties(lmp PROPERTIES OUTPUT_NAME lmp${LAMMPS_MACHINE})
set_target_properties(lmp PROPERTIES OUTPUT_NAME ${LAMMPS_BINARY})
install(TARGETS lmp DESTINATION ${CMAKE_INSTALL_BINDIR})
install(FILES ${LAMMPS_DOC_DIR}/lammps.1 DESTINATION ${CMAKE_INSTALL_MANDIR}/man1 RENAME lmp${LAMMPS_MACHINE}.1)
install(FILES ${LAMMPS_DOC_DIR}/lammps.1 DESTINATION ${CMAKE_INSTALL_MANDIR}/man1 RENAME ${LAMMPS_BINARY}.1)
if(ENABLE_TESTING)
add_test(ShowHelp lmp${LAMMPS_MACHINE} -help)
add_test(ShowHelp ${LAMMPS_BINARY} -help)
endif()
endif()
@ -1200,7 +1331,7 @@ endif()
###############################################################################
# Print package summary
###############################################################################
foreach(PKG ${DEFAULT_PACKAGES} ${ACCEL_PACKAGES})
foreach(PKG ${DEFAULT_PACKAGES} ${ACCEL_PACKAGES} ${OTHER_PACKAGES})
if(PKG_${PKG})
message(STATUS "Building package: ${PKG}")
endif()


@ -0,0 +1,8 @@
find_path(ZMQ_INCLUDE_DIR zmq.h)
find_library(ZMQ_LIBRARY NAMES zmq)
set(ZMQ_LIBRARIES ${ZMQ_LIBRARY})
set(ZMQ_INCLUDE_DIRS ${ZMQ_INCLUDE_DIR})
include(FindPackageHandleStandardArgs)
find_package_handle_standard_args(ZMQ DEFAULT_MSG ZMQ_LIBRARY ZMQ_INCLUDE_DIR)


@ -85,6 +85,10 @@ function(RegisterNPairStyle path)
AddStyleHeader(${path} NPAIR)
endfunction(RegisterNPairStyle)
function(RegisterFixStyle path)
AddStyleHeader(${path} FIX)
endfunction(RegisterFixStyle)
function(RegisterStyles search_path)
FindStyleHeaders(${search_path} ANGLE_CLASS angle_ ANGLE ) # angle ) # force
FindStyleHeaders(${search_path} ATOM_CLASS atom_vec_ ATOM_VEC ) # atom ) # atom atom_vec_hybrid


@ -31,6 +31,7 @@ This is the list of packages that may require additional steps.
"KOKKOS"_#kokkos,
"LATTE"_#latte,
"MEAM"_#meam,
"MESSAGE"_#message,
"MSCG"_#mscg,
"OPT"_#opt,
"POEMS"_#poems,
@ -47,6 +48,7 @@ This is the list of packages that may require additional steps.
"USER-OMP"_#user-omp,
"USER-QMMM"_#user-qmmm,
"USER-QUIP"_#user-quip,
"USER-SCAFACOS"_#user-scafacos,
"USER-SMD"_#user-smd,
"USER-VTK"_#user-vtk :tb(c=6,ea=c,a=l)
@ -361,6 +363,10 @@ make lib-meam args="-m mpi" # build with default Fortran compiler compatible
make lib-meam args="-m serial" # build with compiler compatible with "make serial" (GNU Fortran)
make lib-meam args="-m ifort" # build with Intel Fortran compiler using Makefile.ifort :pre
NOTE: You should test building the MEAM library with both the Intel
and GNU compilers to see if a simulation runs faster with one versus
the other on your system.
The build should produce two files: lib/meam/libmeam.a and
lib/meam/Makefile.lammps. The latter is copied from an existing
Makefile.lammps.* and has settings needed to link C++ (LAMMPS) with
@ -373,6 +379,35 @@ file.
:line
MESSAGE package :h4,link(message)
This package can optionally include support for messaging via sockets,
using the open-source "ZeroMQ library"_http://zeromq.org, which must
be installed on your system.
[CMake build]:
-D MESSAGE_ZMQ=value # build with ZeroMQ support, value = no (default) or yes
[Traditional make]:
Before building LAMMPS, you must build the CSlib library in
lib/message. You can build the CSlib library manually if you prefer;
follow the instructions in lib/message/README. You can also do it in
one step from the lammps/src dir, using commands like these, which
simply invoke the lib/message/Install.py script with the specified args:
make lib-message # print help message
make lib-message args="-m -z" # build with MPI and socket (ZMQ) support
make lib-message args="-s" # build as serial lib with no ZMQ support
The build should produce two files: lib/message/cslib/src/libmessage.a
and lib/message/Makefile.lammps. The latter is copied from an
existing Makefile.lammps.* and has settings to link with the ZeroMQ
library if requested in the build.
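
As an illustration only (a sketch, not taken from these docs; the
build directory name is arbitrary), a CMake build that enables the
package together with its optional ZeroMQ support could use the
PKG_MESSAGE and MESSAGE_ZMQ options like this:

cd lammps
mkdir build-message; cd build-message
cmake -D PKG_MESSAGE=yes -D MESSAGE_ZMQ=yes ../cmake
make :pre

With the traditional make build, you would instead run one of the
"make lib-message" commands above and then install the package
(e.g. "make yes-message") before compiling LAMMPS.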
:line
MSCG package :h4,link(mscg)
To build with this package, you must download and build the MS-CG
@ -894,6 +929,45 @@ successfully build on your system.
:line
USER-SCAFACOS package :h4,link(user-scafacos)
To build with this package, you must download and build the "ScaFaCoS
Coulomb solver library"_scafacos_home.
:link(scafacos_home,http://www.scafacos.de)
[CMake build]:
-D DOWNLOAD_SCAFACOS=value # download ScaFaCoS for build, value = no (default) or yes
-D SCAFACOS_LIBRARY=path # ScaFaCos library file (only needed if at custom location)
-D SCAFACOS_INCLUDE_DIR=path # ScaFaCoS include directory (only needed if at custom location) :pre
If DOWNLOAD_SCAFACOS is set, the ScaFaCoS library will be downloaded
and built inside the CMake build directory. If the ScaFaCoS library
is already on your system (in a location CMake cannot find it),
SCAFACOS_LIBRARY is the filename (plus path) of the ScaFaCoS library
file, not the directory the library file is in. SCAFACOS_INCLUDE_DIR
is the directory the ScaFaCoS include file is in.
[Traditional make]:
You can download and build the ScaFaCoS library manually if you
prefer; follow the instructions in lib/scafacos/README. You can also
do it in one step from the lammps/src dir, using a command like these,
which simply invoke the lib/scafacos/Install.py script with the
specified args:
make lib-scafacos # print help message
make lib-scafacos args="-b" # download and build in lib/scafacos/scafacos-<version>
make lib-scafacos args="-p $HOME/scafacos # use existing ScaFaCoS installation in $HOME/scafacos
Note that 2 symbolic (soft) links, "includelink" and "liblink", are
created in lib/scafacos to point to the ScaFaCoS src dir. When LAMMPS
builds in src it will use these links. You should not need to edit
the lib/scafacos/Makefile.lammps file.
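
As a sketch only (not taken from these docs; the build directory name
is arbitrary), a CMake build that lets CMake download and build
ScaFaCoS via the DOWNLOAD_SCAFACOS option described above could look
like this:

cd lammps
mkdir build-scafacos; cd build-scafacos
cmake -D PKG_USER-SCAFACOS=yes -D DOWNLOAD_SCAFACOS=yes ../cmake
make :pre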
:line
USER-SMD package :h4,link(user-smd)
To build with this package, you must download the Eigen3 library.


@ -42,6 +42,7 @@ packages:
"KOKKOS"_Build_extras.html#kokkos,
"LATTE"_Build_extras.html#latte,
"MEAM"_Build_extras.html#meam,
"MESSAGE"_#Build_extras.html#message,
"MSCG"_Build_extras.html#mscg,
"OPT"_Build_extras.html#opt,
"POEMS"_Build_extras.html#poems,
@ -58,6 +59,7 @@ packages:
"USER-OMP"_Build_extras.html#user-omp,
"USER-QMMM"_Build_extras.html#user-qmmm,
"USER-QUIP"_Build_extras.html#user-quip,
"USER-SCAFACOS"_#Build_extras.html#user-scafacos,
"USER-SMD"_Build_extras.html#user-smd,
"USER-VTK"_Build_extras.html#user-vtk :tb(c=6,ea=c,a=l)


@ -71,6 +71,7 @@ An alphabetic list of all LAMMPS commands.
"lattice"_lattice.html,
"log"_log.html,
"mass"_mass.html,
"message"_message.html,
"minimize"_minimize.html,
"min_modify"_min_modify.html,
"min_style"_min_style.html,
@ -103,6 +104,7 @@ An alphabetic list of all LAMMPS commands.
"restart"_restart.html,
"run"_run.html,
"run_style"_run_style.html,
"server"_server.html,
"set"_set.html,
"shell"_shell.html,
"special_bonds"_special_bonds.html,


@ -33,4 +33,5 @@ OPT.
"pppm/disp (i)"_kspace_style.html,
"pppm/disp/tip4p"_kspace_style.html,
"pppm/stagger"_kspace_style.html,
"pppm/tip4p (o)"_kspace_style.html :tb(c=4,ea=c)
"pppm/tip4p (o)"_kspace_style.html,
"scafacos"_kspace_style.html :tb(c=4,ea=c)


@ -54,6 +54,7 @@ General howto :h3
Howto_replica
Howto_library
Howto_couple
Howto_client_server
END_RST -->
@ -64,7 +65,8 @@ END_RST -->
"Run multiple simulations from one input script"_Howto_multiple.html
"Multi-replica simulations"_Howto_replica.html
"Library interface to LAMMPS"_Howto_library.html
"Couple LAMMPS to other codes"_Howto_couple.html :all(b)
"Couple LAMMPS to other codes"_Howto_couple.html
"Using LAMMPS in client/server mode"_Howto_client_server.html :all(b)
<!-- END_HTML_ONLY -->


@ -0,0 +1,131 @@
"Higher level section"_Howto.html - "LAMMPS WWW Site"_lws - "LAMMPS
Documentation"_ld - "LAMMPS Commands"_lc :c
:link(lws,http://lammps.sandia.gov)
:link(ld,Manual.html)
:link(lc,Commands_all.html)
:line
Using LAMMPS in client/server mode :h3
Client/server coupling of two codes is where one code is the "client"
and sends request messages to a "server" code. The server responds to
each request with a reply message. This enables the two codes to work
in tandem to perform a simulation. LAMMPS can act as either a client
or server code.
Some advantages of client/server coupling are that the two codes run
as stand-alone executables; they are not linked together. Thus
neither code needs to have a library interface. This often makes it
easier to run the two codes on different numbers of processors. If a
message protocol (format and content) is defined for a particular kind
of simulation, then in principle any code that implements the
client-side protocol can be used in tandem with any code that
implements the server-side protocol, without the two codes needing to
know anything more specific about each other.
A simple example of client/server coupling is where LAMMPS is the
client code performing MD timestepping. Each timestep it sends a
message to a server quantum code containing current coords of all the
atoms. The quantum code computes energy and forces based on the
coords. It returns them as a message to LAMMPS, which completes the
timestep.
Alternate methods for code coupling with LAMMPS are described on
the "Howto couple"_Howto_couple.html doc page.
LAMMPS support for client/server coupling is in its "MESSAGE
package"_Packages_details.html#PKG-MESSAGE which implements several
commands that enable LAMMPS to act as a client or server, as discussed
below. The MESSAGE package also wraps a client/server library called
CSlib which enables two codes to exchange messages in different ways,
either via files, sockets, or MPI. The CSlib is provided with LAMMPS
in the lib/message dir. The CSlib has its own
"website"_http://cslib.sandia.gov with documentation and test
programs.
NOTE: For client/server coupling to work between LAMMPS and another
code, the other code also has to use the CSlib. This can sometimes be
done without any modifications to the other code by simply wrapping it
with a Python script that exchanges CSlib messages with LAMMPS and
prepares input for or processes output from the other code. The other
code also has to implement a matching protocol for the format and
content of messages that LAMMPS exchanges with it.
These are the commands currently in the MESSAGE package for two
protocols, MD and MC (Monte Carlo). New protocols can easily be
defined and added to this directory, where LAMMPS acts as either the
client or server.
"message"_message.html
"fix client md"_fix_client_md.html = LAMMPS is a client for running MD
"server md"_server_md.html = LAMMPS is a server for computing MD forces
"server mc"_server_mc.html = LAMMPS is a server for computing a Monte Carlo energy
The server doc files give details of the message protocols
for data that is exchanged between the client and server.
These example directories illustrate how to use LAMMPS as either a
client or server code:
examples/message
examples/COUPLE/README
examples/COUPLE/lammps_mc
examples/COUPLE/lammps_vasp :ul
The examples/message dir couples a client instance of LAMMPS to a
server instance of LAMMPS.
The lammps_mc dir shows how to couple LAMMPS as a server to a simple
Monte Carlo client code as the driver.
The lammps_vasp dir shows how to couple LAMMPS as a client code
running MD timestepping to VASP acting as a server providing quantum
DFT forces, through a Python wrapper script on VASP.
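
To make the division of labor concrete, here is a minimal sketch (not
a complete, runnable input deck) of what the two input scripts might
contain when LAMMPS is both client and server; the zmq mode and port
shown are placeholders, and the exact arguments of the
"message"_message.html command are documented on its doc page:

# in.client (sketch): LAMMPS drives the MD timestepping
message    client md zmq localhost:5555
fix        1 all nve
fix        2 all client/md
run        100
# in.server (sketch): LAMMPS computes forces on request
message    server md zmq *:5555
pair_style lj/cut 2.5              # hypothetical force field on the server side
server     md :pre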
Here is how to launch a client and server code together for any of the
4 modes of message exchange that the "message"_message.html command
and the CSlib support. Here LAMMPS is used as both the client and
server code. Another code could be substituted for either.
The examples below show launching both codes from the same window (or
batch script), using the "&" character to launch the first code in the
background. For all modes except {mpi/one}, you could also launch the
codes in separate windows on your desktop machine. It does not
matter whether you launch the client or server first.
In these examples either code can be run on one or more processors.
If running in a non-MPI mode (file or zmq) you can launch a code on a
single processor without using mpirun.
IMPORTANT: If you run in mpi/two mode, you must launch both codes via
mpirun, even if one or both of them runs on a single processor. This
is so that MPI can figure out how to connect both MPI processes
together to exchange MPI messages between them.
For message exchange in {file}, {zmq}, or {mpi/two} modes:
% mpirun -np 1 lmp_mpi -log log.client < in.client &
% mpirun -np 2 lmp_mpi -log log.server < in.server :pre
% mpirun -np 4 lmp_mpi -log log.client < in.client &
% mpirun -np 1 lmp_mpi -log log.server < in.server :pre
% mpirun -np 2 lmp_mpi -log log.client < in.client &
% mpirun -np 4 lmp_mpi -log log.server < in.server :pre
For message exchange in {mpi/one} mode:
Launch both codes in a single mpirun command:
mpirun -np 2 lmp_mpi -mpicolor 0 -in in.message.client -log log.client : -np 4 lmp_mpi -mpicolor 1 -in in.message.server -log log.server :pre
The two -np values determine how many procs the client and the server
run on.
A LAMMPS executable run in this manner must use the -mpicolor color
command-line option as its first option, where color is an integer
label that will be used to distinguish one executable from another in
the multiple executables that the mpirun command launches. In this
example the client was colored with a 0, and the server with a 1.


@ -16,10 +16,12 @@ atoms and pass those forces to LAMMPS. Or a continuum finite element
nodal points, compute a FE solution, and return interpolated forces on
MD atoms.
LAMMPS can be coupled to other codes in at least 3 ways. Each has
LAMMPS can be coupled to other codes in at least 4 ways. Each has
advantages and disadvantages, which you'll have to think about in the
context of your application.
:line
(1) Define a new "fix"_fix.html command that calls the other code. In
this scenario, LAMMPS is the driver code. During its timestepping,
the fix is invoked, and can make library calls to the other code,
@ -32,6 +34,8 @@ LAMMPS.
:link(poems,http://www.rpi.edu/~anderk5/lab)
:line
(2) Define a new LAMMPS command that calls the other code. This is
conceptually similar to method (1), but in this case LAMMPS and the
other code are on a more equal footing. Note that now the other code
@ -52,6 +56,8 @@ command writes and reads.
See the "Modify command"_Modify_command.html doc page for info on how
to add a new command to LAMMPS.
:line
(3) Use LAMMPS as a library called by another code. In this case the
other code is the driver and calls LAMMPS as needed. Or a wrapper
code could link and call both LAMMPS and another code as libraries.
@ -102,3 +108,9 @@ on all the processors. Or it might allocate half the processors to
LAMMPS and half to the other code and run both codes simultaneously
before syncing them up periodically. Or it might instantiate multiple
instances of LAMMPS to perform different calculations.
:line
(4) Couple LAMMPS with another code in a client/server mode. This is
described on the "Howto client/server"_Howto_client_server.html doc
page.


@ -1,7 +1,7 @@
<!-- HTML_ONLY -->
<HEAD>
<TITLE>LAMMPS Users Manual</TITLE>
<META NAME="docnumber" CONTENT="31 Aug 2018 version">
<META NAME="docnumber" CONTENT="5 Sep 2018 version">
<META NAME="author" CONTENT="http://lammps.sandia.gov - Sandia National Laboratories">
<META NAME="copyright" CONTENT="Copyright (2003) Sandia Corporation. This software and manual is distributed under the GNU General Public License.">
</HEAD>
@ -21,7 +21,7 @@
:line
LAMMPS Documentation :c,h1
31 Aug 2018 version :c,h2
5 Sep 2018 version :c,h2
"What is a LAMMPS version?"_Manual_version.html


@ -92,6 +92,7 @@ as contained in the file name.
"USER-QTB"_#PKG-USER-QTB,
"USER-QUIP"_#PKG-USER-QUIP,
"USER-REAXC"_#PKG-USER-REAXC,
"USER-SCAFACOS"_#USER-SCAFACOS,
"USER-SMD"_#PKG-USER-SMD,
"USER-SMTBQ"_#PKG-USER-SMTBQ,
"USER-SPH"_#PKG-USER-SPH,
@ -549,10 +550,6 @@ This package has "specific installation
instructions"_Build_extras.html#gpu on the "Build
extras"_Build_extras.html doc page.
NOTE: You should test building the MEAM library with both the Intel
and GNU compilers to see if a simulation runs faster with one versus
the other on your system.
[Supporting info:]
src/MEAM: filenames -> commands
@ -563,6 +560,31 @@ examples/meam :ul
:line
MESSAGE package :link(PKG-MESSAGE),h4
[Contents:]
Commands to use LAMMPS as either a client or server and couple it to
another application.
[Install:]
This package has "specific installation
instructions"_Build_extras.html#message on the "Build
extras"_Build_extras.html doc page.
[Supporting info:]
src/MESSAGE: filenames -> commands
lib/message/README
"message"_message.html
"fix client/md"_fix_client_md.html
"server md"_server_md.html
"server mc"_server_mc.html
examples/message :ul
:line
MISC package :link(PKG-MISC),h4
[Contents:]
@ -1838,6 +1860,41 @@ examples/reax :ul
:line
USER-SCAFACOS package :link(PKG-USER-SCAFACOS),h4
[Contents:]
A KSpace style which wraps the "ScaFaCoS Coulomb solver
library"_http://www.scafacos.de to compute long-range Coulombic
interactions.
To use this package you must have the ScaFaCoS library available on
your system.
[Author:] Rene Halver (JSC) wrote the scafacos LAMMPS command.
ScaFaCoS itself was developed by a consortium of German research
facilities with a BMBF (German Ministry of Science and Education)
funded project in 2009-2012. Participants of the consortium were the
Universities of Bonn, Chemnitz, Stuttgart, and Wuppertal as well as
the Forschungszentrum Juelich.
[Install:]
This package has "specific installation
instructions"_Build_extras.html#user-scafacos on the "Build
extras"_Build_extras.html doc page.
[Supporting info:]
src/USER-SCAFACOS: filenames -> commands
src/USER-SCAFACOS/README
"kspace_style scafacos"_kspace_style.html
"kspace_modify"_kspace_modify.html
examples/USER/scafacos :ul
:line
USER-SMD package :link(PKG-USER-SMD),h4
[Contents:]


@ -47,6 +47,7 @@ Package, Description, Doc page, Example, Library
"MANYBODY"_Packages_details.html#PKG-MANYBODY, many-body potentials, "pair_style tersoff"_pair_tersoff.html, shear, no
"MC"_Packages_details.html#PKG-MC, Monte Carlo options, "fix gcmc"_fix_gcmc.html, n/a, no
"MEAM"_Packages_details.html#PKG-MEAM, modified EAM potential, "pair_style meam"_pair_meam.html, meam, int
"MESSAGE"_Packages_details.html#PKG-MESSAGE, client/server messaging, "message"_message.html, message, int
"MISC"_Packages_details.html#PKG-MISC, miscellaneous single-file commands, n/a, no, no
"MOLECULE"_Packages_details.html#PKG-MOLECULE, molecular system force fields, "Howto bioFF"_Howto_bioFF.html, peptide, no
"MPIIO"_Packages_details.html#PKG-MPIIO, MPI parallel I/O dump and restart, "dump"_dump.html, n/a, no


@ -66,6 +66,7 @@ Package, Description, Doc page, Example, Library
"USER-QTB"_Packages_details.html#PKG-USER-QTB, quantum nuclear effects,"fix qtb"_fix_qtb.html "fix qbmsst"_fix_qbmsst.html, qtb, no
"USER-QUIP"_Packages_details.html#PKG-USER-QUIP, QUIP/libatoms interface,"pair_style quip"_pair_quip.html, USER/quip, ext
"USER-REAXC"_Packages_details.html#PKG-USER-REAXC, ReaxFF potential (C/C++) ,"pair_style reaxc"_pair_reaxc.html, reax, no
"USER-SCAFACOS"_Packages_details.html#PKG-USER-SCAFACOS, wrapper on ScaFaCoS solver,"kspace_style scafacos"_kspace_style.html, USER/scafacos, ext
"USER-SMD"_Packages_details.html#PKG-USER-SMD, smoothed Mach dynamics,"SMD User Guide"_PDF/SMD_LAMMPS_userguide.pdf, USER/smd, ext
"USER-SMTBQ"_Packages_details.html#PKG-USER-SMTBQ, second moment tight binding QEq potential,"pair_style smtbq"_pair_smtbq.html, USER/smtbq, no
"USER-SPH"_Packages_details.html#PKG-USER-SPH, smoothed particle hydrodynamics,"SPH User Guide"_PDF/SPH_LAMMPS_userguide.pdf, USER/sph, no


@ -18,6 +18,7 @@ letter abbreviation can be used:
"-i or -in"_#file
"-k or -kokkos"_#run-kokkos
"-l or -log"_#log
"-m or -mpicolor"_#mpicolor
"-nc or -nocite"_#nocite
"-pk or -package"_#package
"-p or -partition"_#partition
@ -175,6 +176,30 @@ Option -plog will override the name of the partition log files file.N.
:line
[-mpicolor] color :link(mpicolor)
If used, this must be the first command-line argument after the LAMMPS
executable name. It is only used when LAMMPS is launched by an mpirun
command which also launches one or more other executables at the same time.
(The other executable could be LAMMPS as well.) The color is an
integer value which should be different for each executable (another
application may set this value in a different way). LAMMPS and the
other executable(s) perform an MPI_Comm_split() with their own colors
to shrink the MPI_COMM_WORLD communicator to the subset of
processors they are actually running on.
Currently, this is only used in LAMMPS to perform client/server
messaging with another application. LAMMPS can act as either a client
or server (or both). More details are given on the "Howto
client/server"_Howto_client_server.html doc page.
Specifically, this refers to the "mpi/one" mode of messaging provided
by the "message"_message.html command and the CSlib library LAMMPS
links with from the lib/message directory. See the
"message"_message.html command for more details.
:line
[-nocite] :link(nocite)
Disable writing the log.cite file which is normally written to list


@ -106,6 +106,11 @@ modification to the input script is needed. Alternatively, one can run
with the KOKKOS package by editing the input script as described
below.
NOTE: When using a single OpenMP thread, the Kokkos Serial backend (i.e.
Makefile.kokkos_mpi_only) will give better performance than the OpenMP
backend (i.e. Makefile.kokkos_omp) because some of the overhead to make
the code thread-safe is removed.
NOTE: The default for the "package kokkos"_package.html command is to
use "full" neighbor lists and set the Newton flag to "off" for both
pairwise and bonded interactions. However, when running on CPUs, it
@ -122,6 +127,22 @@ mpirun -np 16 lmp_kokkos_mpi_only -k on -sf kk -pk kokkos newton on neigh half c
If the "newton"_newton.html command is used in the input
script, it can also override the Newton flag defaults.
For half neighbor lists and OpenMP, the KOKKOS package uses data
duplication (i.e. thread-private arrays) by default to avoid
thread-level write conflicts in the force arrays (and other data
structures as necessary). Data duplication is typically fastest for
small numbers of threads (i.e. 8 or less) but does increase memory
footprint and is not scalable to large numbers of threads. An
alternative to data duplication is to use thread-level atomics, which
don't require duplication. The use of atomics can be forced by compiling
with the "-DLMP_KOKKOS_USE_ATOMICS" compile switch. Most but not all
Kokkos-enabled pair_styles support data duplication. Alternatively, full
neighbor lists avoid the need for duplication or atomics but require
more compute operations per atom. When using the Kokkos Serial backend
or the OpenMP backend with a single thread, no duplication or atomics are
used. For CUDA and half neighbor lists, the KOKKOS package always uses
atomics.
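
As a sketch of one way to set this compile switch (an assumption, not
a prescription from these docs): with a CMake build it could be
appended to the compiler flags, and with a traditional make build it
could be added to the LMP_INC variable in your Makefile.machine:

cmake -D PKG_KOKKOS=yes -D CMAKE_CXX_FLAGS="-DLMP_KOKKOS_USE_ATOMICS" ../cmake   # CMake build
LMP_INC = ...existing settings... -DLMP_KOKKOS_USE_ATOMICS                       # traditional make, in Makefile.machine :pre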
[Core and Thread Affinity:]
When using multi-threading, it is important for performance to bind


@ -56,6 +56,7 @@ Commands :h1
lattice
log
mass
message
min_modify
min_style
minimize
@ -87,6 +88,7 @@ Commands :h1
restart
run
run_style
server
set
shell
special_bonds

doc/src/fix_client_md.txt

@ -0,0 +1,106 @@
"LAMMPS WWW Site"_lws - "LAMMPS Documentation"_ld - "LAMMPS Commands"_lc :c
:link(lws,http://lammps.sandia.gov)
:link(ld,Manual.html)
:link(lc,Commands_all.html)
:line
fix client/md command :h3
[Syntax:]
fix ID group-ID client/md :pre
ID, group-ID are documented in "fix"_fix.html command
client/md = style name of this fix command :ul
[Examples:]
fix 1 all client/md :pre
[Description:]
This fix style enables LAMMPS to run as a "client" code and
communicate each timestep with a separate "server" code to perform an
MD simulation together.
The "Howto client/server"_Howto_client_server.html doc page gives an
overview of client/server coupling of LAMMPS with another code where
one code is the "client" and sends request messages to a "server"
code. The server responds to each request with a reply message. This
enables the two codes to work in tandem to perform a simulation.
When using this fix, LAMMPS (as the client code) passes the current
coordinates of all particles to the server code each timestep, which
computes their interaction, and returns the energy, forces, and virial
for the interacting particles to LAMMPS, so it can complete the
timestep.
The server code could be a quantum code, or another classical MD code
which encodes a force field (pair_style in LAMMPS lingo) which LAMMPS
does not have. In the quantum case, this fix is a mechanism for
running {ab initio} MD with quantum forces.
The group associated with this fix is ignored.
The protocol and "units"_units.html for message format and content
that LAMMPS exchanges with the server code are defined on the "server
md"_server_md.html doc page.
Note that when using LAMMPS as an MD client, your LAMMPS input script
should not normally contain force field commands, like a
"pair_style"_doc/pair_style.html, "bond_style"_doc/bond_style.html, or
"kspace_style"_kspace_style.html commmand. However it is possible for
a server code to only compute a portion of the full force-field, while
LAMMPS computes the remaining part. Your LAMMPS script can also
specify boundary conditions or force constraints in the usual way,
which will be added to the per-atom forces returned by the server
code.
See the examples/message dir for example scripts where LAMMPS is both
the "client" and/or "server" code for this kind of client/server MD
simulation. The examples/message/README file explains how to launch
LAMMPS and another code in tandem to perform a coupled simulation.
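
For orientation only, here is a sketch of a client-side input script
consistent with the description above (file names and values are
placeholders; see examples/message for the actual scripts):

units       lj
atom_style  atomic
read_data   data.client                     # hypothetical data file
message     client md zmq localhost:5555    # exact args: see the message command
velocity    all create 1.44 87287
fix         1 all nve                       # ordinary time integration
fix         2 all client/md                 # energy/forces/virial come from the server
thermo      10
run         100 :pre

Note that no pair_style, bond_style, or kspace_style commands appear;
the server supplies those forces.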
:line
[Restart, fix_modify, output, run start/stop, minimize info:]
No information about this fix is written to "binary restart
files"_restart.html.
The "fix_modify"_fix_modify.html {energy} option is supported by this
fix to add the potential energy computed by the server application to
the system's potential energy as part of "thermodynamic
output"_thermo_style.html.
The "fix_modify"_fix_modify.html {virial} option is supported by this
fix to add the server application's contribution to the system's
virial as part of "thermodynamic output"_thermo_style.html. The
default is {virial yes}.
This fix computes a global scalar which can be accessed by various
"output commands"_Howto_output.html. The scalar is the potential
energy discussed above. The scalar value calculated by this fix is
"extensive".
No parameter of this fix can be used with the {start/stop} keywords of
the "run"_run.html command. This fix is not invoked during "energy
minimization"_minimize.html.
[Restrictions:]
This fix is part of the MESSAGE package. It is only enabled if LAMMPS
was built with that package. See the "Build
package"_Build_package.html doc page for more info.
A script that uses this command must also use the
"message"_message.html command to setup the messaging protocol with
the other server code.
[Related commands:]
"message"_message.html, "server"_server.html
[Default:] none


@ -13,47 +13,53 @@ kspace_modify command :h3
kspace_modify keyword value ... :pre
one or more keyword/value pairs may be listed :ulb,l
keyword = {mesh} or {order} or {order/disp} or {mix/disp} or {overlap} or {minorder} or {force} or {gewald} or {gewald/disp} or {slab} or (nozforce} or {compute} or {cutoff/adjust} or {fftbench} or {collective} or {diff} or {kmax/ewald} or {force/disp/real} or {force/disp/kspace} or {splittol} or {disp/auto}:l
{mesh} value = x y z
x,y,z = grid size in each dimension for long-range Coulombics
{mesh/disp} value = x y z
x,y,z = grid size in each dimension for 1/r^6 dispersion
{order} value = N
N = extent of Gaussian for PPPM or MSM mapping of charge to grid
{order/disp} value = N
N = extent of Gaussian for PPPM mapping of dispersion term to grid
{mix/disp} value = {pair} or {geom} or {none}
{overlap} = {yes} or {no} = whether the grid stencil for PPPM is allowed to overlap into more than the nearest-neighbor processor
{minorder} value = M
M = min allowed extent of Gaussian when auto-adjusting to minimize grid communication
keyword = {collective} or {compute} or {cutoff/adjust} or {diff} or {disp/auto} or {fftbench} or {force/disp/kspace} or {force/disp/real} or {force} or {gewald/disp} or {gewald} or {kmax/ewald} or {mesh} or {minorder} or {mix/disp} or {order/disp} or {order} or {overlap} or {scafacos} or {slab} or {splittol} :l
{collective} value = {yes} or {no}
{compute} value = {yes} or {no}
{cutoff/adjust} value = {yes} or {no}
{diff} value = {ad} or {ik} = 2 or 4 FFTs for PPPM in smoothed or non-smoothed mode
{disp/auto} value = yes or no
{fftbench} value = {yes} or {no}
{force/disp/real} value = accuracy (force units)
{force/disp/kspace} value = accuracy (force units)
{force} value = accuracy (force units)
{gewald} value = rinv (1/distance units)
rinv = G-ewald parameter for Coulombics
{gewald/disp} value = rinv (1/distance units)
rinv = G-ewald parameter for dispersion
{kmax/ewald} value = kx ky kz
kx,ky,kz = number of Ewald sum kspace vectors in each dimension
{mesh} value = x y z
x,y,z = grid size in each dimension for long-range Coulombics
{mesh/disp} value = x y z
x,y,z = grid size in each dimension for 1/r^6 dispersion
{minorder} value = M
M = min allowed extent of Gaussian when auto-adjusting to minimize grid communication
{mix/disp} value = {pair} or {geom} or {none}
{order} value = N
N = extent of Gaussian for PPPM or MSM mapping of charge to grid
{order/disp} value = N
N = extent of Gaussian for PPPM mapping of dispersion term to grid
{overlap} = {yes} or {no} = whether the grid stencil for PPPM is allowed to overlap into more than the nearest-neighbor processor
{pressure/scalar} value = {yes} or {no}
{scafacos} values = option value1 value2 ...
option = {tolerance}
value = {energy} or {energy_rel} or {field} or {field_rel} or {potential} or {potential_rel}
option = {fmm_tuning}
value = {0} or {1}
{slab} value = volfactor or {nozforce}
volfactor = ratio of the total extended volume used in the
2d approximation compared with the volume of the simulation domain
{nozforce} turns off kspace forces in the z direction
{compute} value = {yes} or {no}
{cutoff/adjust} value = {yes} or {no}
{pressure/scalar} value = {yes} or {no}
{fftbench} value = {yes} or {no}
{collective} value = {yes} or {no}
{diff} value = {ad} or {ik} = 2 or 4 FFTs for PPPM in smoothed or non-smoothed mode
{kmax/ewald} value = kx ky kz
kx,ky,kz = number of Ewald sum kspace vectors in each dimension
{force/disp/real} value = accuracy (force units)
{force/disp/kspace} value = accuracy (force units)
{splittol} value = tol
tol = relative size of two eigenvalues (see discussion below)
{disp/auto} value = yes or no :pre
tol = relative size of two eigenvalues (see discussion below) :pre
:ule
[Examples:]
kspace_modify mesh 24 24 30 order 6
kspace_modify slab 3.0 :pre
kspace_modify slab 3.0
kspace_modify scafacos tolerance energy :pre
[Description:]
@ -61,6 +67,132 @@ Set parameters used by the kspace solvers defined by the
"kspace_style"_kspace_style.html command. Not all parameters are
relevant to all kspace styles.
:line
The {collective} keyword applies only to PPPM. It is set to {no} by
default, except on IBM BlueGene machines. If this option is set to
{yes}, LAMMPS will use MPI collective operations to remap data for
3d-FFT operations instead of the default point-to-point communication.
This is faster on IBM BlueGene machines, and may also be faster on
other machines if they have an efficient implementation of MPI
collective operations and adequate hardware.
:line
The {compute} keyword allows Kspace computations to be turned off,
even though a "kspace_style"_kspace_style.html is defined. This is
not useful for running a real simulation, but can be useful for
debugging purposes or for computing only partial forces that do not
include the Kspace contribution. You can also do this by simply not
defining a "kspace_style"_kspace_style.html, but a Kspace-compatible
"pair_style"_pair_style.html requires a kspace style to be defined.
This keyword gives you that option.
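
For example (illustrative only), to keep a long-range-compatible pair
style satisfied while skipping the KSpace computation:

pair_style    lj/cut/coul/long 10.0
kspace_style  pppm 1.0e-4
kspace_modify compute no :pre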
:line
The {cutoff/adjust} keyword applies only to MSM. If this option is
turned on, the Coulombic cutoff will be automatically adjusted at the
beginning of the run to give the desired estimated error. Other
cutoffs such as LJ will not be affected. If the grid is not set using
the {mesh} command, this command will also attempt to use the optimal
grid that minimizes cost using an estimate given by
"(Hardy)"_#Hardy1. Note that this cost estimate is not exact, somewhat
experimental, and still may not yield the optimal parameters.
:line
The {diff} keyword specifies the differentiation scheme used by the
PPPM method to compute forces on particles given electrostatic
potentials on the PPPM mesh. The {ik} approach is the default for
PPPM and is the original formulation used in "(Hockney)"_#Hockney1. It
performs differentiation in Kspace, and uses 3 FFTs to transfer each
component of the computed fields back to real space for total of 4
FFTs per timestep.
The analytic differentiation {ad} approach uses only 1 FFT to transfer
information back to real space for a total of 2 FFTs per timestep. It
then performs analytic differentiation on the single quantity to
generate the 3 components of the electric field at each grid point.
This is sometimes referred to as "smoothed" PPPM. This approach
requires a somewhat larger PPPM mesh to achieve the same accuracy as
the {ik} method. Currently, only the {ik} method (default) can be
used for a triclinic simulation cell with PPPM. The {ad} method is
always used for MSM.
NOTE: Currently, not all PPPM styles support the {ad} option. Support
for those PPPM variants will be added later.
:line
The {disp/auto} option controls whether the pppm/disp is allowed to
generate PPPM parameters automatically. If set to {no}, parameters have
to be specified using the {gewald/disp}, {mesh/disp},
{force/disp/real} or {force/disp/kspace} keywords, or
the code will stop with an error message. When this option is set to
{yes}, the error message will not appear and the simulation will start.
For a typical application, using the automatic parameter generation
will provide simulations that are either inaccurate or slow. Using this
option is thus not recommended. For guidelines on how to obtain good
parameters, see the "How-To"_Section_howto.html#howto_24 discussion.
:line
The {fftbench} keyword applies only to PPPM. It is off by default. If
this option is turned on, LAMMPS will perform a short FFT benchmark
computation and report its timings, and will thus finish a few seconds
later than it would if this option were off.
:line
The {force/disp/real} and {force/disp/kspace} keywords set the force
accuracy for the real and kspace computations for the dispersion part
of pppm/disp. As shown in "(Isele-Holder)"_#Isele-Holder1, optimal
performance and accuracy in the results is obtained when these values
are different.
:line
The {force} keyword overrides the relative accuracy parameter set by
the "kspace_style"_kspace_style.html command with an absolute
accuracy. The accuracy determines the RMS error in per-atom forces
calculated by the long-range solver and is thus specified in force
units. A negative value for the accuracy setting means to use the
relative accuracy parameter. The accuracy setting is used in
conjunction with the pairwise cutoff to determine the number of
K-space vectors for style {ewald}, the FFT grid size for style
{pppm}, or the real space grid size for style {msm}.
:line
The {gewald} keyword sets the value of the Ewald or PPPM G-ewald
parameter for charge as {rinv} in reciprocal distance units. Without
this setting, LAMMPS chooses the parameter automatically as a function
of cutoff, precision, grid spacing, etc. This means it can vary from
one simulation to the next which may not be desirable for matching a
KSpace solver to a pre-tabulated pairwise potential. This setting can
also be useful if Ewald or PPPM fails to choose a good grid spacing
and G-ewald parameter automatically. If the value is set to 0.0,
LAMMPS will choose the G-ewald parameter automatically. MSM does not
use the {gewald} parameter.
:line
The {gewald/disp} keyword sets the value of the Ewald or PPPM G-ewald
parameter for dispersion as {rinv} in reciprocal distance units. It
has the same meaning as the {gewald} setting for Coulombics.
:line
The {kmax/ewald} keyword sets the number of kspace vectors in each
dimension for kspace style {ewald}. The three values must be positive
integers, or else (0,0,0), which unsets the option. When this option
is not set, the Ewald sum scheme chooses its own kspace vectors,
consistent with the user-specified accuracy and pairwise cutoff. In
any case, if kspace style {ewald} is invoked, the values used are
printed to the screen and the log file at the start of the run.
:line
The {mesh} keyword sets the grid size for kspace style {pppm} or
{msm}. In the case of PPPM, this is the FFT mesh, and each dimension
must be factorizable into powers of 2, 3, and 5. In the case of MSM,
@ -70,6 +202,8 @@ or MSM solver chooses its own grid size, consistent with the
user-specified accuracy and pairwise cutoff. Values for x,y,z of
0,0,0 unset the option.
:line
The {mesh/disp} keyword sets the grid size for kspace style
{pppm/disp}. This is the FFT mesh for long-range dispersion and each
dimension must be factorizable into powers of 2, 3, and 5. When this
@ -77,39 +211,7 @@ option is not set, the PPPM solver chooses its own grid size,
consistent with the user-specified accuracy and pairwise cutoff.
Values for x,y,z of 0,0,0 unset the option.
The {order} keyword determines how many grid spacings an atom's charge
extends when it is mapped to the grid in kspace style {pppm} or {msm}.
The default for this parameter is 5 for PPPM and 8 for MSM, which
means each charge spans 5 or 8 grid cells in each dimension,
respectively. For the LAMMPS implementation of MSM, the order can
range from 4 to 10 and must be even. For PPPM, the minimum allowed
setting is 2 and the maximum allowed setting is 7. The larger the
value of this parameter, the smaller that LAMMPS will set the grid
size, to achieve the requested accuracy. Conversely, the smaller the
order value, the larger the grid size will be. Note that there is an
inherent trade-off involved: a small grid will lower the cost of FFTs
or MSM direct sum, but a larger order parameter will increase the cost
of interpolating charge/fields to/from the grid.
The {order/disp} keyword determines how many grid spacings an atom's
dispersion term extends when it is mapped to the grid in kspace style
{pppm/disp}. It has the same meaning as the {order} setting for
Coulombics.
The {overlap} keyword can be used in conjunction with the {minorder}
keyword with the PPPM styles to adjust the amount of communication
that occurs when values on the FFT grid are exchanged between
processors. This communication is distinct from the communication
inherent in the parallel FFTs themselves, and is required because
processors interpolate charge and field values using grid point values
owned by neighboring processors (i.e. ghost point communication). If
the {overlap} keyword is set to {yes} then this communication is
allowed to extend beyond nearest-neighbor processors, e.g. when using
lots of processors on a small problem. If it is set to {no} then the
communication will be limited to nearest-neighbor processors and the
{order} setting will be reduced if necessary, as explained by the
{minorder} keyword discussion. The {overlap} keyword is always set to
{yes} in MSM.
:line
The {minorder} keyword allows LAMMPS to reduce the {order} setting if
necessary to keep the communication of ghost grid point limited to
@ -126,6 +228,42 @@ error if the grid communication is non-nearest-neighbor and {overlap}
is set to {no}. The {minorder} keyword is not currently supported in
MSM.
:line
The {mix/disp} keyword selects the mixing rule for the dispersion
coefficients. With {pair}, the dispersion coefficients of unlike
types are computed as indicated with "pair_modify"_pair_modify.html.
With {geom}, geometric mixing is enforced on the dispersion
coefficients in the kspace coefficients. When using the arithmetic
mixing rule, this will speed-up the simulations but introduces some
error in the force computations, as shown in "(Wennberg)"_#Wennberg.
With {none}, it is assumed that no mixing rule is
applicable. Splitting of the dispersion coefficients will be performed
as described in "(Isele-Holder)"_#Isele-Holder1.
This splitting can be influenced with the {splittol} keyword. Only
the eigenvalues that are larger than tol compared to the largest
eigenvalue are included. Using this keyword, the original matrix of
dispersion coefficients is approximated. This leads to faster
computations, but the accuracy in the reciprocal space computations of
the dispersion part is decreased.
:line
The {order} keyword determines how many grid spacings an atom's charge
extends when it is mapped to the grid in kspace style {pppm} or {msm}.
The default for this parameter is 5 for PPPM and 8 for MSM, which
means each charge spans 5 or 8 grid cells in each dimension,
respectively. For the LAMMPS implementation of MSM, the order can
range from 4 to 10 and must be even. For PPPM, the minimum allowed
setting is 2 and the maximum allowed setting is 7. The larger the
value of this parameter, the smaller that LAMMPS will set the grid
size, to achieve the requested accuracy. Conversely, the smaller the
order value, the larger the grid size will be. Note that there is an
inherent trade-off involved: a small grid will lower the cost of FFTs
or MSM direct sum, but a larger order parameter will increase the cost
of interpolating charge/fields to/from the grid.
The PPPM order parameter may be reset by LAMMPS when it sets up the
FFT grid if the implied grid stencil extends beyond the grid cells
owned by neighboring processors. Typically this will only occur when
@ -134,30 +272,102 @@ be generated indicating the order parameter is being reduced to allow
LAMMPS to run the problem. Automatic adjustment of the order parameter
is not supported in MSM.
The {force} keyword overrides the relative accuracy parameter set by
the "kspace_style"_kspace_style.html command with an absolute
accuracy. The accuracy determines the RMS error in per-atom forces
calculated by the long-range solver and is thus specified in force
units. A negative value for the accuracy setting means to use the
relative accuracy parameter. The accuracy setting is used in
conjunction with the pairwise cutoff to determine the number of
K-space vectors for style {ewald}, the FFT grid size for style
{pppm}, or the real space grid size for style {msm}.
:line
The {gewald} keyword sets the value of the Ewald or PPPM G-ewald
parameter for charge as {rinv} in reciprocal distance units. Without
this setting, LAMMPS chooses the parameter automatically as a function
of cutoff, precision, grid spacing, etc. This means it can vary from
one simulation to the next which may not be desirable for matching a
KSpace solver to a pre-tabulated pairwise potential. This setting can
also be useful if Ewald or PPPM fails to choose a good grid spacing
and G-ewald parameter automatically. If the value is set to 0.0,
LAMMPS will choose the G-ewald parameter automatically. MSM does not
use the {gewald} parameter.
The {order/disp} keyword determines how many grid spacings an atom's
dispersion term extends when it is mapped to the grid in kspace style
{pppm/disp}. It has the same meaning as the {order} setting for
Coulombics.
The {gewald/disp} keyword sets the value of the Ewald or PPPM G-ewald
parameter for dispersion as {rinv} in reciprocal distance units. It
has the same meaning as the {gewald} setting for Coulombics.
:line
The {overlap} keyword can be used in conjunction with the {minorder}
keyword with the PPPM styles to adjust the amount of communication
that occurs when values on the FFT grid are exchanged between
processors. This communication is distinct from the communication
inherent in the parallel FFTs themselves, and is required because
processors interpolate charge and field values using grid point values
owned by neighboring processors (i.e. ghost point communication). If
the {overlap} keyword is set to {yes} then this communication is
allowed to extend beyond nearest-neighbor processors, e.g. when using
lots of processors on a small problem. If it is set to {no} then the
communication will be limited to nearest-neighbor processors and the
{order} setting will be reduced if necessary, as explained by the
{minorder} keyword discussion. The {overlap} keyword is always set to
{yes} in MSM.
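For example, to restrict grid communication to nearest-neighbor
processors while allowing the order to be reduced no lower than 3
(illustrative settings):
kspace_modify overlap no minorder 3 :pre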
:line
The {pressure/scalar} keyword applies only to MSM. If this option is
turned on, only the scalar pressure (i.e. (Pxx + Pyy + Pzz)/3.0) will
be computed, which can be used, for example, to run an isotropic barostat.
Computing the full pressure tensor with MSM is expensive, and this option
provides a faster alternative. The scalar pressure is computed using a
relationship between the Coulombic energy and pressure "(Hummer)"_#Hummer
instead of using the virial equation. This option cannot be used to access
individual components of the pressure tensor, to compute per-atom virial,
or with suffix kspace/pair styles of MSM, like OMP or GPU.
:line
The {scafacos} keyword is used for settings that are passed to the
ScaFaCoS library when using "kspace_style scafacos"_kspace_style.html.
The {tolerance} option affects how the {accuracy} specified with the
"kspace_style"_kspace_style.html command is interpreted by ScaFaCoS.
The following values may be used:
energy = absolute accuracy in total Coulombic energy
energy_rel = relative accuracy in total Coulombic energy
potential = absolute accuracy in total Coulombic potential
potential_rel = relative accuracy in total Coulombic potential
field = absolute accuracy in electric field
field_rel = relative accuracy in electric field :ul
The values with suffix _rel indicate a relative tolerance; the other
values impose an absolute tolerance on the given quantity. Absolute
tolerance in this case means that for a given quantity q and a given
absolute tolerance t_a the result must lie between q-t_a and q+t_a.
For a relative tolerance t_r the relative error must not be greater
than t_r, i.e. abs(1 - (result/q)) < t_r. As a consequence, the
tolerance type should be checked when performing computations with a
large absolute field or energy. E.g. if the total energy in the
system is 1000000.0, an absolute tolerance of 1e-3 would mean the
result has to be between 999999.999 and 1000000.001, which is
equivalent to a relative tolerance of 1e-9.
The energy and energy_rel values set a tolerance based on the total
Coulombic energy of the system. The potential and potential_rel
values set a tolerance based on the per-atom Coulombic energy. The
field and field_rel tolerance types set a tolerance based on the
electric field values computed by ScaFaCoS. Since per-atom forces are
derived from the per-atom electric field, this effectively sets a
tolerance on the forces, similar to other LAMMPS KSpace styles, as
explained on the "kspace_style"_kspace_style.html doc page.
Note that not all ScaFaCoS solvers support all tolerance types.
These are the allowed values for each method:
fmm = energy and energy_rel
p2nfft = field (1d-,2d-,3d-periodic systems) or potential (0d-periodic)
p3m = field
ewald = field
direct = has no tolerance tuning :ul
If the tolerance type is not set explicitly, the default for each
method is the first value in its list above, e.g. energy is the
default tolerance type for the fmm solver.
The {fmm_tuning} option is only relevant when using the FMM method.
It activates (value=1) or deactivates (value=0) an internal tuning
mechanism for the FMM solver. The tuning operation runs sequentially
and can be very time-consuming. Usually it is not needed for systems
with a homogeneous charge distribution. The default for this option is
therefore {0}. The FMM internal tuning is performed once, when the
solver is set up.
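For example, one possible combination selects a relative energy
tolerance and enables the FMM tuning pass (illustrative choices; check
the per-method tolerance support listed above):
kspace_modify scafacos tolerance energy_rel
kspace_modify scafacos fmm_tuning 1 :pre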
:line
The {slab} keyword allows an Ewald or PPPM solver to be used for
systems that are periodic in x,y but non-periodic in z - a
@ -191,92 +401,7 @@ the "fix efield"_fix_efield.html command, it will not give the correct
dielectric constant due to the Yeh/Berkowitz "(Yeh)"_#Yeh correction
not being compatible with how "fix efield"_fix_efield.html works.
The {compute} keyword allows Kspace computations to be turned off,
even though a "kspace_style"_kspace_style.html is defined. This is
not useful for running a real simulation, but can be useful for
debugging purposes or for computing only partial forces that do not
include the Kspace contribution. You can also do this by simply not
defining a "kspace_style"_kspace_style.html, but a Kspace-compatible
"pair_style"_pair_style.html requires a kspace style to be defined.
This keyword gives you that option.
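For example, to keep the defined KSpace style but skip its
computation during a run:
kspace_modify compute no :pre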
The {cutoff/adjust} keyword applies only to MSM. If this option is
turned on, the Coulombic cutoff will be automatically adjusted at the
beginning of the run to give the desired estimated error. Other
cutoffs such as LJ will not be affected. If the grid is not set using
the {mesh} command, this command will also attempt to use the optimal
grid that minimizes cost using an estimate given by
"(Hardy)"_#Hardy1. Note that this cost estimate is not exact, somewhat
experimental, and still may not yield the optimal parameters.
The {pressure/scalar} keyword applies only to MSM. If this option is
turned on, only the scalar pressure (i.e. (Pxx + Pyy + Pzz)/3.0) will
be computed, which can be used, for example, to run an isotropic barostat.
Computing the full pressure tensor with MSM is expensive, and this option
provides a faster alternative. The scalar pressure is computed using a
relationship between the Coulombic energy and pressure "(Hummer)"_#Hummer
instead of using the virial equation. This option cannot be used to access
individual components of the pressure tensor, to compute per-atom virial,
or with suffix kspace/pair styles of MSM, like OMP or GPU.
The {fftbench} keyword applies only to PPPM. It is off by default. If
this option is turned on, LAMMPS will perform a short FFT benchmark
computation and report its timings, and will thus finish a few seconds
later than it would if this option were off.
The {collective} keyword applies only to PPPM. It is set to {no} by
default, except on IBM BlueGene machines. If this option is set to
{yes}, LAMMPS will use MPI collective operations to remap data for
3d-FFT operations instead of the default point-to-point communication.
This is faster on IBM BlueGene machines, and may also be faster on
other machines if they have an efficient implementation of MPI
collective operations and adequate hardware.
The {diff} keyword specifies the differentiation scheme used by the
PPPM method to compute forces on particles given electrostatic
potentials on the PPPM mesh. The {ik} approach is the default for
PPPM and is the original formulation used in "(Hockney)"_#Hockney1. It
performs differentiation in Kspace, and uses 3 FFTs to transfer each
component of the computed fields back to real space for a total of 4
FFTs per timestep.
The analytic differentiation {ad} approach uses only 1 FFT to transfer
information back to real space for a total of 2 FFTs per timestep. It
then performs analytic differentiation on the single quantity to
generate the 3 components of the electric field at each grid point.
This is sometimes referred to as "smoothed" PPPM. This approach
requires a somewhat larger PPPM mesh to achieve the same accuracy as
the {ik} method. Currently, only the {ik} method (default) can be
used for a triclinic simulation cell with PPPM. The {ad} method is
always used for MSM.
NOTE: Currently, not all PPPM styles support the {ad} option. Support
for those PPPM variants will be added later.
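For example, to switch PPPM to analytic differentiation (supported
only by some PPPM variants, as noted above):
kspace_modify diff ad :pre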
The {kmax/ewald} keyword sets the number of kspace vectors in each
dimension for kspace style {ewald}. The three values must be positive
integers, or else (0,0,0), which unsets the option. When this option
is not set, the Ewald sum scheme chooses its own kspace vectors,
consistent with the user-specified accuracy and pairwise cutoff. In
any case, if kspace style {ewald} is invoked, the values used are
printed to the screen and the log file at the start of the run.
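For example, an explicit (purely illustrative) choice of k-space
vectors would be:
kspace_modify kmax/ewald 12 12 12 :pre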
With the {mix/disp} keyword one can select the mixing rule for the
dispersion coefficients. With {pair}, the dispersion coefficients of
unlike types are computed as indicated with
"pair_modify"_pair_modify.html. With {geom}, geometric mixing is
enforced on the dispersion coefficients in the kspace
coefficients. When using the arithmetic mixing rule, this will
speed-up the simulations but introduces some error in the force
computations, as shown in "(Wennberg)"_#Wennberg. With {none}, it is
assumed that no mixing rule is applicable. Splitting of the dispersion
coefficients will be performed as described in
"(Isele-Holder)"_#Isele-Holder1. This splitting can be influenced with
the {splittol} keyword. Only the eigenvalues that are larger than tol
compared to the largest eigenvalue are included. Using this keyword,
the original matrix of dispersion coefficients is approximated. This
leads to faster computations, but the accuracy in the reciprocal space
computations of the dispersion part is decreased.
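As an illustrative sketch, disabling mixing in the kspace coefficients
and setting the splitting tolerance explicitly (values are
placeholders) could look like:
kspace_modify mix/disp none splittol 1.0e-6 :pre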
:line
The {force/disp/real} and {force/disp/kspace} keywords set the force
accuracy for the real and kspace computations for the dispersion part
@ -295,6 +420,8 @@ provide simulations that are either inaccurate or slow. Using this
option is thus not recommended. For guidelines on how to obtain good
parameters, see the "Howto dispersion"_Howto_dispersion.html doc page.
:line
[Restrictions:] none
[Related commands:]
@ -306,10 +433,12 @@ parameters, see the "Howto dispersion"_Howto_dispersion.html doc page.
The option defaults are mesh = mesh/disp = 0 0 0, order = order/disp =
5 (PPPM), order = 10 (MSM), minorder = 2, overlap = yes, force = -1.0,
gewald = gewald/disp = 0.0, slab = 1.0, compute = yes, cutoff/adjust =
yes (MSM), pressure/scalar = yes (MSM), fftbench = no (PPPM), diff = ik
(PPPM), mix/disp = pair, force/disp/real = -1.0, force/disp/kspace = -1.0,
split = 0, tol = 1.0e-6, and disp/auto = no. For pppm/intel, order =
order/disp = 7.
yes (MSM), pressure/scalar = yes (MSM), fftbench = no (PPPM), diff =
ik (PPPM), mix/disp = pair, force/disp/real = -1.0, force/disp/kspace
= -1.0, split = 0, tol = 1.0e-6, and disp/auto = no. For pppm/intel,
order = order/disp = 7. For scafacos settings, the scafacos tolerance
option depends on the method chosen, as documented above. The
scafacos fmm_tuning default = 0.
:line

View File

@ -12,7 +12,7 @@ kspace_style command :h3
kspace_style style value :pre
style = {none} or {ewald} or {ewald/disp} or {ewald/omp} or {pppm} or {pppm/cg} or {pppm/disp} or {pppm/tip4p} or {pppm/stagger} or {pppm/disp/tip4p} or {pppm/gpu} or {pppm/kk} or {pppm/omp} or {pppm/cg/omp} or {pppm/tip4p/omp} or {msm} or {msm/cg} or {msm/omp} or {msm/cg/omp} :ulb,l
style = {none} or {ewald} or {ewald/disp} or {ewald/omp} or {pppm} or {pppm/cg} or {pppm/disp} or {pppm/tip4p} or {pppm/stagger} or {pppm/disp/tip4p} or {pppm/gpu} or {pppm/kk} or {pppm/omp} or {pppm/cg/omp} or {pppm/tip4p/omp} or {msm} or {msm/cg} or {msm/omp} or {msm/cg/omp} or {scafacos} :ulb,l
{none} value = none
{ewald} value = accuracy
accuracy = desired relative error in forces
@ -22,7 +22,7 @@ style = {none} or {ewald} or {ewald/disp} or {ewald/omp} or {pppm} or {pppm/cg}
accuracy = desired relative error in forces
{pppm} value = accuracy
accuracy = desired relative error in forces
{pppm/cg} value = accuracy (smallq)
{pppm/cg} values = accuracy (smallq)
accuracy = desired relative error in forces
smallq = cutoff for charges to be considered (optional) (charge units)
{pppm/disp} value = accuracy
@ -56,7 +56,10 @@ style = {none} or {ewald} or {ewald/disp} or {ewald/omp} or {pppm} or {pppm/cg}
accuracy = desired relative error in forces
{msm/cg/omp} value = accuracy (smallq)
accuracy = desired relative error in forces
smallq = cutoff for charges to be considered (optional) (charge units) :pre
smallq = cutoff for charges to be considered (optional) (charge units)
{scafacos} values = method accuracy
method = fmm or p2nfft or ewald or direct
accuracy = desired relative error in forces :pre
:ule
[Examples:]
@ -64,6 +67,7 @@ style = {none} or {ewald} or {ewald/disp} or {ewald/omp} or {pppm} or {pppm/cg}
kspace_style pppm 1.0e-4
kspace_style pppm/cg 1.0e-5 1.0e-6
kspace_style msm 1.0e-4
kspace_style scafacos fmm 1.0e-4
kspace_style none :pre
[Description:]
@ -211,6 +215,63 @@ pressure simulation with MSM will cause the code to run slower.
:line
The {scafacos} style is a wrapper on the "ScaFaCoS Coulomb solver
library"_http://www.scafacos.de which provides a variety of solver
methods which can be used with LAMMPS. The paper by "(Who)"_#Who2012
gives an overview of ScaFaCoS.
ScaFaCoS was developed by a consortium of German research facilities
with a BMBF (German Ministry of Science and Education) funded project
in 2009-2012. Participants of the consortium were the Universities of
Bonn, Chemnitz, Stuttgart, and Wuppertal as well as the
Forschungszentrum Juelich.
The library is available for download at "http://scafacos.de" or can
be cloned from the git-repository
"git://github.com/scafacos/scafacos.git".
In order to use this KSpace style, you must download and build the
ScaFaCoS library, then build LAMMPS with the USER-SCAFACOS package
installed, which links LAMMPS to the ScaFaCoS library.
See details on "this page"_Section_packages.html#USER-SCAFACOS.
NOTE: Unlike other KSpace solvers in LAMMPS, ScaFaCoS computes all
Coulombic interactions, both short- and long-range. Thus you should
NOT use a Coulombic pair style when using kspace_style scafacos. This
also means the total Coulombic energy (short- and long-range) will be
tallied for the "thermodynamic output"_thermo_style.html command as part
of the {elong} keyword; the {ecoul} keyword will be zero.
NOTE: See the current restriction below about use of ScaFaCoS in
LAMMPS with molecular charged systems or the TIP4P water model.
The specified {method} determines which ScaFaCoS algorithm is used.
These are the ScaFaCoS methods currently available from LAMMPS:
{fmm} = Fast Multi-Pole method
{p2nfft} = FFT-based Coulomb solver
{ewald} = Ewald summation
{direct} = direct O(N^2) summation
{p3m} = PPPM :ul
We plan to support additional ScaFaCoS solvers from LAMMPS in the
future. For an overview of the included solvers, refer to
"(Sutmann)"_#Sutmann2013
The specified {accuracy} is similar to the accuracy setting for other
LAMMPS KSpace styles, but is passed to ScaFaCoS, which can interpret
it in different ways for different methods it supports. Within the
ScaFaCoS library the {accuracy} is treated as a tolerance level
(either absolute or relative) for the chosen quantity, where the
quantity can be either the Coulombic field values, the per-atom Coulombic
energy, or the total Coulombic energy. To select from these options, see
the "kspace_modify scafacos accuracy"_kspace_modify.html doc page.
The "kspace_modify scafacos"_kspace_modify.html command also explains
other ScaFaCoS options currently exposed to LAMMPS.
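For example, a minimal sketch that selects the FMM solver and a
relative energy tolerance (the accuracy value is illustrative):
kspace_style scafacos fmm 0.001
kspace_modify scafacos tolerance energy_rel :pre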
:line
The specified {accuracy} determines the relative RMS error in per-atom
forces calculated by the long-range solver. It is set as a
dimensionless number, relative to the force that two unit point
@ -321,12 +382,24 @@ dimensions. The only exception is if the slab option is set with
"kspace_modify"_kspace_modify.html, in which case the xy dimensions
must be periodic and the z dimension must be non-periodic.
The scafacos KSpace style will only be enabled if LAMMPS is built with
the USER-SCAFACOS package. See the "Making
LAMMPS"_Section_start.html#start_3 section for more info.
The use of ScaFaCoS in LAMMPS does not yet support molecular charged
systems where the short-range Coulombic interactions between atoms in
the same bond/angle/dihedral are weighted by the
"special_bonds"_special_bonds.html command. Likewise it does not
support the "TIP4P water style" where a fictitious charge site is
introduced in each water molecule.
[Related commands:]
"kspace_modify"_kspace_modify.html, "pair_style
lj/cut/coul/long"_pair_lj.html, "pair_style
lj/charmm/coul/long"_pair_charmm.html, "pair_style
lj/long/coul/long"_pair_lj_long.html, "pair_style buck/coul/long"_pair_buck.html
lj/long/coul/long"_pair_lj_long.html, "pair_style
buck/coul/long"_pair_buck.html
[Default:]
@ -384,5 +457,12 @@ Evaluation of Forces for the Simulation of Biomolecules, University of
Illinois at Urbana-Champaign, (2006).
:link(Hardy2009)
[(Hardy2)] Hardy, Stone, Schulten, Parallel Computing 35 (2009)
164-177.
[(Hardy2)] Hardy, Stone, Schulten, Parallel Computing, 35, 164-177
(2009).
:link(Sutmann2013)
[(Sutmann)] Sutmann, Arnold, Fahrenberger, et al., Physical Review E, 88(6), 063308 (2013).
:link(Who2012)
[(Who)] Who, Author2, Author3, J of Long Range Solvers, 35, 164-177
(2012).

View File

@ -167,6 +167,7 @@ label.html
lattice.html
log.html
mass.html
message.html
min_modify.html
min_style.html
minimize.html
@ -194,6 +195,9 @@ reset_timestep.html
restart.html
run.html
run_style.html
server.html
server_mc.html
server_md.html
set.html
shell.html
special_bonds.html
@ -241,6 +245,7 @@ fix_bond_create.html
fix_bond_react.html
fix_bond_swap.html
fix_box_relax.html
fix_client_md.html
fix_cmap.html
fix_colvars.html
fix_controller.html

162
doc/src/message.txt Normal file
View File

@ -0,0 +1,162 @@
"LAMMPS WWW Site"_lws - "LAMMPS Documentation"_ld - "LAMMPS Commands"_lc :c
:link(lws,http://lammps.sandia.gov)
:link(ld,Manual.html)
:link(lc,Command_all.html)
:line
message command :h3
[Syntax:]
message which protocol mode arg :pre
which = {client} or {server} :ulb,l
protocol = {md} or {mc} :l
mode = {file} or {zmq} or {mpi/one} or {mpi/two} :l
{file} arg = filename
filename = file used for message exchanges
{zmq} arg = socket-ID
socket-ID for client = localhost:5555, see description below
socket-ID for server = *:5555, see description below
{mpi/one} arg = none
{mpi/two} arg = filename
filename = file used to establish communication between 2 MPI jobs :pre
:ule
[Examples:]
message client md file tmp.couple
message server md file tmp.couple :pre
message client md zmq localhost:5555
message server md zmq *:5555 :pre
message client md mpi/one
message server md mpi/one :pre
message client md mpi/two tmp.couple
message server md mpi/two tmp.couple :pre
[Description:]
Establish a messaging protocol between LAMMPS and another code for the
purpose of client/server coupling.
The "Howto client/server"_Howto_client_server.html doc page gives an
overview of client/server coupling of LAMMPS with another code where
one code is the "client" and sends request messages to a "server"
code. The server responds to each request with a reply message. This
enables the two codes to work in tandem to perform a simulation.
:line
The {which} argument defines LAMMPS to be the client or the server.
:line
The {protocol} argument defines the format and content of messages
that will be exchanged between the two codes. The current options
are:
md = run dynamics with another code
mc = perform Monte Carlo moves with another code :ul
For protocol {md}, LAMMPS can be either a client or server. See the
"server md"_server_md.html doc page for details on the protocol.
For protocol {mc}, LAMMPS can be the server. See the "server
mc"_server_mc.html doc page for details on the protocol.
:line
The {mode} argument specifies how messages are exchanged between the
client and server codes. Both codes must use the same mode and use
consistent parameters.
For mode {file}, the 2 codes communicate via binary files. They must
use the same filename, which is actually a file prefix. Several files
with that prefix will be created and deleted as a simulation runs.
The filename can include a path. Both codes must be able to access
the path/file in a common filesystem.
For mode {zmq}, the 2 codes communicate via a socket on the server
code's machine. Support for socket messaging is provided by the
open-source "ZeroMQ library"_http://zeromq.org, which must be
installed on your system. The client specifies an IP address (IPv4
format) or the DNS name of the machine the server code is running on,
followed by a 4-digit port ID for the socket, separated by a colon.
E.g.
localhost:5555 # client and server running on same machine
192.168.1.1:5555 # server is 192.168.1.1
deptbox.uni.edu:5555 # server is deptbox.uni.edu :pre
The server specifies "*:5555" where "*" represents all available
interfaces on the server's machine, and the port ID must match
what the client specifies.
NOTE: What are allowed port IDs?
NOTE: Additional explanation is needed here about how to use the {zmq}
mode on a parallel machine, e.g. a cluster with many nodes.
For mode {mpi/one}, the 2 codes communicate via MPI and are launched
by the same mpirun command, e.g. with this syntax for OpenMPI:
mpirun -np 2 lmp_mpi -mpicolor 0 -in in.client -log log.client : -np 4 othercode args # LAMMPS is client
mpirun -np 2 othercode args : -np 4 lmp_mpi -mpicolor 1 -in in.server # LAMMPS is server :pre
Note the use of the "-mpicolor color" command-line argument with
LAMMPS. See the "command-line args"_Run_options.html doc page for
further explanation.
For mode {mpi/two}, the 2 codes communicate via MPI, but are launched
by 2 separate mpirun commands. The specified {filename} argument is a
file the 2 MPI processes will use to exchange info so that an MPI
inter-communicator can be established to enable the 2 codes to send
MPI messages to each other. Both codes must be able to access the
path/file in a common filesystem.
:line
Normally, the message command should be used at the top of a LAMMPS
input script. It performs an initial handshake with the other code to
set up messaging and to verify that both codes are using the same
message protocol and mode. Assuming both codes are launched at
(nearly) the same time, the other code should perform the same kind of
initialization.
If LAMMPS is the client code, it will begin sending messages when a
LAMMPS client command begins its operation. E.g. for the "fix
client/md"_fix_client_md.html command, it is when a "run"_run.html
command is executed.
If LAMMPS is the server code, it will begin receiving messages when
the "server"_server.html command is invoked.
A fix client command will terminate its messaging with the server when
LAMMPS ends, or the fix is deleted via the "unfix"_unfix.html command. The
server command will terminate its messaging with the client when the
client signals it. Then the remainder of the LAMMPS input script will
be processed.
If both codes do something similar, this means a new round of
client/server messaging can be initiated after termination by re-using
a 2nd message command in your LAMMPS input script, followed by a new
fix client or server command.
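As an illustrative sketch with LAMMPS as the client (the mode, port,
and run lengths are placeholders), a second round could look like:
message client md zmq localhost:5555
fix 1 all client/md
run 1000
unfix 1
message client md zmq localhost:5555
fix 2 all client/md
run 1000 :pre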
:line
[Restrictions:]
This command is part of the MESSAGE package. It is only enabled if
LAMMPS was built with that package. See the "Build
package"_Build_package.html doc page for more info.
[Related commands:]
"server"_server.html, "fix client/md"_fix_client_md.html
[Default:] none

71
doc/src/server.txt Normal file
View File

@ -0,0 +1,71 @@
"LAMMPS WWW Site"_lws - "LAMMPS Documentation"_ld - "LAMMPS Commands"_lc :c
:link(lws,http://lammps.sandia.gov)
:link(ld,Manual.html)
:link(lc,Commands_all.html)
:line
server command :h3
[Syntax:]
server protocol :pre
protocol = {md} or {mc} :ul
[Examples:]
server md :pre
[Description:]
This command starts LAMMPS running in "server" mode, where it receives
messages from a separate "client" code and responds by sending a reply
message back to the client. The specified {protocol} determines the
format and content of messages LAMMPS expects to receive and how it
responds.
The "Howto client/server"_Howto_client_server.html doc page gives an
overview of client/server coupling of LAMMPS with another code where
one code is the "client" and sends request messages to a "server"
code. The server responds to each request with a reply message. This
enables the two codes to work in tandem to perform a simulation.
When this command is invoked, LAMMPS will run in server mode in an
endless loop, waiting for messages from the client code. The client
signals when it is done sending messages to LAMMPS, at which point the
loop will exit, and the remainder of the LAMMPS script will be
processed.
The {protocol} argument defines the format and content of messages
that will be exchanged between the two codes. The current options
are:
"md"_server_md.html = run dynamics with another code
"mc"_server_mc.html = perform Monte Carlo moves with another code :ul
For protocol {md}, LAMMPS can be either a client (via the "fix
client/md"_fix_client_md.html command) or server. See the "server
md"_server_md.html doc page for details on the protocol.
For protocol {mc}, LAMMPS can be the server. See the "server
mc"_server_mc.html doc page for details on the protocol.
:line
[Restrictions:]
This command is part of the MESSAGE package. It is only enabled if
LAMMPS was built with that package. See the "Build
package"_Build_package.html doc page for more info.
A script that uses this command must also use the
"message"_message.html command to setup the messaging protocol with
the other client code.
[Related commands:]
"message"_message.html, "fix client/md"_fix_client_md.html
[Default:] none

116
doc/src/server_mc.txt Normal file
View File

@ -0,0 +1,116 @@
"LAMMPS WWW Site"_lws - "LAMMPS Documentation"_ld - "LAMMPS Commands"_lc :c
:link(lws,http://lammps.sandia.gov)
:link(ld,Manual.html)
:link(lc,Commands_all.html)
:line
server mc command :h3
[Syntax:]
server mc :pre
mc = the protocol argument to the "server"_server.html command
[Examples:]
server mc :pre
[Description:]
This command starts LAMMPS running in "server" mode, where it will
expect messages from a separate "client" code that match the {mc}
protocol for format and content explained below. For each message
LAMMPS receives it will send a message back to the client.
The "Howto client/server"_Howto_client_server.html doc page gives an
overview of client/server coupling of LAMMPS with another code where
one code is the "client" and sends request messages to a "server"
code. The server responds to each request with a reply message. This
enables the two codes to work in tandem to perform a simulation.
When this command is invoked, LAMMPS will run in server mode in an
endless loop, waiting for messages from the client code. The client
signals when it is done sending messages to LAMMPS, at which point the
loop will exit, and the remainder of the LAMMPS script will be
processed.
The "server"_server.html doc page gives other options for using LAMMPS
See an example of how this command is used in
examples/COUPLE/lammps_mc/in.server.
:line
When using this command, LAMMPS (as the server code) receives
instructions from a Monte Carlo (MC) driver to displace random atoms,
compute the energy before and after displacement, and run dynamics to
equilibrate the system.
The MC driver performs the random displacements on random atoms,
accepts or rejects the move in an MC sense, and orchestrates the MD
runs.
The format and content of the exchanged messages are explained here in
a conceptual sense. Python-style pseudo code for the library calls to
the CSlib is shown, which performs the actual message exchange between
the two codes. See the "CSlib website"_http://cslib.sandia.gov doc
pages for more details on the actual library syntax. The "cs" object
in this pseudo code is a pointer to an instance of the CSlib.
See the src/MESSAGE/server_mc.cpp file for details on how LAMMPS uses
these messages. See the examples/COUPLE/lammps_mc/mc.cpp file for an
example of how an MC driver code can use these messages.
Let NATOMS=1, EINIT=2, DISPLACE=3, ACCEPT=4, RUN=5.
[Client sends one of these kinds of message]:
cs->send(NATOMS,0) # msgID = 1 with no fields :pre
cs->send(EINIT,0) # msgID = 2 with no fields :pre
cs->send(DISPLACE,2) # msgID = 3 with 2 fields
cs->pack_int(1,ID) # 1st field = ID of atom to displace
cs->pack(2,3,xnew) # 2nd field = new xyz coords of displaced atom :pre
cs->send(ACCEPT,1) # msgID = 4 with 1 field
cs->pack_int(1,flag) # 1st field = accept/reject flag :pre
cs->send(RUN,1) # msgID = 5 with 1 field
cs->pack_int(1,nsteps) # 1st field = # of timesteps to run MD :pre
[Server replies]:
cs->send(NATOMS,1) # msgID = 1 with 1 field
cs->pack_int(1,natoms) # 1st field = number of atoms :pre
cs->send(EINIT,2) # msgID = 2 with 2 fields
cs->pack_double(1,poteng) # 1st field = potential energy of system
cs->pack(2,3*natoms,x) # 2nd field = 3N coords of Natoms :pre
cs->send(DISPLACE,1) # msgID = 3 with 1 field
cs->pack_double(1,poteng) # 1st field = new potential energy of system :pre
cs->send(ACCEPT,0) # msgID = 4 with no fields
cs->send(RUN,0) # msgID = 5 with no fields
:line
[Restrictions:]
This command is part of the MESSAGE package. It is only enabled if
LAMMPS was built with that package. See the "Build
package"_Build_package.html doc page for more info.
A script that uses this command must also use the
"message"_message.html command to setup the messaging protocol with
the other client code.
[Related commands:]
"message"_message.html
[Default:] none

147
doc/src/server_md.txt Normal file
View File

@ -0,0 +1,147 @@
"LAMMPS WWW Site"_lws - "LAMMPS Documentation"_ld - "LAMMPS Commands"_lc :c
:link(lws,http://lammps.sandia.gov)
:link(ld,Manual.html)
:link(lc,Commands_all.html)
:line
server md command :h3
[Syntax:]
server md :pre
md = the protocol argument to the "server"_server.html command
[Examples:]
server md :pre
[Description:]
This command starts LAMMPS running in "server" mode, where it will
expect messages from a separate "client" code that match the {md}
protocol for format and content explained below. For each message
LAMMPS receives it will send a message back to the client.
The "Howto client/server"_Howto_client_server.html doc page gives an
overview of client/server coupling of LAMMPS with another code where
one code is the "client" and sends request messages to a "server"
code. The server responds to each request with a reply message. This
enables the two codes to work in tandem to perform a simulation.
When this command is invoked, LAMMPS will run in server mode in an
endless loop, waiting for messages from the client code. The client
signals when it is done sending messages to LAMMPS, at which point the
loop will exit, and the remainder of the LAMMPS script will be
processed.
The "server"_server.html doc page gives other options for using LAMMPS
in server mode. See an example of how this command is used in
examples/message/in.message.server.
:line
When using this command, LAMMPS (as the server code) receives the
current coordinates of all particles from the client code each
timestep, computes their interaction, and returns the energy, forces,
and pressure for the interacting particles to the client code, so it
can complete the timestep. This command could also be used with a
client code that performs energy minimization, using the server to
compute forces and energy each iteration of its minimizer.
When using the "fix client/md" command, LAMMPS (as the client code)
does the timestepping and receives needed energy, forces, and pressure
values from the server code.
The format and content of the exchanged messages are explained here in
a conceptual sense. Python-style pseudo code for the library calls to
the CSlib is shown, which performs the actual message exchange between
the two codes. See the "CSlib website"_http://cslib.sandia.gov doc
pages for more details on the actual library syntax. The "cs" object
in this pseudo code is a pointer to an instance of the CSlib.
See the src/MESSAGE/server_md.cpp and src/MESSAGE/fix_client_md.cpp
files for details on how LAMMPS uses these messages. See the
examples/COUPLE/lammps_vasp/vasp_wrapper.py file for an example of how
a quantum code (VASP) can use these messages.
The following pseudo-code uses these values, defined as enums.
enum{SETUP=1,STEP};
enum{DIM=1,PERIODICITY,ORIGIN,BOX,NATOMS,NTYPES,TYPES,COORDS,UNITS,CHARGE};
enum{FORCES=1,ENERGY,PRESSURE,ERROR}; :pre
[Client sends 2 kinds of messages]:
# required fields: DIM, PERIODICITY, ORIGIN, BOX, NATOMS, NTYPES, TYPES, COORDS
# optional fields: UNITS, CHARGE :pre
cs->send(SETUP,nfields) # msgID with nfields :pre
cs->pack_int(DIM,dim) # dimension (2,3) of simulation
cs->pack(PERIODICITY,3,xyz) # periodicity flags in 3 dims
cs->pack(ORIGIN,3,origin) # lower-left corner of simulation box
cs->pack(BOX,9,box) # 3 edge vectors of simulation box
cs->pack_int(NATOMS,natoms) # total number of atoms
cs->pack_int(NTYPES,ntypes) # number of atom types
cs->pack(TYPES,natoms,type) # vector of per-atom types
cs->pack(COORDS,3*natoms,x) # vector of 3N atom coords
cs->pack_string(UNITS,units) # units = "lj", "real", "metal", etc
cs->pack(CHARGE,natoms,q) # vector of per-atom charge :pre
# required fields: COORDS
# optional fields: ORIGIN, BOX :pre
cs->send(STEP,nfields) # msgID with nfields :pre
cs->pack(COORDS,3*natoms,x) # vector of 3N atom coords
cs->pack(ORIGIN,3,origin) # lower-left corner of simulation box
cs->pack(BOX,9,box) # 3 edge vectors of simulation box
[Server replies to either kind of message]:
# required fields: FORCES, ENERGY, PRESSURE
# optional fields: ERROR :pre
cs->send(msgID,nfields) # msgID with nfields
cs->pack(FORCES,3*Natoms,f) # vector of 3N forces on atoms
cs->pack(ENERGY,1,poteng) # total potential energy of system
cs->pack(PRESSURE,6,press) # global pressure tensor (6-vector)
cs->pack_int(ERROR,flag) # server had an error (e.g. DFT non-convergence) :pre
:line
The units for various quantities that are sent and received via
messages are defined for atomic-scale simulations in the table below.
The client and server codes (including LAMMPS) can use internal units
different than these (e.g. "real units"_units.html in LAMMPS), so long
as they convert to these units for messaging.
COORDS, ORIGIN, BOX = Angstroms
CHARGE = multiple of electron charge (1.0 is a proton)
ENERGY = eV
FORCES = eV/Angstrom
PRESSURE = bars :ul
Note that these are "metal units"_units.html in LAMMPS.
If you wish to run LAMMPS in one of its non-atomic unit systems, e.g. "lj
units"_units.html, then the client and server should exchange a UNITS
message as indicated above, and both the client and server should
agree on the units for the data they exchange.
:line
[Restrictions:]
This command is part of the MESSAGE package. It is only enabled if
LAMMPS was built with that package. See the "Build
package"_Build_package.html doc page for more info.
[Related commands:]
"message"_message.html, "fix client/md"_fix_client_md.html
[Default:] none

View File

@ -10,6 +10,7 @@ See these sections of the LAMMPS manual for details:
2.5 Building LAMMPS as a library (doc/Section_start.html#start_5)
6.10 Coupling LAMMPS to other codes (doc/Section_howto.html#howto_10)
6.29 Using LAMMPS in client/server mode (doc/Section_howto.html#howto_29)
In all of the examples included here, LAMMPS must first be built as a
library. Basically, in the src dir you type one of
@ -33,9 +34,13 @@ These are the sub-directories included in this directory:
simple simple example of driver code calling LAMMPS as a lib
multiple example of driver code calling multiple instances of LAMMPS
lammps_mc client/server coupling of Monte Carlo client
with LAMMPS server for energy evaluation
lammps_quest MD with quantum forces, coupling to Quest DFT code
lammps_spparks grain-growth Monte Carlo with strain via MD,
coupling to SPPARKS kinetic MC code
lammps_vasp client/server coupling of LAMMPS client with
VASP quantum DFT as server for quantum forces
library collection of useful inter-code communication routines
fortran a simple wrapper on the LAMMPS library API that
can be called from Fortran

View File

@ -0,0 +1,33 @@
# Makefile for MC
SHELL = /bin/sh
SRC = mc.cpp random_park.cpp
OBJ = $(SRC:.cpp=.o)
# change this line for your machine to path for CSlib src dir
CSLIB = /home/sjplimp/lammps/lib/message/cslib/src
# compiler/linker settings
CC = g++
CCFLAGS = -g -O3 -I$(CSLIB)
LINK = g++
LINKFLAGS = -g -O -L$(CSLIB)
# targets
mc: $(OBJ)
# first line if built the CSlib within lib/message with ZMQ support
# second line if built the CSlib without ZMQ support
$(LINK) $(LINKFLAGS) $(OBJ) -lcsnompi -lzmq -o mc
# $(LINK) $(LINKFLAGS) $(OBJ) -lcsnompi -o mc
clean:
@rm -f *.o mc
# rules
%.o:%.cpp
$(CC) $(CCFLAGS) -c $<

View File

@ -0,0 +1,128 @@
Sample Monte Carlo (MC) wrapper on LAMMPS via client/server coupling
See the MESSAGE package (doc/Section_messages.html#MESSAGE)
and Section_howto.html#howto10 for more details on how
client/server coupling works in LAMMPS.
In this dir, the mc.cpp/h files are a standalone "client" MC code. It
should be run on a single processor, though it could become a parallel
program at some point. LAMMPS is also run as a standalone executable
as a "server" on as many processors as desired using its "server mc"
command; see its doc page for details.
Messages are exchanged between MC and LAMMPS via a client/server
library (CSlib), which is included in the LAMMPS distribution in
lib/message. As explained below you can choose to exchange data
between the two programs either via files or sockets (ZMQ). If the MC
program became parallel, data could also be exchanged via MPI.
The MC code makes simple MC moves, by displacing a single random atom
by a small random amount. It uses LAMMPS to calculate the energy
change, and to run dynamics between MC moves.
----------------
Build LAMMPS with its MESSAGE package installed:
See the Build extras doc page and its MESSAGE package
section for details.
CMake:
-D PKG_MESSAGE=yes # include the MESSAGE package
-D MESSAGE_ZMQ=value # build with ZeroMQ support, value = no (default) or yes
Traditional make:
% cd lammps/lib/message
% python Install.py -m -z # build CSlib with MPI and ZMQ support
% cd lammps/src
% make yes-message
% make mpi
You can leave off the -z if you do not have ZMQ on your system.
----------------
Build the MC client code
The source files for the MC code are in this dir. It links with the
CSlib library in lib/message/cslib.
You must first build the CSlib in serial mode, e.g.
% cd lammps/lib/message/cslib/src
% make lib # build serial and parallel lib with ZMQ support
% make lib zmq=no # build serial and parallel lib without ZMQ support
Then edit the Makefile in this dir. The CSLIB variable should be the
path to where the LAMMPS lib/message/cslib/src dir is on your system.
If you built the CSlib without ZMQ support you will also need to
comment/uncomment one line. Then you can just type
% make
and you should get an "mc" executable.
----------------
To run in client/server mode:
Both the client (MC) and server (LAMMPS) must use the same messaging
mode, namely file or zmq. This is an argument to the MC code; it can
be selected by setting the "mode" variable when you run LAMMPS. The
default mode = file.
Here we assume LAMMPS was built to run in parallel, and the MESSAGE
package was installed with socket (ZMQ) support. This means either of
the messaging modes can be used and LAMMPS can be run in serial or
parallel. The MC code is always run in serial.
When you run, the server should print out thermodynamic info
for every MD run it performs (between MC moves). The client
will print nothing until the simulation ends, then it will
print stats about the accepted MC moves.
The examples below are commands you should use in two different
terminal windows. The order of the two commands (client or server
launch) does not matter. You can run them both in the same window if
you append a "&" character to the first one to run it in the
background.
--------------
File mode of messaging:
% mpirun -np 1 mc in.mc file tmp.couple
% mpirun -np 1 lmp_mpi -v mode file < in.mc.server
% mpirun -np 1 mc in.mc file tmp.couple
% mpirun -np 4 lmp_mpi -v mode file < in.mc.server
ZMQ mode of messaging:
% mpirun -np 1 mc in.mc zmq localhost:5555
% mpirun -np 1 lmp_mpi -v mode zmq < in.mc.server
% mpirun -np 1 mc in.mc zmq localhost:5555
% mpirun -np 4 lmp_mpi -v mode zmq < in.mc.server
--------------
The input script for the MC program is in.mc. You can edit it to run
longer simulations.
500 nsteps = total # of steps of MD
100 ndynamics = # of MD steps between MC moves
0.1 delta = displacement size of MC move
1.0 temperature = used in MC Boltzmann factor
12345 seed = random number seed
--------------
The problem size that LAMMPS is computing the MC energy for and
running dynamics on is set by the x,y,z variables in the LAMMPS
in.mc.server script. The default size is 500 particles. You can
adjust the size as follows:
lmp_mpi -v x 10 -v y 10 -v z 20 # 8000 particles

View File

@ -0,0 +1,7 @@
# MC params
500 nsteps
100 ndynamics
0.1 delta
1.0 temperature
12345 seed

View File

@ -0,0 +1,36 @@
# 3d Lennard-Jones Monte Carlo server script
variable mode index file
if "${mode} == file" then &
"message server mc file tmp.couple" &
elif "${mode} == zmq" &
"message server mc zmq *:5555" &
variable x index 5
variable y index 5
variable z index 5
units lj
atom_style atomic
atom_modify map yes
lattice fcc 0.8442
region box block 0 $x 0 $y 0 $z
create_box 1 box
create_atoms 1 box
mass 1 1.0
pair_style lj/cut 2.5
pair_coeff 1 1 1.0 1.0 2.5
neighbor 0.3 bin
neigh_modify delay 0 every 20 check no
velocity all create 1.44 87287 loop geom
fix 1 all nve
thermo 50
server mc

View File

@ -0,0 +1,254 @@
LAMMPS (22 Aug 2018)
# 3d Lennard-Jones Monte Carlo server script
variable mode index file
if "${mode} == file" then "message server mc file tmp.couple" elif "${mode} == zmq" "message server mc zmq *:5555"
message server mc file tmp.couple
variable x index 5
variable y index 5
variable z index 5
units lj
atom_style atomic
atom_modify map yes
lattice fcc 0.8442
Lattice spacing in x,y,z = 1.6796 1.6796 1.6796
region box block 0 $x 0 $y 0 $z
region box block 0 5 0 $y 0 $z
region box block 0 5 0 5 0 $z
region box block 0 5 0 5 0 5
create_box 1 box
Created orthogonal box = (0 0 0) to (8.39798 8.39798 8.39798)
1 by 1 by 1 MPI processor grid
create_atoms 1 box
Created 500 atoms
Time spent = 0.000649929 secs
mass 1 1.0
pair_style lj/cut 2.5
pair_coeff 1 1 1.0 1.0 2.5
neighbor 0.3 bin
neigh_modify delay 0 every 20 check no
velocity all create 1.44 87287 loop geom
fix 1 all nve
thermo 50
server mc
run 0
Neighbor list info ...
update every 20 steps, delay 0 steps, check no
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 2.8
ghost atom cutoff = 2.8
binsize = 1.4, bins = 6 6 6
1 neighbor lists, perpetual/occasional/extra = 1 0 0
(1) pair lj/cut, perpetual
attributes: half, newton on
pair build: half/bin/atomonly/newton
stencil: half/bin/3d/newton
bin: standard
Per MPI rank memory allocation (min/avg/max) = 2.658 | 2.658 | 2.658 Mbytes
Step Temp E_pair E_mol TotEng Press
0 1.44 -6.7733681 0 -4.6176881 -5.0221006
Loop time of 2.14577e-06 on 1 procs for 0 steps with 500 atoms
93.2% CPU use with 1 MPI tasks x no OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0 | 0 | 0 | 0.0 | 0.00
Output | 0 | 0 | 0 | 0.0 | 0.00
Modify | 0 | 0 | 0 | 0.0 | 0.00
Other | | 2.146e-06 | | |100.00
Nlocal: 500 ave 500 max 500 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 1956 ave 1956 max 1956 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 19500 ave 19500 max 19500 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 19500
Ave neighs/atom = 39
Neighbor list builds = 0
Dangerous builds not checked
run 0
Per MPI rank memory allocation (min/avg/max) = 2.658 | 2.658 | 2.658 Mbytes
Step Temp E_pair E_mol TotEng Press
0 1.44 -6.7723127 0 -4.6166327 -5.015531
Loop time of 2.14577e-06 on 1 procs for 0 steps with 500 atoms
93.2% CPU use with 1 MPI tasks x no OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0 | 0 | 0 | 0.0 | 0.00
Output | 0 | 0 | 0 | 0.0 | 0.00
Modify | 0 | 0 | 0 | 0.0 | 0.00
Other | | 2.146e-06 | | |100.00
Nlocal: 500 ave 500 max 500 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 1956 ave 1956 max 1956 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 19501 ave 19501 max 19501 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 19501
Ave neighs/atom = 39.002
Neighbor list builds = 0
Dangerous builds not checked
Per MPI rank memory allocation (min/avg/max) = 2.658 | 2.658 | 2.658 Mbytes
Step Temp E_pair E_mol TotEng Press
0 1.44 -6.7723127 0 -4.6166327 -5.015531
50 0.70239211 -5.6763152 0 -4.6248342 0.59544428
100 0.7565013 -5.757431 0 -4.6249485 0.21982657
run 0
Per MPI rank memory allocation (min/avg/max) = 2.658 | 2.658 | 2.658 Mbytes
Step Temp E_pair E_mol TotEng Press
100 0.7565013 -5.7565768 0 -4.6240944 0.22436405
Loop time of 1.90735e-06 on 1 procs for 0 steps with 500 atoms
157.3% CPU use with 1 MPI tasks x no OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0 | 0 | 0 | 0.0 | 0.00
Output | 0 | 0 | 0 | 0.0 | 0.00
Modify | 0 | 0 | 0 | 0.0 | 0.00
Other | | 1.907e-06 | | |100.00
Nlocal: 500 ave 500 max 500 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 1939 ave 1939 max 1939 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 18757 ave 18757 max 18757 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 18757
Ave neighs/atom = 37.514
Neighbor list builds = 0
Dangerous builds not checked
Per MPI rank memory allocation (min/avg/max) = 2.658 | 2.658 | 2.658 Mbytes
Step Temp E_pair E_mol TotEng Press
100 0.7565013 -5.757431 0 -4.6249485 0.21982657
150 0.76110797 -5.7664315 0 -4.6270529 0.16005254
200 0.73505651 -5.7266069 0 -4.6262273 0.34189744
run 0
Per MPI rank memory allocation (min/avg/max) = 2.658 | 2.658 | 2.658 Mbytes
Step Temp E_pair E_mol TotEng Press
200 0.73505651 -5.7181381 0 -4.6177585 0.37629943
Loop time of 2.14577e-06 on 1 procs for 0 steps with 500 atoms
139.8% CPU use with 1 MPI tasks x no OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0 | 0 | 0 | 0.0 | 0.00
Output | 0 | 0 | 0 | 0.0 | 0.00
Modify | 0 | 0 | 0 | 0.0 | 0.00
Other | | 2.146e-06 | | |100.00
Nlocal: 500 ave 500 max 500 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 1899 ave 1899 max 1899 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 18699 ave 18699 max 18699 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 18699
Ave neighs/atom = 37.398
Neighbor list builds = 0
Dangerous builds not checked
Per MPI rank memory allocation (min/avg/max) = 2.658 | 2.658 | 2.658 Mbytes
Step Temp E_pair E_mol TotEng Press
200 0.73505651 -5.7266069 0 -4.6262273 0.34189744
250 0.73052476 -5.7206316 0 -4.627036 0.39287516
300 0.76300831 -5.7675007 0 -4.6252773 0.16312925
run 0
Per MPI rank memory allocation (min/avg/max) = 2.658 | 2.658 | 2.658 Mbytes
Step Temp E_pair E_mol TotEng Press
300 0.76300831 -5.768304 0 -4.6260806 0.15954325
Loop time of 2.14577e-06 on 1 procs for 0 steps with 500 atoms
139.8% CPU use with 1 MPI tasks x no OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0 | 0 | 0 | 0.0 | 0.00
Output | 0 | 0 | 0 | 0.0 | 0.00
Modify | 0 | 0 | 0 | 0.0 | 0.00
Other | | 2.146e-06 | | |100.00
Nlocal: 500 ave 500 max 500 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 1903 ave 1903 max 1903 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 18715 ave 18715 max 18715 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 18715
Ave neighs/atom = 37.43
Neighbor list builds = 0
Dangerous builds not checked
Per MPI rank memory allocation (min/avg/max) = 2.658 | 2.658 | 2.658 Mbytes
Step Temp E_pair E_mol TotEng Press
300 0.76300831 -5.768304 0 -4.6260806 0.15954325
350 0.72993309 -5.7193261 0 -4.6266162 0.3358374
400 0.72469448 -5.713463 0 -4.6285954 0.44859547
run 0
Per MPI rank memory allocation (min/avg/max) = 2.658 | 2.658 | 2.658 Mbytes
Step Temp E_pair E_mol TotEng Press
400 0.72469448 -5.7077332 0 -4.6228655 0.47669832
Loop time of 1.90735e-06 on 1 procs for 0 steps with 500 atoms
157.3% CPU use with 1 MPI tasks x no OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0 | 0 | 0 | 0.0 | 0.00
Output | 0 | 0 | 0 | 0.0 | 0.00
Modify | 0 | 0 | 0 | 0.0 | 0.00
Other | | 1.907e-06 | | |100.00
Nlocal: 500 ave 500 max 500 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 1899 ave 1899 max 1899 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 18683 ave 18683 max 18683 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 18683
Ave neighs/atom = 37.366
Neighbor list builds = 0
Dangerous builds not checked
Per MPI rank memory allocation (min/avg/max) = 2.658 | 2.658 | 2.658 Mbytes
Step Temp E_pair E_mol TotEng Press
400 0.72469448 -5.713463 0 -4.6285954 0.44859547
450 0.75305735 -5.7518283 0 -4.6245015 0.34658587
500 0.73092571 -5.7206337 0 -4.6264379 0.43715809
Total wall time: 0:00:02

View File

@ -0,0 +1,254 @@
LAMMPS (22 Aug 2018)
# 3d Lennard-Jones Monte Carlo server script
variable mode index file
if "${mode} == file" then "message server mc file tmp.couple" elif "${mode} == zmq" "message server mc zmq *:5555"
message server mc file tmp.couple
variable x index 5
variable y index 5
variable z index 5
units lj
atom_style atomic
atom_modify map yes
lattice fcc 0.8442
Lattice spacing in x,y,z = 1.6796 1.6796 1.6796
region box block 0 $x 0 $y 0 $z
region box block 0 5 0 $y 0 $z
region box block 0 5 0 5 0 $z
region box block 0 5 0 5 0 5
create_box 1 box
Created orthogonal box = (0 0 0) to (8.39798 8.39798 8.39798)
1 by 2 by 2 MPI processor grid
create_atoms 1 box
Created 500 atoms
Time spent = 0.000592947 secs
mass 1 1.0
pair_style lj/cut 2.5
pair_coeff 1 1 1.0 1.0 2.5
neighbor 0.3 bin
neigh_modify delay 0 every 20 check no
velocity all create 1.44 87287 loop geom
fix 1 all nve
thermo 50
server mc
run 0
Neighbor list info ...
update every 20 steps, delay 0 steps, check no
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 2.8
ghost atom cutoff = 2.8
binsize = 1.4, bins = 6 6 6
1 neighbor lists, perpetual/occasional/extra = 1 0 0
(1) pair lj/cut, perpetual
attributes: half, newton on
pair build: half/bin/atomonly/newton
stencil: half/bin/3d/newton
bin: standard
Per MPI rank memory allocation (min/avg/max) = 2.619 | 2.619 | 2.619 Mbytes
Step Temp E_pair E_mol TotEng Press
0 1.44 -6.7733681 0 -4.6176881 -5.0221006
Loop time of 3.8147e-06 on 4 procs for 0 steps with 500 atoms
59.0% CPU use with 4 MPI tasks x no OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0 | 0 | 0 | 0.0 | 0.00
Output | 0 | 0 | 0 | 0.0 | 0.00
Modify | 0 | 0 | 0 | 0.0 | 0.00
Other | | 3.815e-06 | | |100.00
Nlocal: 125 ave 125 max 125 min
Histogram: 4 0 0 0 0 0 0 0 0 0
Nghost: 1099 ave 1099 max 1099 min
Histogram: 4 0 0 0 0 0 0 0 0 0
Neighs: 4875 ave 4875 max 4875 min
Histogram: 4 0 0 0 0 0 0 0 0 0
Total # of neighbors = 19500
Ave neighs/atom = 39
Neighbor list builds = 0
Dangerous builds not checked
run 0
Per MPI rank memory allocation (min/avg/max) = 2.619 | 2.619 | 2.619 Mbytes
Step Temp E_pair E_mol TotEng Press
0 1.44 -6.7723127 0 -4.6166327 -5.015531
Loop time of 3.03984e-06 on 4 procs for 0 steps with 500 atoms
106.9% CPU use with 4 MPI tasks x no OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0 | 0 | 0 | 0.0 | 0.00
Output | 0 | 0 | 0 | 0.0 | 0.00
Modify | 0 | 0 | 0 | 0.0 | 0.00
Other | | 3.04e-06 | | |100.00
Nlocal: 125 ave 125 max 125 min
Histogram: 4 0 0 0 0 0 0 0 0 0
Nghost: 1099 ave 1099 max 1099 min
Histogram: 4 0 0 0 0 0 0 0 0 0
Neighs: 4875.25 ave 4885 max 4866 min
Histogram: 1 0 0 0 2 0 0 0 0 1
Total # of neighbors = 19501
Ave neighs/atom = 39.002
Neighbor list builds = 0
Dangerous builds not checked
Per MPI rank memory allocation (min/avg/max) = 2.619 | 2.619 | 2.619 Mbytes
Step Temp E_pair E_mol TotEng Press
0 1.44 -6.7723127 0 -4.6166327 -5.015531
50 0.70210225 -5.6759068 0 -4.6248598 0.59609192
100 0.75891559 -5.7611234 0 -4.6250267 0.20841608
run 0
Per MPI rank memory allocation (min/avg/max) = 2.619 | 2.619 | 2.619 Mbytes
Step Temp E_pair E_mol TotEng Press
100 0.75891559 -5.7609392 0 -4.6248426 0.20981291
Loop time of 3.75509e-06 on 4 procs for 0 steps with 500 atoms
113.2% CPU use with 4 MPI tasks x no OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0 | 0 | 0 | 0.0 | 0.00
Output | 0 | 0 | 0 | 0.0 | 0.00
Modify | 0 | 0 | 0 | 0.0 | 0.00
Other | | 3.755e-06 | | |100.00
Nlocal: 125 ave 126 max 124 min
Histogram: 2 0 0 0 0 0 0 0 0 2
Nghost: 1085.25 ave 1089 max 1079 min
Histogram: 1 0 0 0 0 1 0 0 0 2
Neighs: 4690.25 ave 4996 max 4401 min
Histogram: 1 0 0 1 0 1 0 0 0 1
Total # of neighbors = 18761
Ave neighs/atom = 37.522
Neighbor list builds = 0
Dangerous builds not checked
Per MPI rank memory allocation (min/avg/max) = 2.619 | 2.619 | 2.619 Mbytes
Step Temp E_pair E_mol TotEng Press
100 0.75891559 -5.7609392 0 -4.6248426 0.20981291
150 0.75437991 -5.7558622 0 -4.6265555 0.20681722
200 0.73111257 -5.7193748 0 -4.6248993 0.35230715
run 0
Per MPI rank memory allocation (min/avg/max) = 2.619 | 2.619 | 2.619 Mbytes
Step Temp E_pair E_mol TotEng Press
200 0.73111257 -5.7143906 0 -4.6199151 0.37126023
Loop time of 2.563e-06 on 4 procs for 0 steps with 500 atoms
117.1% CPU use with 4 MPI tasks x no OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0 | 0 | 0 | 0.0 | 0.00
Output | 0 | 0 | 0 | 0.0 | 0.00
Modify | 0 | 0 | 0 | 0.0 | 0.00
Other | | 2.563e-06 | | |100.00
Nlocal: 125 ave 126 max 123 min
Histogram: 1 0 0 0 0 0 1 0 0 2
Nghost: 1068.5 ave 1076 max 1063 min
Histogram: 2 0 0 0 0 0 1 0 0 1
Neighs: 4674.75 ave 4938 max 4419 min
Histogram: 1 0 0 0 1 1 0 0 0 1
Total # of neighbors = 18699
Ave neighs/atom = 37.398
Neighbor list builds = 0
Dangerous builds not checked
Per MPI rank memory allocation (min/avg/max) = 2.619 | 2.619 | 2.619 Mbytes
Step Temp E_pair E_mol TotEng Press
200 0.73111257 -5.7193748 0 -4.6248993 0.35230715
250 0.73873144 -5.7312505 0 -4.6253696 0.33061033
300 0.76392796 -5.7719207 0 -4.6283206 0.18197874
run 0
Per MPI rank memory allocation (min/avg/max) = 2.619 | 2.619 | 2.619 Mbytes
Step Temp E_pair E_mol TotEng Press
300 0.76392796 -5.7725589 0 -4.6289588 0.17994628
Loop time of 3.99351e-06 on 4 procs for 0 steps with 500 atoms
93.9% CPU use with 4 MPI tasks x no OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0 | 0 | 0 | 0.0 | 0.00
Output | 0 | 0 | 0 | 0.0 | 0.00
Modify | 0 | 0 | 0 | 0.0 | 0.00
Other | | 3.994e-06 | | |100.00
Nlocal: 125 ave 128 max 121 min
Histogram: 1 0 0 0 0 1 0 1 0 1
Nghost: 1069 ave 1080 max 1055 min
Histogram: 1 0 0 0 0 0 2 0 0 1
Neighs: 4672 ave 4803 max 4600 min
Histogram: 2 0 0 1 0 0 0 0 0 1
Total # of neighbors = 18688
Ave neighs/atom = 37.376
Neighbor list builds = 0
Dangerous builds not checked
Per MPI rank memory allocation (min/avg/max) = 2.619 | 2.619 | 2.619 Mbytes
Step Temp E_pair E_mol TotEng Press
300 0.76392796 -5.7725589 0 -4.6289588 0.17994628
350 0.71953041 -5.7041632 0 -4.6270261 0.44866153
400 0.7319047 -5.7216051 0 -4.6259438 0.46321355
run 0
Per MPI rank memory allocation (min/avg/max) = 2.619 | 2.619 | 2.619 Mbytes
Step Temp E_pair E_mol TotEng Press
400 0.7319047 -5.7158168 0 -4.6201554 0.49192039
Loop time of 3.57628e-06 on 4 procs for 0 steps with 500 atoms
111.8% CPU use with 4 MPI tasks x no OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0 | 0 | 0 | 0.0 | 0.00
Output | 0 | 0 | 0 | 0.0 | 0.00
Modify | 0 | 0 | 0 | 0.0 | 0.00
Other | | 3.576e-06 | | |100.00
Nlocal: 125 ave 132 max 118 min
Histogram: 1 0 0 0 0 2 0 0 0 1
Nghost: 1057.5 ave 1068 max 1049 min
Histogram: 1 0 0 1 1 0 0 0 0 1
Neighs: 4685.75 ave 5045 max 4229 min
Histogram: 1 0 0 1 0 0 0 0 0 2
Total # of neighbors = 18743
Ave neighs/atom = 37.486
Neighbor list builds = 0
Dangerous builds not checked
Per MPI rank memory allocation (min/avg/max) = 2.619 | 2.619 | 2.619 Mbytes
Step Temp E_pair E_mol TotEng Press
400 0.7319047 -5.7216051 0 -4.6259438 0.46321355
450 0.74503154 -5.7405318 0 -4.6252196 0.33211879
500 0.70570501 -5.6824439 0 -4.6260035 0.62020788
Total wall time: 0:00:02

@ -0,0 +1,254 @@
LAMMPS (22 Aug 2018)
# 3d Lennard-Jones Monte Carlo server script
variable mode index file
if "${mode} == file" then "message server mc file tmp.couple" elif "${mode} == zmq" "message server mc zmq *:5555"
message server mc zmq *:5555
variable x index 5
variable y index 5
variable z index 5
units lj
atom_style atomic
atom_modify map yes
lattice fcc 0.8442
Lattice spacing in x,y,z = 1.6796 1.6796 1.6796
region box block 0 $x 0 $y 0 $z
region box block 0 5 0 $y 0 $z
region box block 0 5 0 5 0 $z
region box block 0 5 0 5 0 5
create_box 1 box
Created orthogonal box = (0 0 0) to (8.39798 8.39798 8.39798)
1 by 1 by 1 MPI processor grid
create_atoms 1 box
Created 500 atoms
Time spent = 0.000741005 secs
mass 1 1.0
pair_style lj/cut 2.5
pair_coeff 1 1 1.0 1.0 2.5
neighbor 0.3 bin
neigh_modify delay 0 every 20 check no
velocity all create 1.44 87287 loop geom
fix 1 all nve
thermo 50
server mc
run 0
Neighbor list info ...
update every 20 steps, delay 0 steps, check no
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 2.8
ghost atom cutoff = 2.8
binsize = 1.4, bins = 6 6 6
1 neighbor lists, perpetual/occasional/extra = 1 0 0
(1) pair lj/cut, perpetual
attributes: half, newton on
pair build: half/bin/atomonly/newton
stencil: half/bin/3d/newton
bin: standard
Per MPI rank memory allocation (min/avg/max) = 2.658 | 2.658 | 2.658 Mbytes
Step Temp E_pair E_mol TotEng Press
0 1.44 -6.7733681 0 -4.6176881 -5.0221006
Loop time of 1.90735e-06 on 1 procs for 0 steps with 500 atoms
52.4% CPU use with 1 MPI tasks x no OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0 | 0 | 0 | 0.0 | 0.00
Output | 0 | 0 | 0 | 0.0 | 0.00
Modify | 0 | 0 | 0 | 0.0 | 0.00
Other | | 1.907e-06 | | |100.00
Nlocal: 500 ave 500 max 500 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 1956 ave 1956 max 1956 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 19500 ave 19500 max 19500 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 19500
Ave neighs/atom = 39
Neighbor list builds = 0
Dangerous builds not checked
run 0
Per MPI rank memory allocation (min/avg/max) = 2.658 | 2.658 | 2.658 Mbytes
Step Temp E_pair E_mol TotEng Press
0 1.44 -6.7723127 0 -4.6166327 -5.015531
Loop time of 1.90735e-06 on 1 procs for 0 steps with 500 atoms
52.4% CPU use with 1 MPI tasks x no OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0 | 0 | 0 | 0.0 | 0.00
Output | 0 | 0 | 0 | 0.0 | 0.00
Modify | 0 | 0 | 0 | 0.0 | 0.00
Other | | 1.907e-06 | | |100.00
Nlocal: 500 ave 500 max 500 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 1956 ave 1956 max 1956 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 19501 ave 19501 max 19501 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 19501
Ave neighs/atom = 39.002
Neighbor list builds = 0
Dangerous builds not checked
Per MPI rank memory allocation (min/avg/max) = 2.658 | 2.658 | 2.658 Mbytes
Step Temp E_pair E_mol TotEng Press
0 1.44 -6.7723127 0 -4.6166327 -5.015531
50 0.70239211 -5.6763152 0 -4.6248342 0.59544428
100 0.7565013 -5.757431 0 -4.6249485 0.21982657
run 0
Per MPI rank memory allocation (min/avg/max) = 2.658 | 2.658 | 2.658 Mbytes
Step Temp E_pair E_mol TotEng Press
100 0.7565013 -5.7565768 0 -4.6240944 0.22436405
Loop time of 1.19209e-06 on 1 procs for 0 steps with 500 atoms
83.9% CPU use with 1 MPI tasks x no OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0 | 0 | 0 | 0.0 | 0.00
Output | 0 | 0 | 0 | 0.0 | 0.00
Modify | 0 | 0 | 0 | 0.0 | 0.00
Other | | 1.192e-06 | | |100.00
Nlocal: 500 ave 500 max 500 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 1939 ave 1939 max 1939 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 18757 ave 18757 max 18757 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 18757
Ave neighs/atom = 37.514
Neighbor list builds = 0
Dangerous builds not checked
Per MPI rank memory allocation (min/avg/max) = 2.658 | 2.658 | 2.658 Mbytes
Step Temp E_pair E_mol TotEng Press
100 0.7565013 -5.757431 0 -4.6249485 0.21982657
150 0.76110797 -5.7664315 0 -4.6270529 0.16005254
200 0.73505651 -5.7266069 0 -4.6262273 0.34189744
run 0
Per MPI rank memory allocation (min/avg/max) = 2.658 | 2.658 | 2.658 Mbytes
Step Temp E_pair E_mol TotEng Press
200 0.73505651 -5.7181381 0 -4.6177585 0.37629943
Loop time of 9.53674e-07 on 1 procs for 0 steps with 500 atoms
209.7% CPU use with 1 MPI tasks x no OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0 | 0 | 0 | 0.0 | 0.00
Output | 0 | 0 | 0 | 0.0 | 0.00
Modify | 0 | 0 | 0 | 0.0 | 0.00
Other | | 9.537e-07 | | |100.00
Nlocal: 500 ave 500 max 500 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 1899 ave 1899 max 1899 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 18699 ave 18699 max 18699 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 18699
Ave neighs/atom = 37.398
Neighbor list builds = 0
Dangerous builds not checked
Per MPI rank memory allocation (min/avg/max) = 2.658 | 2.658 | 2.658 Mbytes
Step Temp E_pair E_mol TotEng Press
200 0.73505651 -5.7266069 0 -4.6262273 0.34189744
250 0.73052476 -5.7206316 0 -4.627036 0.39287516
300 0.76300831 -5.7675007 0 -4.6252773 0.16312925
run 0
Per MPI rank memory allocation (min/avg/max) = 2.658 | 2.658 | 2.658 Mbytes
Step Temp E_pair E_mol TotEng Press
300 0.76300831 -5.768304 0 -4.6260806 0.15954325
Loop time of 9.53674e-07 on 1 procs for 0 steps with 500 atoms
104.9% CPU use with 1 MPI tasks x no OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0 | 0 | 0 | 0.0 | 0.00
Output | 0 | 0 | 0 | 0.0 | 0.00
Modify | 0 | 0 | 0 | 0.0 | 0.00
Other | | 9.537e-07 | | |100.00
Nlocal: 500 ave 500 max 500 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 1903 ave 1903 max 1903 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 18715 ave 18715 max 18715 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 18715
Ave neighs/atom = 37.43
Neighbor list builds = 0
Dangerous builds not checked
Per MPI rank memory allocation (min/avg/max) = 2.658 | 2.658 | 2.658 Mbytes
Step Temp E_pair E_mol TotEng Press
300 0.76300831 -5.768304 0 -4.6260806 0.15954325
350 0.72993309 -5.7193261 0 -4.6266162 0.3358374
400 0.72469448 -5.713463 0 -4.6285954 0.44859547
run 0
Per MPI rank memory allocation (min/avg/max) = 2.658 | 2.658 | 2.658 Mbytes
Step Temp E_pair E_mol TotEng Press
400 0.72469448 -5.7077332 0 -4.6228655 0.47669832
Loop time of 9.53674e-07 on 1 procs for 0 steps with 500 atoms
209.7% CPU use with 1 MPI tasks x no OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0 | 0 | 0 | 0.0 | 0.00
Output | 0 | 0 | 0 | 0.0 | 0.00
Modify | 0 | 0 | 0 | 0.0 | 0.00
Other | | 9.537e-07 | | |100.00
Nlocal: 500 ave 500 max 500 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 1899 ave 1899 max 1899 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 18683 ave 18683 max 18683 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 18683
Ave neighs/atom = 37.366
Neighbor list builds = 0
Dangerous builds not checked
Per MPI rank memory allocation (min/avg/max) = 2.658 | 2.658 | 2.658 Mbytes
Step Temp E_pair E_mol TotEng Press
400 0.72469448 -5.713463 0 -4.6285954 0.44859547
450 0.75305735 -5.7518283 0 -4.6245015 0.34658587
500 0.73092571 -5.7206337 0 -4.6264379 0.43715809
Total wall time: 0:00:00

@ -0,0 +1,254 @@
LAMMPS (22 Aug 2018)
# 3d Lennard-Jones Monte Carlo server script
variable mode index file
if "${mode} == file" then "message server mc file tmp.couple" elif "${mode} == zmq" "message server mc zmq *:5555"
message server mc zmq *:5555
variable x index 5
variable y index 5
variable z index 5
units lj
atom_style atomic
atom_modify map yes
lattice fcc 0.8442
Lattice spacing in x,y,z = 1.6796 1.6796 1.6796
region box block 0 $x 0 $y 0 $z
region box block 0 5 0 $y 0 $z
region box block 0 5 0 5 0 $z
region box block 0 5 0 5 0 5
create_box 1 box
Created orthogonal box = (0 0 0) to (8.39798 8.39798 8.39798)
1 by 2 by 2 MPI processor grid
create_atoms 1 box
Created 500 atoms
Time spent = 0.000576019 secs
mass 1 1.0
pair_style lj/cut 2.5
pair_coeff 1 1 1.0 1.0 2.5
neighbor 0.3 bin
neigh_modify delay 0 every 20 check no
velocity all create 1.44 87287 loop geom
fix 1 all nve
thermo 50
server mc
run 0
Neighbor list info ...
update every 20 steps, delay 0 steps, check no
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 2.8
ghost atom cutoff = 2.8
binsize = 1.4, bins = 6 6 6
1 neighbor lists, perpetual/occasional/extra = 1 0 0
(1) pair lj/cut, perpetual
attributes: half, newton on
pair build: half/bin/atomonly/newton
stencil: half/bin/3d/newton
bin: standard
Per MPI rank memory allocation (min/avg/max) = 2.619 | 2.619 | 2.619 Mbytes
Step Temp E_pair E_mol TotEng Press
0 1.44 -6.7733681 0 -4.6176881 -5.0221006
Loop time of 4.76837e-06 on 4 procs for 0 steps with 500 atoms
89.1% CPU use with 4 MPI tasks x no OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0 | 0 | 0 | 0.0 | 0.00
Output | 0 | 0 | 0 | 0.0 | 0.00
Modify | 0 | 0 | 0 | 0.0 | 0.00
Other | | 4.768e-06 | | |100.00
Nlocal: 125 ave 125 max 125 min
Histogram: 4 0 0 0 0 0 0 0 0 0
Nghost: 1099 ave 1099 max 1099 min
Histogram: 4 0 0 0 0 0 0 0 0 0
Neighs: 4875 ave 4875 max 4875 min
Histogram: 4 0 0 0 0 0 0 0 0 0
Total # of neighbors = 19500
Ave neighs/atom = 39
Neighbor list builds = 0
Dangerous builds not checked
run 0
Per MPI rank memory allocation (min/avg/max) = 2.619 | 2.619 | 2.619 Mbytes
Step Temp E_pair E_mol TotEng Press
0 1.44 -6.7723127 0 -4.6166327 -5.015531
Loop time of 3.45707e-06 on 4 procs for 0 steps with 500 atoms
94.0% CPU use with 4 MPI tasks x no OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0 | 0 | 0 | 0.0 | 0.00
Output | 0 | 0 | 0 | 0.0 | 0.00
Modify | 0 | 0 | 0 | 0.0 | 0.00
Other | | 3.457e-06 | | |100.00
Nlocal: 125 ave 125 max 125 min
Histogram: 4 0 0 0 0 0 0 0 0 0
Nghost: 1099 ave 1099 max 1099 min
Histogram: 4 0 0 0 0 0 0 0 0 0
Neighs: 4875.25 ave 4885 max 4866 min
Histogram: 1 0 0 0 2 0 0 0 0 1
Total # of neighbors = 19501
Ave neighs/atom = 39.002
Neighbor list builds = 0
Dangerous builds not checked
Per MPI rank memory allocation (min/avg/max) = 2.619 | 2.619 | 2.619 Mbytes
Step Temp E_pair E_mol TotEng Press
0 1.44 -6.7723127 0 -4.6166327 -5.015531
50 0.70210225 -5.6759068 0 -4.6248598 0.59609192
100 0.75891559 -5.7611234 0 -4.6250267 0.20841608
run 0
Per MPI rank memory allocation (min/avg/max) = 2.619 | 2.619 | 2.619 Mbytes
Step Temp E_pair E_mol TotEng Press
100 0.75891559 -5.7609392 0 -4.6248426 0.20981291
Loop time of 3.03984e-06 on 4 procs for 0 steps with 500 atoms
115.1% CPU use with 4 MPI tasks x no OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0 | 0 | 0 | 0.0 | 0.00
Output | 0 | 0 | 0 | 0.0 | 0.00
Modify | 0 | 0 | 0 | 0.0 | 0.00
Other | | 3.04e-06 | | |100.00
Nlocal: 125 ave 126 max 124 min
Histogram: 2 0 0 0 0 0 0 0 0 2
Nghost: 1085.25 ave 1089 max 1079 min
Histogram: 1 0 0 0 0 1 0 0 0 2
Neighs: 4690.25 ave 4996 max 4401 min
Histogram: 1 0 0 1 0 1 0 0 0 1
Total # of neighbors = 18761
Ave neighs/atom = 37.522
Neighbor list builds = 0
Dangerous builds not checked
Per MPI rank memory allocation (min/avg/max) = 2.619 | 2.619 | 2.619 Mbytes
Step Temp E_pair E_mol TotEng Press
100 0.75891559 -5.7609392 0 -4.6248426 0.20981291
150 0.75437991 -5.7558622 0 -4.6265555 0.20681722
200 0.73111257 -5.7193748 0 -4.6248993 0.35230715
run 0
Per MPI rank memory allocation (min/avg/max) = 2.619 | 2.619 | 2.619 Mbytes
Step Temp E_pair E_mol TotEng Press
200 0.73111257 -5.7143906 0 -4.6199151 0.37126023
Loop time of 2.38419e-06 on 4 procs for 0 steps with 500 atoms
125.8% CPU use with 4 MPI tasks x no OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0 | 0 | 0 | 0.0 | 0.00
Output | 0 | 0 | 0 | 0.0 | 0.00
Modify | 0 | 0 | 0 | 0.0 | 0.00
Other | | 2.384e-06 | | |100.00
Nlocal: 125 ave 126 max 123 min
Histogram: 1 0 0 0 0 0 1 0 0 2
Nghost: 1068.5 ave 1076 max 1063 min
Histogram: 2 0 0 0 0 0 1 0 0 1
Neighs: 4674.75 ave 4938 max 4419 min
Histogram: 1 0 0 0 1 1 0 0 0 1
Total # of neighbors = 18699
Ave neighs/atom = 37.398
Neighbor list builds = 0
Dangerous builds not checked
Per MPI rank memory allocation (min/avg/max) = 2.619 | 2.619 | 2.619 Mbytes
Step Temp E_pair E_mol TotEng Press
200 0.73111257 -5.7193748 0 -4.6248993 0.35230715
250 0.73873144 -5.7312505 0 -4.6253696 0.33061033
300 0.76392796 -5.7719207 0 -4.6283206 0.18197874
run 0
Per MPI rank memory allocation (min/avg/max) = 2.619 | 2.619 | 2.619 Mbytes
Step Temp E_pair E_mol TotEng Press
300 0.76392796 -5.7725589 0 -4.6289588 0.17994628
Loop time of 2.44379e-06 on 4 procs for 0 steps with 500 atoms
112.5% CPU use with 4 MPI tasks x no OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0 | 0 | 0 | 0.0 | 0.00
Output | 0 | 0 | 0 | 0.0 | 0.00
Modify | 0 | 0 | 0 | 0.0 | 0.00
Other | | 2.444e-06 | | |100.00
Nlocal: 125 ave 128 max 121 min
Histogram: 1 0 0 0 0 1 0 1 0 1
Nghost: 1069 ave 1080 max 1055 min
Histogram: 1 0 0 0 0 0 2 0 0 1
Neighs: 4672 ave 4803 max 4600 min
Histogram: 2 0 0 1 0 0 0 0 0 1
Total # of neighbors = 18688
Ave neighs/atom = 37.376
Neighbor list builds = 0
Dangerous builds not checked
Per MPI rank memory allocation (min/avg/max) = 2.619 | 2.619 | 2.619 Mbytes
Step Temp E_pair E_mol TotEng Press
300 0.76392796 -5.7725589 0 -4.6289588 0.17994628
350 0.71953041 -5.7041632 0 -4.6270261 0.44866153
400 0.7319047 -5.7216051 0 -4.6259438 0.46321355
run 0
Per MPI rank memory allocation (min/avg/max) = 2.619 | 2.619 | 2.619 Mbytes
Step Temp E_pair E_mol TotEng Press
400 0.7319047 -5.7158168 0 -4.6201554 0.49192039
Loop time of 2.14577e-06 on 4 procs for 0 steps with 500 atoms
139.8% CPU use with 4 MPI tasks x no OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0 | 0 | 0 | 0.0 | 0.00
Output | 0 | 0 | 0 | 0.0 | 0.00
Modify | 0 | 0 | 0 | 0.0 | 0.00
Other | | 2.146e-06 | | |100.00
Nlocal: 125 ave 132 max 118 min
Histogram: 1 0 0 0 0 2 0 0 0 1
Nghost: 1057.5 ave 1068 max 1049 min
Histogram: 1 0 0 1 1 0 0 0 0 1
Neighs: 4685.75 ave 5045 max 4229 min
Histogram: 1 0 0 1 0 0 0 0 0 2
Total # of neighbors = 18743
Ave neighs/atom = 37.486
Neighbor list builds = 0
Dangerous builds not checked
Per MPI rank memory allocation (min/avg/max) = 2.619 | 2.619 | 2.619 Mbytes
Step Temp E_pair E_mol TotEng Press
400 0.7319047 -5.7216051 0 -4.6259438 0.46321355
450 0.74503154 -5.7405318 0 -4.6252196 0.33211879
500 0.70570501 -5.6824439 0 -4.6260035 0.62020788
Total wall time: 0:00:00

@ -0,0 +1,263 @@
/* ----------------------------------------------------------------------
LAMMPS - Large-scale Atomic/Molecular Massively Parallel Simulator
http://lammps.sandia.gov, Sandia National Laboratories
Steve Plimpton, sjplimp@sandia.gov
------------------------------------------------------------------------- */
// MC code used with LAMMPS in client/server mode
// MC is the client, LAMMPS is the server
// Syntax: mc infile mode modearg
// mode = file, zmq
// modearg = filename for file, localhost:5555 for zmq
#include <cmath>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include "mc.h"
#include "random_park.h"
#include "cslib.h"
using namespace CSLIB_NS;
void error(const char *);
CSlib *cs_create(char *, char *);
#define MAXLINE 256
/* ---------------------------------------------------------------------- */
// main program
int main(int narg, char **arg)
{
if (narg != 4) {
error("Syntax: mc infile mode modearg");
exit(1);
}
// initialize CSlib
CSlib *cs = cs_create(arg[2],arg[3]);
// create MC class and perform run
MC *mc = new MC(arg[1],cs);
mc->run();
// final MC stats
int naccept = mc->naccept;
int nattempt = mc->nattempt;
printf("------ MC stats ------\n");
printf("MC attempts = %d\n",nattempt);
printf("MC accepts = %d\n",naccept);
printf("Acceptance ratio = %g\n",1.0*naccept/nattempt);
// clean up
delete cs;
delete mc;
}
/* ---------------------------------------------------------------------- */
void error(const char *str)
{
printf("ERROR: %s\n",str);
exit(1);
}
/* ---------------------------------------------------------------------- */
CSlib *cs_create(char *mode, char *arg)
{
CSlib *cs = new CSlib(0,mode,arg,NULL);
// initial handshake to agree on protocol
cs->send(0,1);
cs->pack_string(1,(char *) "mc");
int msgID,nfield;
int *fieldID,*fieldtype,*fieldlen;
msgID = cs->recv(nfield,fieldID,fieldtype,fieldlen);
return cs;
}
// ----------------------------------------------------------------------
// MC class
// ----------------------------------------------------------------------
MC::MC(char *mcfile, void *cs_caller)
//MC::MC(char *mcfile, CSlib *cs_caller)
{
cs_void = cs_caller;
// setup MC params
options(mcfile);
// random # generator
random = new RanPark(seed);
}
/* ---------------------------------------------------------------------- */
MC::~MC()
{
free(x);
delete random;
}
/* ---------------------------------------------------------------------- */
void MC::run()
{
int iatom,accept,msgID,nfield;
double pe_initial,pe_final,edelta;
double dx,dy,dz;
double xold[3],xnew[3];
int *fieldID,*fieldtype,*fieldlen;
enum{NATOMS=1,EINIT,DISPLACE,ACCEPT,RUN};
CSlib *cs = (CSlib *) cs_void;
// one-time request for atom count from MD
// allocate 1d coord buffer
cs->send(NATOMS,0);
msgID = cs->recv(nfield,fieldID,fieldtype,fieldlen);
natoms = cs->unpack_int(1);
x = (double *) malloc(3*natoms*sizeof(double));
// loop over MC moves
naccept = nattempt = 0;
for (int iloop = 0; iloop < nloop; iloop++) {
// request current energy from MD
// recv energy, coords from MD
cs->send(EINIT,0);
msgID = cs->recv(nfield,fieldID,fieldtype,fieldlen);
pe_initial = cs->unpack_double(1);
double *x = (double *) cs->unpack(2);
// perform simple MC event
// displace a single atom by random amount
iatom = (int) natoms*random->uniform();
xold[0] = x[3*iatom+0];
xold[1] = x[3*iatom+1];
xold[2] = x[3*iatom+2];
dx = 2.0*delta*random->uniform() - delta;
dy = 2.0*delta*random->uniform() - delta;
dz = 2.0*delta*random->uniform() - delta;
xnew[0] = xold[0] + dx;
xnew[1] = xold[1] + dy;
xnew[2] = xold[2] + dz;
// send atom ID and its new coords to MD
// recv new energy
cs->send(DISPLACE,2);
cs->pack_int(1,iatom+1);
cs->pack(2,4,3,xnew);
msgID = cs->recv(nfield,fieldID,fieldtype,fieldlen);
pe_final = cs->unpack_double(1);
// decide whether to accept/reject MC event
if (pe_final <= pe_initial) accept = 1;
else if (temperature == 0.0) accept = 0;
else if (random->uniform() >
exp(natoms*(pe_initial-pe_final)/temperature)) accept = 0;
else accept = 1;
nattempt++;
if (accept) naccept++;
// send accept (1) or reject (0) flag to MD
cs->send(ACCEPT,1);
cs->pack_int(1,accept);
msgID = cs->recv(nfield,fieldID,fieldtype,fieldlen);
// send dynamics timesteps
cs->send(RUN,1);
cs->pack_int(1,ndynamics);
msgID = cs->recv(nfield,fieldID,fieldtype,fieldlen);
}
// send exit message to MD
cs->send(-1,0);
msgID = cs->recv(nfield,fieldID,fieldtype,fieldlen);
}
/* ---------------------------------------------------------------------- */
void MC::options(char *filename)
{
// default params
nsteps = 0;
ndynamics = 100;
delta = 0.1;
temperature = 1.0;
seed = 12345;
// read and parse file
FILE *fp = fopen(filename,"r");
if (fp == NULL) error("Could not open MC file");
char line[MAXLINE];
char *keyword,*value;
char *eof = fgets(line,MAXLINE,fp);
while (eof) {
if (line[0] == '#') { // comment line
eof = fgets(line,MAXLINE,fp);
continue;
}
value = strtok(line," \t\n\r\f");
if (value == NULL) { // blank line
eof = fgets(line,MAXLINE,fp);
continue;
}
keyword = strtok(NULL," \t\n\r\f");
if (keyword == NULL) error("Missing keyword in MC file");
if (strcmp(keyword,"nsteps") == 0) nsteps = atoi(value);
else if (strcmp(keyword,"ndynamics") == 0) ndynamics = atoi(value);
else if (strcmp(keyword,"delta") == 0) delta = atof(value);
else if (strcmp(keyword,"temperature") == 0) temperature = atof(value);
else if (strcmp(keyword,"seed") == 0) seed = atoi(value);
else error("Unknown param in MC file");
eof = fgets(line,MAXLINE,fp);
}
// derived params
nloop = nsteps/ndynamics;
}

@ -0,0 +1,40 @@
/* ----------------------------------------------------------------------
LAMMPS - Large-scale Atomic/Molecular Massively Parallel Simulator
http://lammps.sandia.gov, Sandia National Laboratories
Steve Plimpton, sjplimp@sandia.gov
------------------------------------------------------------------------- */
#ifndef MC_H
#define MC_H
/* ---------------------------------------------------------------------- */
class MC {
public:
int naccept; // # of accepted MC events
int nattempt; // # of attempted MC events
MC(char *, void *);
~MC();
void run();
private:
int nsteps; // total # of MD steps
int ndynamics; // steps in one short dynamics run
int nloop; // nsteps/ndynamics
int natoms; // # of MD atoms
double delta; // MC displacement distance
double temperature; // MC temperature for Boltzmann criterion
double *x; // atom coords as 3N 1d vector
double energy; // global potential energy
int seed; // RNG seed
class RanPark *random;
void *cs_void; // messaging library
void options(char *);
};
#endif

@ -0,0 +1,72 @@
/* ----------------------------------------------------------------------
LAMMPS - Large-scale Atomic/Molecular Massively Parallel Simulator
http://lammps.sandia.gov, Sandia National Laboratories
Steve Plimpton, sjplimp@sandia.gov
Copyright (2003) Sandia Corporation. Under the terms of Contract
DE-AC04-94AL85000 with Sandia Corporation, the U.S. Government retains
certain rights in this software. This software is distributed under
the GNU General Public License.
See the README file in the top-level LAMMPS directory.
------------------------------------------------------------------------- */
// Park/Miller RNG
#include <math.h>
#include "random_park.h"
//#include "error.h"
#define IA 16807
#define IM 2147483647
#define AM (1.0/IM)
#define IQ 127773
#define IR 2836
/* ---------------------------------------------------------------------- */
RanPark::RanPark(int seed_init)
{
//if (seed_init <= 0)
// error->one(FLERR,"Invalid seed for Park random # generator");
seed = seed_init;
save = 0;
}
/* ----------------------------------------------------------------------
uniform RN
------------------------------------------------------------------------- */
double RanPark::uniform()
{
int k = seed/IQ;
seed = IA*(seed-k*IQ) - IR*k;
if (seed < 0) seed += IM;
double ans = AM*seed;
return ans;
}
/* ----------------------------------------------------------------------
gaussian RN
------------------------------------------------------------------------- */
double RanPark::gaussian()
{
double first,v1,v2,rsq,fac;
if (!save) {
do {
v1 = 2.0*uniform()-1.0;
v2 = 2.0*uniform()-1.0;
rsq = v1*v1 + v2*v2;
} while ((rsq >= 1.0) || (rsq == 0.0));
fac = sqrt(-2.0*log(rsq)/rsq);
second = v1*fac;
first = v2*fac;
save = 1;
} else {
first = second;
save = 0;
}
return first;
}

@ -0,0 +1,28 @@
/* ----------------------------------------------------------------------
LAMMPS - Large-scale Atomic/Molecular Massively Parallel Simulator
http://lammps.sandia.gov, Sandia National Laboratories
Steve Plimpton, sjplimp@sandia.gov
Copyright (2003) Sandia Corporation. Under the terms of Contract
DE-AC04-94AL85000 with Sandia Corporation, the U.S. Government retains
certain rights in this software. This software is distributed under
the GNU General Public License.
See the README file in the top-level LAMMPS directory.
------------------------------------------------------------------------- */
#ifndef RANPARK_H
#define RANPARK_H
class RanPark {
public:
RanPark(int);
double uniform();
double gaussian();
private:
int seed,save;
double second;
};
#endif

@ -0,0 +1,53 @@
# Startparameter for this run:
NWRITE = 2 write-flag & timer
PREC = normal normal or accurate (medium, high low for compatibility)
ISTART = 0 job : 0-new 1-cont 2-samecut
ICHARG = 2 charge: 1-file 2-atom 10-const
ISPIN = 1 spin polarized calculation?
LSORBIT = F spin-orbit coupling
INIWAV = 1 electr: 0-lowe 1-rand 2-diag
# Electronic Relaxation 1
ENCUT = 600.0 eV #Plane wave energy cutoff
ENINI = 600.0 initial cutoff
NELM = 100; NELMIN= 2; NELMDL= -5 # of ELM steps
EDIFF = 0.1E-05 stopping-criterion for ELM
# Ionic relaxation
EDIFFG = 0.1E-02 stopping-criterion for IOM
NSW = 0 number of steps for IOM
NBLOCK = 1; KBLOCK = 1 inner block; outer block
IBRION = -1 ionic relax: 0-MD 1-quasi-New 2-CG #No ion relaxation with -1
NFREE = 0 steps in history (QN), initial steepest desc. (CG)
ISIF = 2 stress and relaxation # 2: F-yes Sts-yes RlxIon-yes cellshape-no cellvol-no
IWAVPR = 10 prediction: 0-non 1-charg 2-wave 3-comb # 10: TMPCAR stored in memory rather than file
POTIM = 0.5000 time-step for ionic-motion
TEBEG = 3500.0; TEEND = 3500.0 temperature during run # Finite Temperature variables if AI-MD is on
SMASS = -3.00 Nose mass-parameter (am)
estimated Nose-frequenzy (Omega) = 0.10E-29 period in steps =****** mass= -0.366E-27a.u.
PSTRESS= 0.0 pullay stress
# DOS related values:
EMIN = 10.00; EMAX =-10.00 energy-range for DOS
EFERMI = 0.00
ISMEAR = 0; SIGMA = 0.10 broadening in eV -4-tet -1-fermi 0-gaus
# Electronic relaxation 2 (details)
IALGO = 48 algorithm
# Write flags
LWAVE = T write WAVECAR
LCHARG = T write CHGCAR
LVTOT = F write LOCPOT, total local potential
LVHAR = F write LOCPOT, Hartree potential only
LELF = F write electronic localiz. function (ELF)
# Dipole corrections
LMONO = F monopole corrections only (constant potential shift)
LDIPOL = F correct potential (dipole corrections)
IDIPOL = 0 1-x, 2-y, 3-z, 4-all directions
EPSILON= 1.0000000 bulk dielectric constant
# Exchange correlation treatment:
GGA = -- GGA type

@ -0,0 +1,6 @@
K-Points
0
Monkhorst Pack
15 15 15
0 0 0

@ -0,0 +1,11 @@
W unit cell
1.0
3.16 0.00000000 0.00000000
0.00000000 3.16 0.00000000
0.00000000 0.00000000 3.16
W
2
Direct
0.00000000 0.00000000 0.00000000
0.50000000 0.50000000 0.50000000

@ -0,0 +1,149 @@
Sample LAMMPS MD wrapper on VASP quantum DFT via client/server
coupling
See the MESSAGE package (doc/Section_messages.html#MESSAGE) and
Section_howto.html#howto10 for more details on how client/server
coupling works in LAMMPS.
In this dir, vasp_wrap.py is a wrapper on the VASP quantum DFT
code so that it can work as a "server" code which LAMMPS drives as a
"client" code to perform ab initio MD. LAMMPS performs the MD
timestepping, sends VASP a current set of coordinates each timestep,
VASP computes forces and energy and virial and returns that info to
LAMMPS.
Messages are exchanged between LAMMPS and the VASP wrapper via a client/server
library (CSlib), which is included in the LAMMPS distribution in
lib/message. As explained below you can choose to exchange data
between the two programs either via files or sockets (ZMQ). If the
vasp_wrap.py program became parallel, or the CSlib library calls were
integrated into VASP directly, then data could also be exchanged via
MPI.
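As a concrete illustration, here is a minimal sketch of the
server-side connection and initial handshake in Python, using only
the CSlib calls that vasp_wrap.py itself makes further below; the
"tmp.couple" file name and the *:5555 port are simply the defaults
used in these examples.

from cslib import CSlib

# open the server end of the connection in the chosen messaging mode
mode = "zmq"                                    # or "file"
if mode == "file": cs = CSlib(1,mode,"tmp.couple",None)
else:              cs = CSlib(1,mode,"*:5555",None)

# initial handshake: client sends msgID 0 with the protocol string "md"
msgID,nfield,fieldID,fieldtype,fieldlen = cs.recv()
if msgID != 0 or cs.unpack_string(1) != "md":
    raise SystemExit("bad client/server handshake")
cs.send(0,0)                                    # acknowledge, then enter the server loop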
----------------
Build LAMMPS with its MESSAGE package installed:
See the Build extras doc page and its MESSAGE package
section for details.
CMake:
-D PKG_MESSAGE=yes # include the MESSAGE package
-D MESSAGE_ZMQ=value # build with ZeroMQ support, value = no (default) or yes
Traditional make:
cd lammps/lib/message
python Install.py -m -z # build CSlib with MPI and ZMQ support
cd lammps/src
make yes-message
make mpi
You can leave off the -z if you do not have ZMQ on your system.
----------------
Build the CSlib in a form usable by the vasp_wrap.py script:
% cd lammps/lib/message/cslib/src
% make shlib # build serial and parallel shared lib with ZMQ support
% make shlib zmq=no # build serial and parallel shared lib w/out ZMQ support
This will make shared library versions of the CSlib, which Python
requires. Python must be able to find both the cslib.py script and
the libcsnompi.so library in your lammps/lib/message/cslib/src
directory. If it is not able to do this, you will get an error when
you run vasp_wrap.py.
You can do this by augmenting two environment variables, either
from the command line, or in your shell start-up script.
Here is the sample syntax for the csh or tcsh shells:
setenv PYTHONPATH ${PYTHONPATH}:/home/sjplimp/lammps/lib/message/cslib/src
setenv LD_LIBRARY_PATH ${LD_LIBRARY_PATH}:/home/sjplimp/lammps/lib/message/cslib/src
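If you want to sanity-check the Python side of this setup before
launching anything, a minimal sketch (the path below is just the
example path from above; adjust it to your installation) is:

import sys
sys.path.append("/home/sjplimp/lammps/lib/message/cslib/src")  # example path from above
try:
    import cslib
    print("cslib.py found at " + cslib.__file__)
except ImportError as err:
    raise SystemExit("cslib not importable: %s" % err)
# note: libcsnompi.so must still be resolvable via LD_LIBRARY_PATH as described above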
----------------
Prepare to use VASP and the vasp_wrapper.py script
You can run the vasp_wrap.py script as-is to test that the coupling
between it and LAMMPS is functional. This will use the included
vasprun.xml file output by a previous VASP run.
But note that the as-is version of vasp_wrap.py will not attempt to
run VASP.
To do this, you must edit the 1st vaspcmd line at the top of
vasp_wrap.py to be the launch command needed to run VASP on your
system. It can be a command to run VASP in serial or in parallel,
e.g. an mpirun command. Then comment out the 2nd vaspcmd line
immediately following it.
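After that edit, the top of vasp_wrap.py would look something like
this sketch; the mpirun command and VASP path here are placeholders,
not a recommendation:

vaspcmd = "mpirun -np 4 /path/to/vasp_std"   # your actual VASP launch command
#vaspcmd = "touch tmp"                       # dummy command used by the as-is test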
Ensure you have the necessary VASP input files in this
directory, suitable for the VASP calculation you want to perform:
INCAR
KPOINTS
POSCAR_template
POTCAR
Examples of all but the POTCAR file are provided. As explained below,
POSCAR_W is an input file for a 2-atom unit cell of tungsten and can
be used to test the LAMMPS/VASP coupling. The POTCAR file is a
proprietary VASP file, so use one from your VASP installation.
Note that the POSCAR_template file should be matched to the LAMMPS
input script (# of atoms and atom types, box size, etc). The provided
POSCAR_W matches in.client.W.
Once you run VASP yourself, the vasprun.xml file will be overwritten.
----------------
To run in client/server mode:
NOTE: The vasp_wrap.py script must be run with Python version 2, not
3. This is because it uses the CSlib python wrapper, which only
supports version 2. We plan to upgrade CSlib to support Python 3.
Both the client (LAMMPS) and server (vasp_wrap.py) must use the same
messaging mode, namely file or zmq. This is an argument to the
vasp_wrap.py code; it can be selected by setting the "mode" variable
when you run LAMMPS. The default mode = file.
Here we assume LAMMPS was built to run in parallel, and the MESSAGE
package was installed with socket (ZMQ) support. This means either of
the messaging modes can be used and LAMMPS can be run in serial or
parallel. The vasp_wrap.py code is always run in serial, but it
launches VASP from Python via an mpirun command which can run VASP
itself in parallel.
When you run, the LAMMPS client should print out thermodynamic info
every timestep, which corresponds to the forces and virial computed
by VASP. VASP will also generate output files each timestep. The
vasp_wrap.py script could be generalized to archive these.
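One possible way to do that archiving, shown here only as a sketch
(the archive_output helper and its naming scheme are hypothetical,
not part of the shipped script), is to copy vasprun.xml aside after
each call to vasprun_read():

import os, shutil

def archive_output(step, src="vasprun.xml", dstdir="archive"):
    # hypothetical helper: keep each timestep's VASP output so the next
    # VASP launch from the server loop does not overwrite it
    if not os.path.isdir(dstdir):
        os.makedirs(dstdir)
    shutil.copyfile(src, os.path.join(dstdir, "vasprun_%06d.xml" % step))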
The examples below are commands you should use in two different
terminal windows. The order of the two commands (client or server
launch) does not matter. You can run them both in the same window if
you append a "&" character to the first one to run it in the
background.
--------------
File mode of messaging:
% mpirun -np 1 lmp_mpi -v mode file < in.client.W
% python vasp_wrap.py file POSCAR_W
% mpirun -np 2 lmp_mpi -v mode file < in.client.W
% python vasp_wrap.py file POSCAR_W
ZMQ mode of messaging:
% mpirun -np 1 lmp_mpi -v mode zmq < in.client.W
% python vasp_wrap.py zmq POSCAR_W
% mpirun -np 2 lmp_mpi -v mode zmq < in.client.W
% python vasp_wrap.py zmq POSCAR_W

@ -0,0 +1,15 @@
LAMMPS W data file
2 atoms
1 atom types
0.0 3.16 xlo xhi
0.0 3.16 ylo yhi
0.0 3.16 zlo zhi
Atoms
1 1 0.000 0.000 0.000
2 1 1.58 1.58 1.58

@ -0,0 +1,34 @@
# small W unit cell for use with VASP
variable mode index file
if "${mode} == file" then &
"message client md file tmp.couple" &
elif "${mode} == zmq" &
"message client md zmq localhost:5555" &
variable x index 1
variable y index 1
variable z index 1
units metal
atom_style atomic
atom_modify sort 0 0.0 map yes
read_data data.W
mass 1 183.85
replicate $x $y $z
velocity all create 300.0 87287 loop geom
neighbor 0.3 bin
neigh_modify delay 0 every 10 check no
fix 1 all nve
fix 2 all client/md
fix_modify 2 energy yes
thermo 1
run 3

@ -0,0 +1,76 @@
LAMMPS (22 Aug 2018)
# small W unit cell for use with VASP
variable mode index file
if "${mode} == file" then "message client md file tmp.couple" elif "${mode} == zmq" "message client md zmq localhost:5555"
message client md zmq localhost:5555
variable x index 1
variable y index 1
variable z index 1
units metal
atom_style atomic
atom_modify sort 0 0.0 map yes
read_data data.W
orthogonal box = (0 0 0) to (3.16 3.16 3.16)
1 by 1 by 2 MPI processor grid
reading atoms ...
2 atoms
mass 1 183.85
replicate $x $y $z
replicate 1 $y $z
replicate 1 1 $z
replicate 1 1 1
orthogonal box = (0 0 0) to (3.16 3.16 3.16)
1 by 1 by 2 MPI processor grid
2 atoms
Time spent = 0.000148058 secs
velocity all create 300.0 87287 loop geom
neighbor 0.3 bin
neigh_modify delay 0 every 10 check no
fix 1 all nve
fix 2 all client/md
fix_modify 2 energy yes
thermo 1
run 3
Per MPI rank memory allocation (min/avg/max) = 1.8 | 1.8 | 1.8 Mbytes
Step Temp E_pair E_mol TotEng Press
0 300 0 0 -48.030793 -78159.503
1 298.24318 0 0 -48.03102 -78167.19
2 296.85584 0 0 -48.031199 -78173.26
3 295.83795 0 0 -48.031331 -78177.714
Loop time of 0.457491 on 2 procs for 3 steps with 2 atoms
Performance: 0.567 ns/day, 42.360 hours/ns, 6.558 timesteps/s
50.1% CPU use with 2 MPI tasks x no OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 1.3828e-05 | 2.9922e-05 | 4.6015e-05 | 0.0 | 0.01
Output | 7.5817e-05 | 9.3937e-05 | 0.00011206 | 0.0 | 0.02
Modify | 0.45735 | 0.45736 | 0.45736 | 0.0 | 99.97
Other | | 1.204e-05 | | | 0.00
Nlocal: 1 ave 1 max 1 min
Histogram: 2 0 0 0 0 0 0 0 0 0
Nghost: 4 ave 4 max 4 min
Histogram: 2 0 0 0 0 0 0 0 0 0
Neighs: 0 ave 0 max 0 min
Histogram: 2 0 0 0 0 0 0 0 0 0
Total # of neighbors = 0
Ave neighs/atom = 0
Neighbor list builds = 0
Dangerous builds not checked
Total wall time: 0:01:21

@ -0,0 +1,300 @@
#!/usr/bin/env python
# ----------------------------------------------------------------------
# LAMMPS - Large-scale Atomic/Molecular Massively Parallel Simulator
# http://lammps.sandia.gov, Sandia National Laboratories
# Steve Plimpton, sjplimp@sandia.gov
# ----------------------------------------------------------------------
# Syntax: vasp_wrap.py file/zmq POSCARfile
# wrapper on VASP to act as server program using CSlib
# receives message with list of coords from client
# creates VASP inputs
# invokes VASP to calculate self-consistent energy of that config
# reads VASP outputs
# sends message with energy, forces, pressure to client
# NOTES:
# check to insure basic VASP input files are in place?
# could archive VASP input/output in special filenames or dirs?
# need to check that POTCAR file is consistent with atom ordering?
# could make syntax for launching VASP more flexible
# e.g. command-line arg for # of procs
# detect if VASP had an error and return ERROR field, e.g. non-convergence ??
from __future__ import print_function
import sys
version = sys.version_info[0]
if version == 3:
sys.exit("The CSlib python wrapper does not yet support python 3")
import subprocess
import xml.etree.ElementTree as ET
from cslib import CSlib
# comment out 2nd line once 1st line is correct for your system
vaspcmd = "srun -N 1 --ntasks-per-node=4 " + \
"-n 4 /projects/vasp/2017-build/cts1/vasp5.4.4/vasp_tfermi/bin/vasp_std"
vaspcmd = "touch tmp"
# enums matching FixClientMD class in LAMMPS
SETUP,STEP = range(1,2+1)
DIM,PERIODICITY,ORIGIN,BOX,NATOMS,NTYPES,TYPES,COORDS,UNITS,CHARGE = range(1,10+1)
FORCES,ENERGY,VIRIAL,ERROR = range(1,4+1)
# -------------------------------------
# functions
# error message and exit
def error(txt):
print("ERROR:",txt)
sys.exit(1)
# -------------------------------------
# read initial VASP POSCAR file to setup problem
# return natoms,ntypes,box
def vasp_setup(poscar):
ps = open(poscar,'r').readlines()
# box size
words = ps[2].split()
xbox = float(words[0])
words = ps[3].split()
ybox = float(words[1])
words = ps[4].split()
zbox = float(words[2])
box = [xbox,ybox,zbox]
ntypes = 0
natoms = 0
words = ps[6].split()
for word in words:
if word == '#': break
ntypes += 1
natoms += int(word)
return natoms,ntypes,box
# -------------------------------------
# write a new POSCAR file for VASP
def poscar_write(poscar,natoms,ntypes,types,coords,box):
psold = open(poscar,'r').readlines()
psnew = open("POSCAR",'w')
# header, including box size
psnew.write(psold[0])
psnew.write(psold[1])
psnew.write("%g %g %g\n" % (box[0],box[1],box[2]))
psnew.write("%g %g %g\n" % (box[3],box[4],box[5]))
psnew.write("%g %g %g\n" % (box[6],box[7],box[8]))
psnew.write(psold[5])
psnew.write(psold[6])
# per-atom coords
# grouped by types
psnew.write("Cartesian\n")
for itype in range(1,ntypes+1):
for i in range(natoms):
if types[i] != itype: continue
x = coords[3*i+0]
y = coords[3*i+1]
z = coords[3*i+2]
aline = " %g %g %g\n" % (x,y,z)
psnew.write(aline)
psnew.close()
# -------------------------------------
# read a VASP output vasprun.xml file
# uses ElementTree module
# see https://docs.python.org/2/library/xml.etree.elementtree.html
def vasprun_read():
tree = ET.parse('vasprun.xml')
root = tree.getroot()
#fp = open("vasprun.xml","r")
#root = ET.parse(fp)
scsteps = root.findall('calculation/scstep')
energy = scsteps[-1].find('energy')
for child in energy:
if child.attrib["name"] == "e_0_energy":
eout = float(child.text)
fout = []
sout = []
varrays = root.findall('calculation/varray')
for varray in varrays:
if varray.attrib["name"] == "forces":
forces = varray.findall("v")
for line in forces:
fxyz = line.text.split()
fxyz = [float(value) for value in fxyz]
fout += fxyz
if varray.attrib["name"] == "stress":
tensor = varray.findall("v")
stensor = []
for line in tensor:
sxyz = line.text.split()
sxyz = [float(value) for value in sxyz]
stensor.append(sxyz)
sxx = stensor[0][0]
syy = stensor[1][1]
szz = stensor[2][2]
# symmetrize off-diagonal components
sxy = 0.5 * (stensor[0][1] + stensor[1][0])
sxz = 0.5 * (stensor[0][2] + stensor[2][0])
syz = 0.5 * (stensor[1][2] + stensor[2][1])
sout = [sxx,syy,szz,sxy,sxz,syz]
#fp.close()
return eout,fout,sout
# -------------------------------------
# main program
# command-line args
if len(sys.argv) != 3:
print("Syntax: python vasp_wrap.py file/zmq POSCARfile")
sys.exit(1)
mode = sys.argv[1]
poscar_template = sys.argv[2]
if mode == "file": cs = CSlib(1,mode,"tmp.couple",None)
elif mode == "zmq": cs = CSlib(1,mode,"*:5555",None)
else:
print("Syntax: python vasp_wrap.py file/zmq POSCARfile")
sys.exit(1)
natoms,ntypes,box = vasp_setup(poscar_template)
# initial message for MD protocol
msgID,nfield,fieldID,fieldtype,fieldlen = cs.recv()
if msgID != 0: error("Bad initial client/server handshake")
protocol = cs.unpack_string(1)
if protocol != "md": error("Mismatch in client/server protocol")
cs.send(0,0)
# endless server loop
while 1:
# recv message from client
# msgID = 0 = all-done message
msgID,nfield,fieldID,fieldtype,fieldlen = cs.recv()
if msgID < 0: break
# SETUP receive at beginning of each run
# required fields: DIM, PERIODICTY, ORIGIN, BOX,
# NATOMS, NTYPES, TYPES, COORDS
# optional fields: others in enum above, but VASP ignores them
if msgID == SETUP:
origin = []
box = []
natoms_recv = ntypes_recv = 0
types = []
coords = []
for field in fieldID:
if field == DIM:
dim = cs.unpack_int(DIM)
if dim != 3: error("VASP only performs 3d simulations")
elif field == PERIODICITY:
periodicity = cs.unpack(PERIODICITY,1)
if not periodicity[0] or not periodicity[1] or not periodicity[2]:
error("VASP wrapper only currently supports fully periodic systems")
elif field == ORIGIN:
origin = cs.unpack(ORIGIN,1)
elif field == BOX:
box = cs.unpack(BOX,1)
elif field == NATOMS:
natoms_recv = cs.unpack_int(NATOMS)
if natoms != natoms_recv:
error("VASP wrapper mis-match in number of atoms")
elif field == NTYPES:
ntypes_recv = cs.unpack_int(NTYPES)
if ntypes != ntypes_recv:
error("VASP wrapper mis-match in number of atom types")
elif field == TYPES:
types = cs.unpack(TYPES,1)
elif field == COORDS:
coords = cs.unpack(COORDS,1)
if not origin or not box or not natoms or not ntypes or \
not types or not coords:
error("Required VASP wrapper setup field not received");
# STEP receive at each timestep of run or minimization
# required fields: COORDS
# optional fields: ORIGIN, BOX
elif msgID == STEP:
coords = []
for field in fieldID:
if field == COORDS:
coords = cs.unpack(COORDS,1)
elif field == ORIGIN:
origin = cs.unpack(ORIGIN,1)
elif field == BOX:
box = cs.unpack(BOX,1)
if not coords: error("Required VASP wrapper step field not received");
else: error("VASP wrapper received unrecognized message")
# create POSCAR file
poscar_write(poscar_template,natoms,ntypes,types,coords,box)
# invoke VASP
print("\nLaunching VASP ...")
print(vaspcmd)
subprocess.check_output(vaspcmd,stderr=subprocess.STDOUT,shell=True)
# process VASP output
energy,forces,virial = vasprun_read()
# convert VASP kilobars to bars
for i,value in enumerate(virial): virial[i] *= 1000.0
# return forces, energy, pressure to client
cs.send(msgID,3);
cs.pack(FORCES,4,3*natoms,forces)
cs.pack_double(ENERGY,energy)
cs.pack(VIRIAL,4,6,virial)
# final reply to client
cs.send(0,0)
# clean-up
del cs

@ -83,6 +83,7 @@ kim: use of potentials in Knowledge Base for Interatomic Models (KIM)
latte: use of LATTE density-functional tight-binding quantum code
meam: MEAM test for SiC and shear (same as shear examples)
melt: rapid melt of 3d LJ system
message: client/server coupling of 2 codes
micelle: self-assembly of small lipid-like molecules into 2d bilayers
min: energy minimization of 2d LJ melt
mscg: parameterize a multi-scale coarse-graining (MSCG) model

@ -17,8 +17,9 @@ atom_modify sort 0 0
compute XRD all xrd 1.541838 Ni 2Theta 40 80 c 2 2 2 LP 1 echo
compute SAED all saed 0.0251 Ni Kmax 0.85 Zone 1 0 0 c 0.025 0.025 0.025 &
dR_Ewald 0.05 echo manual
compute SAED all saed 0.0251 Ni Kmax 0.85 &
Zone 0 0 0 c 0.025 0.025 0.025 &
dR_Ewald 0.01 echo manual
fix 1 all ave/histo/weight 1 1 1 40 80 200 c_XRD[1] c_XRD[2] &
mode vector file $A.hist.xrd

@ -1,35 +0,0 @@
variable A string bulkNi
log $A.log
boundary p p p
units metal
timestep 0.001
lattice fcc 3.52
region box block 0 20 0 20 0 20
create_box 1 box
create_atoms 1 box
pair_style none
mass * 58.71
atom_modify sort 0 0
compute XRD all xrd 1.541838 Ni 2Theta 40 80 c 2 2 2 LP 1 echo
compute SAED all saed 0.0251 Ni Kmax 0.85 &
Zone 0 0 0 c 0.025 0.025 0.025 &
dR_Ewald 0.01 echo manual
fix 1 all ave/histo/weight 1 1 1 40 80 200 c_XRD[1] c_XRD[2] &
mode vector file $A.hist.xrd
fix 2 all saed/vtk 1 1 1 c_SAED file $A_001.saed
dump 1 all custom 1 $A.dump id x y z
run 0
unfix 1
unfix 2
uncompute XRD
uncompute SAED

@ -0,0 +1,25 @@
LAMMPS Description
8 atoms
2 atom types
0 1 xlo xhi
0 1 ylo yhi
0 1 zlo zhi
Masses
1 22.98976928
2 35.45
Atoms
1 2 1 0.25 0.25 0.25
2 1 -1 0.75 0.25 0.25
3 1 -1 0.25 0.75 0.25
4 2 1 0.75 0.75 0.25
5 1 -1 0.25 0.25 0.75
6 2 1 0.75 0.25 0.75
7 2 1 0.25 0.75 0.75
8 1 -1 0.75 0.75 0.75

@ -0,0 +1,316 @@
LAMMPS Description
300 atoms
1 atom types
0 10 xlo xhi
0 10 ylo yhi
0 10 zlo zhi
Masses
1 1.0
Atoms
1 1 1 0 0 4.5
2 1 -1 0 0 5.5
3 1 1 0 1 4.5
4 1 -1 0 1 5.5
5 1 1 0 2 4.5
6 1 -1 0 2 5.5
7 1 1 0 3 4.5
8 1 -1 0 3 5.5
9 1 1 0 4 4.5
10 1 -1 0 4 5.5
11 1 1 0 5 4.5
12 1 -1 0 5 5.5
13 1 1 0 6 4.5
14 1 -1 0 6 5.5
15 1 1 0 7 4.5
16 1 -1 0 7 5.5
17 1 1 0 8 4.5
18 1 -1 0 8 5.5
19 1 1 0 9 4.5
20 1 -1 0 9 5.5
21 1 1 1 0 4.5
22 1 -1 1 0 5.5
23 1 1 1 1 4.5
24 1 -1 1 1 5.5
25 1 1 1 2 4.5
26 1 -1 1 2 5.5
27 1 1 1 3 4.5
28 1 -1 1 3 5.5
29 1 1 1 4 4.5
30 1 -1 1 4 5.5
31 1 1 1 5 4.5
32 1 -1 1 5 5.5
33 1 1 1 6 4.5
34 1 -1 1 6 5.5
35 1 1 1 7 4.5
36 1 -1 1 7 5.5
37 1 1 1 8 4.5
38 1 -1 1 8 5.5
39 1 1 1 9 4.5
40 1 -1 1 9 5.5
41 1 1 2 0 4.5
42 1 -1 2 0 5.5
43 1 1 2 1 4.5
44 1 -1 2 1 5.5
45 1 1 2 2 4.5
46 1 -1 2 2 5.5
47 1 1 2 3 4.5
48 1 -1 2 3 5.5
49 1 1 2 4 4.5
50 1 -1 2 4 5.5
51 1 1 2 5 4.5
52 1 -1 2 5 5.5
53 1 1 2 6 4.5
54 1 -1 2 6 5.5
55 1 1 2 7 4.5
56 1 -1 2 7 5.5
57 1 1 2 8 4.5
58 1 -1 2 8 5.5
59 1 1 2 9 4.5
60 1 -1 2 9 5.5
61 1 1 3 0 4.5
62 1 -1 3 0 5.5
63 1 1 3 1 4.5
64 1 -1 3 1 5.5
65 1 1 3 2 4.5
66 1 -1 3 2 5.5
67 1 1 3 3 4.5
68 1 -1 3 3 5.5
69 1 1 3 4 4.5
70 1 -1 3 4 5.5
71 1 1 3 5 4.5
72 1 -1 3 5 5.5
73 1 1 3 6 4.5
74 1 -1 3 6 5.5
75 1 1 3 7 4.5
76 1 -1 3 7 5.5
77 1 1 3 8 4.5
78 1 -1 3 8 5.5
79 1 1 3 9 4.5
80 1 -1 3 9 5.5
81 1 1 4 0 4.5
82 1 -1 4 0 5.5
83 1 1 4 1 4.5
84 1 -1 4 1 5.5
85 1 1 4 2 4.5
86 1 -1 4 2 5.5
87 1 1 4 3 4.5
88 1 -1 4 3 5.5
89 1 1 4 4 4.5
90 1 -1 4 4 5.5
91 1 1 4 5 4.5
92 1 -1 4 5 5.5
93 1 1 4 6 4.5
94 1 -1 4 6 5.5
95 1 1 4 7 4.5
96 1 -1 4 7 5.5
97 1 1 4 8 4.5
98 1 -1 4 8 5.5
99 1 1 4 9 4.5
100 1 -1 4 9 5.5
101 1 1 5 0 4.5
102 1 -1 5 0 5.5
103 1 1 5 1 4.5
104 1 -1 5 1 5.5
105 1 1 5 2 4.5
106 1 -1 5 2 5.5
107 1 1 5 3 4.5
108 1 -1 5 3 5.5
109 1 1 5 4 4.5
110 1 -1 5 4 5.5
111 1 1 5 5 4.5
112 1 -1 5 5 5.5
113 1 1 5 6 4.5
114 1 -1 5 6 5.5
115 1 1 5 7 4.5
116 1 -1 5 7 5.5
117 1 1 5 8 4.5
118 1 -1 5 8 5.5
119 1 1 5 9 4.5
120 1 -1 5 9 5.5
121 1 1 6 0 4.5
122 1 -1 6 0 5.5
123 1 1 6 1 4.5
124 1 -1 6 1 5.5
125 1 1 6 2 4.5
126 1 -1 6 2 5.5
127 1 1 6 3 4.5
128 1 -1 6 3 5.5
129 1 1 6 4 4.5
130 1 -1 6 4 5.5
131 1 1 6 5 4.5
132 1 -1 6 5 5.5
133 1 1 6 6 4.5
134 1 -1 6 6 5.5
135 1 1 6 7 4.5
136 1 -1 6 7 5.5
137 1 1 6 8 4.5
138 1 -1 6 8 5.5
139 1 1 6 9 4.5
140 1 -1 6 9 5.5
141 1 1 7 0 4.5
142 1 -1 7 0 5.5
143 1 1 7 1 4.5
144 1 -1 7 1 5.5
145 1 1 7 2 4.5
146 1 -1 7 2 5.5
147 1 1 7 3 4.5
148 1 -1 7 3 5.5
149 1 1 7 4 4.5
150 1 -1 7 4 5.5
151 1 1 7 5 4.5
152 1 -1 7 5 5.5
153 1 1 7 6 4.5
154 1 -1 7 6 5.5
155 1 1 7 7 4.5
156 1 -1 7 7 5.5
157 1 1 7 8 4.5
158 1 -1 7 8 5.5
159 1 1 7 9 4.5
160 1 -1 7 9 5.5
161 1 1 8 0 4.5
162 1 -1 8 0 5.5
163 1 1 8 1 4.5
164 1 -1 8 1 5.5
165 1 1 8 2 4.5
166 1 -1 8 2 5.5
167 1 1 8 3 4.5
168 1 -1 8 3 5.5
169 1 1 8 4 4.5
170 1 -1 8 4 5.5
171 1 1 8 5 4.5
172 1 -1 8 5 5.5
173 1 1 8 6 4.5
174 1 -1 8 6 5.5
175 1 1 8 7 4.5
176 1 -1 8 7 5.5
177 1 1 8 8 4.5
178 1 -1 8 8 5.5
179 1 1 8 9 4.5
180 1 -1 8 9 5.5
181 1 1 9 0 4.5
182 1 -1 9 0 5.5
183 1 1 9 1 4.5
184 1 -1 9 1 5.5
185 1 1 9 2 4.5
186 1 -1 9 2 5.5
187 1 1 9 3 4.5
188 1 -1 9 3 5.5
189 1 1 9 4 4.5
190 1 -1 9 4 5.5
191 1 1 9 5 4.5
192 1 -1 9 5 5.5
193 1 1 9 6 4.5
194 1 -1 9 6 5.5
195 1 1 9 7 4.5
196 1 -1 9 7 5.5
197 1 1 9 8 4.5
198 1 -1 9 8 5.5
199 1 1 9 9 4.5
200 1 -1 9 9 5.5
201 1 -1 9.28495 2.13839 8.88019
202 1 1 4.99281 4.17459 9.83905
203 1 -1 4.91265 6.89408 2.39989
204 1 1 4.43647 3.68895 8.86086
205 1 -1 0.659075 7.07271 0.179131
206 1 1 7.791 3.40021 0.969703
207 1 -1 1.18008 3.63874 7.28751
208 1 1 8.51522 5.24681 6.37702
209 1 -1 4.24226 9.60726 3.16084
210 1 1 8.43745 8.23344 9.2883
211 1 -1 8.48509 8.84988 9.43407
212 1 1 2.81127 8.9903 0.00909212
213 1 -1 6.38283 6.20858 9.92482
214 1 1 4.59962 5.7925 7.52571
215 1 -1 7.03797 7.09336 8.15957
216 1 1 6.68103 8.04734 7.95661
217 1 -1 2.531 8.47145 1.6209
218 1 1 6.71915 8.79876 9.59581
219 1 -1 4.96758 0.0381298 0.827927
220 1 1 9.22955 1.04572 0.84722
221 1 -1 2.3224 2.57084 8.07306
222 1 1 1.94283 3.17375 3.92051
223 1 -1 2.34735 1.91295 1.29127
224 1 1 3.33928 3.30688 0.892089
225 1 -1 1.19738 4.40402 8.70835
226 1 1 7.44541 4.94803 8.28211
227 1 -1 5.93272 1.18886 1.56518
228 1 1 8.50709 8.70343 1.24939
229 1 -1 5.54016 3.38865 8.61698
230 1 1 9.47644 0.573085 3.05941
231 1 -1 9.39695 4.46542 1.84205
232 1 1 3.52268 5.60212 0.333999
233 1 -1 3.69009 9.40954 6.10446
234 1 1 3.96836 6.15307 7.57803
235 1 -1 2.02535 0.0418407 3.21642
236 1 1 2.97488 8.79711 8.33242
237 1 -1 2.4122 1.79458 3.04173
238 1 1 9.72355 3.67773 1.52435
239 1 -1 8.55216 6.1623 1.53201
240 1 1 4.98973 2.41459 9.84381
241 1 -1 8.8901 5.9006 1.97649
242 1 1 9.09932 2.23783 1.42554
243 1 -1 6.70722 8.21769 1.21953
244 1 1 6.83768 0.84508 3.25165
245 1 -1 0.222115 3.07945 0.51825
246 1 1 0.503918 9.34932 6.25278
247 1 -1 0.803159 8.7017 9.46211
248 1 1 4.88636 5.00147 9.65639
249 1 -1 1.62258 0.767285 9.63596
250 1 1 2.70143 3.01111 7.74859
251 1 -1 4.41574 5.31824 0.538729
252 1 1 1.64724 5.18097 3.59205
253 1 -1 2.33672 3.21408 6.6081
254 1 1 7.46603 1.53668 9.09844
255 1 -1 3.61269 8.44556 6.99789
256 1 1 6.95465 6.83045 9.31002
257 1 -1 5.91831 9.01549 3.4626
258 1 1 6.56503 8.42229 3.27105
259 1 -1 4.50822 9.59753 3.47025
260 1 1 4.17357 5.27384 7.34774
261 1 -1 7.70968 6.5292 3.54779
262 1 1 4.7977 4.94239 6.24947
263 1 -1 9.24016 9.36994 6.71263
264 1 1 7.36888 8.75922 0.52403
265 1 -1 9.92895 5.87551 6.21586
266 1 1 3.86308 6.71601 9.69083
267 1 -1 8.90048 0.298719 0.573852
268 1 1 6.58753 6.67768 1.83984
269 1 -1 8.672 0.367497 2.21864
270 1 1 3.44519 3.30359 6.52249
271 1 -1 7.24717 3.25113 3.41567
272 1 1 9.53447 5.81336 1.79208
273 1 -1 1.01722 6.42534 0.715
274 1 1 3.58808 4.92392 7.00979
275 1 -1 1.21399 3.56951 6.34505
276 1 1 3.50336 0.942722 2.76989
277 1 -1 9.45475 6.06299 0.659023
278 1 1 3.44464 4.03075 6.20179
279 1 -1 0.949331 5.40183 8.51385
280 1 1 6.41118 2.62135 2.31132
281 1 -1 3.58837 9.78355 7.04966
282 1 1 9.2267 3.19593 2.10384
283 1 -1 1.83092 2.35627 3.93061
284 1 1 4.97203 4.92287 1.8049
285 1 -1 7.4097 4.757 8.604
286 1 1 0.746575 7.69038 0.89134
287 1 -1 8.54862 6.59135 2.18888
288 1 1 2.18747 4.82994 0.761718
289 1 -1 5.71622 2.51116 6.85522
290 1 1 6.95554 1.83187 8.31157
291 1 -1 7.31818 6.60081 2.63208
292 1 1 0.744495 2.73429 9.86022
293 1 -1 5.1573 8.70962 2.53418
294 1 1 2.40385 1.54057 1.9297
295 1 -1 3.42609 2.25856 2.28437
296 1 1 6.66173 3.70851 9.70052
297 1 -1 7.88966 1.4343 8.91223
298 1 1 3.91118 5.22253 6.29642
299 1 -1 9.17618 3.98313 9.82158
300 1 1 4.95424 5.93521 1.3652

@ -0,0 +1,37 @@
# Point dipoles in a 2d box
units lj
atom_style full
read_data data.NaCl
replicate 8 8 8
velocity all create 1.5 49893
neighbor 1.0 bin
neigh_modify delay 0
fix 1 all nve
# LAMMPS computes pairwise and long-range Coulombics
#pair_style coul/long 3.0
#pair_coeff * *
#kspace_style pppm 1.0e-3
# Scafacos computes entire long-range Coulombics
# use dummy pair style to perform atom sorting
pair_style zero 1.0
pair_coeff * *
#fix 2 all scafacos p3m tolerance field 0.001
kspace_style scafacos p3m 0.001
kspace_modify scafacos tolerance field
timestep 0.005
thermo 10
run 100

@ -0,0 +1,31 @@
units lj
atom_style charge
read_data data.cloud_wall
velocity all set 0.0 0.0 0.0 mom no
pair_style zero 1.0
pair_coeff * *
neighbor 1.0 bin
neigh_modify delay 0
fix 1 all nve
kspace_style scafacos ewald 0.001
kspace_modify scafacos tolerance field
timestep 0.005
thermo_style custom step atoms cpu temp pe ke etotal ecoul press
run_style verlet
#dump simple all custom 1000 id x y z vx vy vz
#dump dmp all custom 1000 part.dump id mol x y z vx vy vz fx fy fz q mass
#dump dmpvtk all vtk 1000 vtk/part_*.vtk id mol x y z vx vy vz fx fy fz q mass
#dump_modify dmpvtk pad 7
thermo 10
run 100

@ -0,0 +1,37 @@
units lj
atom_style charge
read_data data.cloud_wall
velocity all set 0.0 0.0 0.0 mom no
pair_style zero 1.0
pair_coeff * *
neighbor 1.0 bin
neigh_modify delay 0
fix 1 all nve
#pair_style coul/long 3.0
#pair_coeff * *
#kspace_style pppm 1.0e-3
kspace_style scafacos fmm 1.0e-3
kspace_modify scafacos tolerance energy_rel
kspace_modify scafacos fmm_tuning 1
timestep 0.005
thermo_style custom step atoms cpu temp pe ke etotal ecoul press
run_style verlet
#dump simple all custom 1000 id x y z vx vy vz
#dump dmp all custom 1000 part.dump id mol x y z vx vy vz fx fy fz q mass
#dump dmpvtk all vtk 1000 vtk/part_*.vtk id mol x y z vx vy vz fx fy fz q mass
#dump_modify dmpvtk pad 7
thermo 10
run 100

@ -0,0 +1,31 @@
units lj
atom_style charge
read_data data.cloud_wall
velocity all set 0.0 0.0 0.0 mom no
pair_style zero 1.0
pair_coeff * *
neighbor 1.0 bin
neigh_modify delay 0
fix 1 all nve
kspace_style scafacos p2nfft 0.001
kspace_modify scafacos tolerance field
timestep 0.005
thermo_style custom step atoms cpu temp pe ke etotal ecoul press
run_style verlet
#dump simple all custom 1000 id x y z vx vy vz
#dump dmp all custom 1000 part.dump id mol x y z vx vy vz fx fy fz q mass
#dump dmpvtk all vtk 1000 vtk/part_*.vtk id mol x y z vx vy vz fx fy fz q mass
#dump_modify dmpvtk pad 7
thermo 10
run 100

@ -0,0 +1,31 @@
units lj
atom_style charge
read_data data.cloud_wall
velocity all set 0.0 0.0 0.0 mom no
pair_style zero 1.0
pair_coeff * *
neighbor 1.0 bin
neigh_modify delay 0
fix 1 all nve
kspace_style scafacos p3m 0.001
kspace_modify scafacos tolerance field
timestep 0.005
thermo_style custom step atoms cpu temp pe ke etotal ecoul press
run_style verlet
#dump simple all custom 1000 id x y z vx vy vz
#dump dmp all custom 1000 part.dump id mol x y z vx vy vz fx fy fz q mass
#dump dmpvtk all vtk 1000 vtk/part_*.vtk id mol x y z vx vy vz fx fy fz q mass
#dump_modify dmpvtk pad 7
thermo 10
run 100

@ -0,0 +1,37 @@
# Point dipoles in a 2d box
units lj
atom_style charge
read_data data.NaCl
replicate 8 8 8
velocity all create 1.5 49893
neighbor 1.0 bin
neigh_modify delay 0
fix 1 all nve
# LAMMPS computes pairwise and long-range Coulombics
#pair_style coul/long 3.0
#pair_coeff * *
#kspace_style pppm 1.0e-3
# Scafacos computes entire long-range Coulombics
# use dummy pair style to perform atom sorting
pair_style zero 1.0
pair_coeff * *
#fix 2 all scafacos p3m tolerance field 0.001
kspace_style scafacos ewald 0.001
kspace_modify scafacos tolerance field
timestep 0.005
thermo 10
run 100

@ -0,0 +1,37 @@
# Point dipoles in a 2d box
units lj
atom_style charge
read_data data.NaCl
replicate 8 8 8
velocity all create 1.5 49893
neighbor 1.0 bin
neigh_modify delay 0
fix 1 all nve
# LAMMPS computes pairwise and long-range Coulombics
#pair_style coul/long 3.0
#pair_coeff * *
#kspace_style pppm 1.0e-3
# Scafacos computes entire long-range Coulombics
# use dummy pair style to perform atom sorting
pair_style zero 1.0
pair_coeff * *
#fix 2 all scafacos p3m tolerance field 0.001
kspace_style scafacos fmm 0.001
kspace_modify scafacos tolerance energy
timestep 0.005
thermo 10
run 100

View File

@ -0,0 +1,34 @@
# Point dipoles in a 2d box
units lj
atom_style charge
read_data data.hammersley_sphere
change_box all boundary f f f
velocity all create 1.5 49893
neighbor 1.0 bin
neigh_modify delay 0
fix 1 all nve
# LAMMPS computes pairwise and long-range Coulombics
#pair_style coul/long 3.0
#pair_coeff * *
#kspace_style pppm 1.0e-3
# Scafacos computes entire long-range Coulombics
# use dummy pair style to perform atom sorting
pair_style zero 1.0
pair_coeff * *
#fix 2 all scafacos p3m tolerance field 0.001
kspace_style scafacos direct 0.001
timestep 0.005
thermo 1
run 20
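
This hammersley_sphere deck runs a fully non-periodic system: change_box switches all boundaries to fixed ("f f f"), and the direct solver is the only one in this set selected without a kspace_modify tolerance line. A minimal sketch of the relevant fragment (commands as used above, accuracy value illustrative):
change_box      all boundary f f f
kspace_style    scafacos direct 0.001
The fmm and p2nfft variants of this example keep the same fixed boundaries and add their own tolerance settings.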

View File

@ -0,0 +1,37 @@
# Point dipoles in a 2d box
units lj
atom_style charge
read_data data.hammersley_sphere
change_box all boundary f f f
velocity all create 1.5 49893
neighbor 1.0 bin
neigh_modify delay 0
fix 1 all nve
# LAMMPS computes pairwise and long-range Coulombics
#pair_style coul/long 3.0
#pair_coeff * *
#kspace_style pppm 1.0e-3
# Scafacos computes entire long-range Coulombics
# use dummy pair style to perform atom sorting
pair_style zero 1.0
pair_coeff * *
#fix 2 all scafacos p3m tolerance field 0.001
kspace_style scafacos fmm 0.001
kspace_modify scafacos tolerance energy_rel
kspace_modify scafacos fmm_tuning 1
timestep 0.005
thermo 1
run 20
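
The fmm-based runs in this set can enable ScaFaCoS' inhomogeneous tuning through a second kspace_modify line; the log files later in this commit echo it as "ScaFaCoS setting fmm inhomogen tuning ...". A sketch of the fmm-specific settings used here (accuracy value as in the example above):
kspace_style    scafacos fmm 0.001
kspace_modify   scafacos tolerance energy_rel
kspace_modify   scafacos fmm_tuning 1
The NaCl fmm example earlier in this set uses an absolute energy tolerance instead and omits the tuning flag.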

View File

@ -0,0 +1,36 @@
# Point dipoles in a 2d box
units lj
atom_style charge
read_data data.hammersley_sphere
change_box all boundary f f f
velocity all create 1.5 49893
neighbor 1.0 bin
neigh_modify delay 0
fix 1 all nve
# LAMMPS computes pairwise and long-range Coulombics
#pair_style coul/long 3.0
#pair_coeff * *
#kspace_style pppm 1.0e-3
# Scafacos computes entire long-range Coulombics
# use dummy pair style to perform atom sorting
pair_style zero 1.0
pair_coeff * *
#fix 2 all scafacos p3m tolerance field 0.001
kspace_style scafacos p2nfft 0.001
kspace_modify scafacos tolerance potential
timestep 0.005
thermo 1
run 20

View File

@ -0,0 +1,37 @@
# Point dipoles in a 2d box
units lj
atom_style charge
read_data data.NaCl
replicate 8 8 8
velocity all create 1.5 49893
neighbor 1.0 bin
neigh_modify delay 0
fix 1 all nve
# LAMMPS computes pairwise and long-range Coulombics
#pair_style coul/long 3.0
#pair_coeff * *
#kspace_style pppm 1.0e-3
# Scafacos computes entire long-range Coulombics
# use dummy pair style to perform atom sorting
pair_style zero 1.0
pair_coeff * *
#fix 2 all scafacos p3m tolerance field 0.001
kspace_style scafacos p2nfft 0.001
kspace_modify scafacos tolerance field
timestep 0.005
thermo 10
run 100

View File

@ -0,0 +1,37 @@
# Point dipoles in a 2d box
units lj
atom_style charge
read_data data.NaCl
replicate 8 8 8
velocity all create 1.5 49893
neighbor 1.0 bin
neigh_modify delay 0
fix 1 all nve
# LAMMPS computes pairwise and long-range Coulombics
#pair_style coul/long 3.0
#pair_coeff * *
#kspace_style pppm 1.0e-3
# Scafacos computes entire long-range Coulombics
# use dummy pair style to perform atom sorting
pair_style zero 1.0
pair_coeff * *
#fix 2 all scafacos p3m tolerance field 0.001
kspace_style scafacos p3m 0.001
kspace_modify scafacos tolerance field
timestep 0.005
thermo 10
run 100
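
Taken together, these inputs exercise one solver/tolerance combination per file, and only one kspace_style command can be active in a given run. A summary sketch in the same commented-alternatives style the files themselves use (active lines match the p3m deck above; the commented lines list the other combinations appearing in this example set):
kspace_style    scafacos p3m 0.001
kspace_modify   scafacos tolerance field
#kspace_style   scafacos ewald 0.001      # with: kspace_modify scafacos tolerance field
#kspace_style   scafacos fmm 0.001        # with: kspace_modify scafacos tolerance energy or energy_rel, optionally fmm_tuning 1
#kspace_style   scafacos p2nfft 0.001     # with: kspace_modify scafacos tolerance field or potential
#kspace_style   scafacos direct 0.001     # no kspace_modify tolerance line in the direct example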

View File

@ -0,0 +1,92 @@
LAMMPS (2 Aug 2018)
OMP_NUM_THREADS environment is not set. Defaulting to 1 thread. (src/comm.cpp:87)
using 1 OpenMP thread(s) per MPI task
units lj
atom_style charge
read_data data.cloud_wall
orthogonal box = (0 0 0) to (10 10 10)
2 by 2 by 4 MPI processor grid
reading atoms ...
300 atoms
velocity all set 0.0 0.0 0.0 mom no
pair_style zero 1.0
pair_coeff * *
neighbor 1.0 bin
neigh_modify delay 0
fix 1 all nve
kspace_style scafacos ewald 0.001
kspace_modify scafacos tolerance field
timestep 0.005
thermo_style custom step atoms cpu temp pe ke etotal ecoul press
run_style verlet
#dump simple all custom 1000 id x y z vx vy vz
#dump dmp all custom 1000 part.dump id mol x y z vx vy vz fx fy fz q mass
#dump dmpvtk all vtk 1000 vtk/part_*.vtk id mol x y z vx vy vz fx fy fz q mass
#dump_modify dmpvtk pad 7
thermo 10
run 100
Setting up ScaFaCoS with solver ewald ...
Neighbor list info ...
update every 1 steps, delay 0 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 2
ghost atom cutoff = 2
binsize = 1, bins = 10 10 10
1 neighbor lists, perpetual/occasional/extra = 1 0 0
(1) pair zero, perpetual
attributes: half, newton on
pair build: half/bin/atomonly/newton
stencil: half/bin/3d/newton
bin: standard
Per MPI rank memory allocation (min/avg/max) = 3.313 | 3.501 | 3.689 Mbytes
Step Atoms CPU Temp PotEng KinEng TotEng E_coul Press
0 300 0 0 0.49647271 0 0.49647271 0 0
10 300 0.051135063 0.014582562 0.44286522 0.02180093 0.46466616 0 0.0043601861
20 300 0.10210872 0.058693359 0.37869251 0.087746571 0.46643909 0 0.017549314
30 300 0.15278506 0.13468789 0.26730177 0.2013584 0.46866017 0 0.040271679
40 300 0.19430375 0.50949535 0.083356437 0.76169555 0.84505198 0 0.15233911
50 300 0.23220921 1.1731116 -0.055261984 1.7538018 1.6985399 0 0.35076037
60 300 0.27002859 1.3589639 -0.33351524 2.031651 1.6981358 0 0.4063302
70 300 0.30781388 1.6482648 -0.76570045 2.4641559 1.6984554 0 0.49283118
80 300 0.34566283 2.8640899 -2.4038488 4.2818144 1.8779656 0 0.85636288
90 300 0.38424087 93.168442 -2.5911448 139.28682 136.69568 0 27.857364
100 300 0.42331123 94.146897 -1.3480439 140.74961 139.40157 0 28.149922
Loop time of 0.423331 on 16 procs for 100 steps with 300 atoms
Performance: 102047.913 tau/day, 236.222 timesteps/s
99.2% CPU use with 16 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 2.5988e-05 | 3.6508e-05 | 5.5075e-05 | 0.0 | 0.01
Kspace | 0.41852 | 0.41925 | 0.41976 | 0.1 | 99.04
Neigh | 0.00023413 | 0.00056887 | 0.0012875 | 0.0 | 0.13
Comm | 0.0019519 | 0.0022772 | 0.0027158 | 0.5 | 0.54
Output | 0.00028276 | 0.00030752 | 0.0003624 | 0.0 | 0.07
Modify | 8.3685e-05 | 0.0001286 | 0.00018764 | 0.0 | 0.03
Other | | 0.000758 | | | 0.18
Nlocal: 18.75 ave 39 max 6 min
Histogram: 6 1 1 0 1 2 2 1 1 1
Nghost: 122.812 ave 195 max 63 min
Histogram: 8 0 0 0 0 0 0 1 3 4
Neighs: 160.625 ave 598 max 13 min
Histogram: 8 2 1 1 1 0 0 2 0 1
Total # of neighbors = 2570
Ave neighs/atom = 8.56667
Neighbor list builds = 23
Dangerous builds = 16
Total wall time: 0:00:00

View File

@ -0,0 +1,92 @@
LAMMPS (2 Aug 2018)
OMP_NUM_THREADS environment is not set. Defaulting to 1 thread. (src/comm.cpp:87)
using 1 OpenMP thread(s) per MPI task
units lj
atom_style charge
read_data data.cloud_wall
orthogonal box = (0 0 0) to (10 10 10)
2 by 2 by 2 MPI processor grid
reading atoms ...
300 atoms
velocity all set 0.0 0.0 0.0 mom no
pair_style zero 1.0
pair_coeff * *
neighbor 1.0 bin
neigh_modify delay 0
fix 1 all nve
kspace_style scafacos ewald 0.001
kspace_modify scafacos tolerance field
timestep 0.005
thermo_style custom step atoms cpu temp pe ke etotal ecoul press
run_style verlet
#dump simple all custom 1000 id x y z vx vy vz
#dump dmp all custom 1000 part.dump id mol x y z vx vy vz fx fy fz q mass
#dump dmpvtk all vtk 1000 vtk/part_*.vtk id mol x y z vx vy vz fx fy fz q mass
#dump_modify dmpvtk pad 7
thermo 10
run 100
Setting up ScaFaCoS with solver ewald ...
Neighbor list info ...
update every 1 steps, delay 0 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 2
ghost atom cutoff = 2
binsize = 1, bins = 10 10 10
1 neighbor lists, perpetual/occasional/extra = 1 0 0
(1) pair zero, perpetual
attributes: half, newton on
pair build: half/bin/atomonly/newton
stencil: half/bin/3d/newton
bin: standard
Per MPI rank memory allocation (min/avg/max) = 3.317 | 3.317 | 3.317 Mbytes
Step Atoms CPU Temp PotEng KinEng TotEng E_coul Press
0 300 0 0 0.49647271 0 0.49647271 0 0
10 300 0.057411432 0.014718629 0.45088339 0.02200435 0.47288774 0 0.00440087
20 300 0.11482716 0.05922597 0.38470912 0.088542825 0.47325194 0 0.017708565
30 300 0.17278481 0.13587829 0.27058048 0.20313804 0.47371852 0 0.040627608
40 300 0.23021507 0.51353118 0.088432648 0.76772911 0.85616176 0 0.15354582
50 300 0.28812647 1.1760001 -0.058088247 1.7581201 1.7000319 0 0.35162403
60 300 0.34651113 1.3627885 -0.33736672 2.0373688 1.7000021 0 0.40747376
70 300 0.40509939 1.6529365 -0.77082139 2.4711401 1.7003187 0 0.49422802
80 300 0.46342874 2.9569837 -2.4624654 4.4206907 1.9582253 0 0.88413814
90 300 0.52329254 81.642726 -2.5370215 122.05588 119.51885 0 24.411175
100 300 0.58335209 85.047974 -1.128107 127.14672 126.01861 0 25.429344
Loop time of 0.583369 on 8 procs for 100 steps with 300 atoms
Performance: 74052.598 tau/day, 171.418 timesteps/s
99.7% CPU use with 8 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 4.0531e-05 | 4.6492e-05 | 4.8876e-05 | 0.0 | 0.01
Kspace | 0.57805 | 0.5785 | 0.57893 | 0.0 | 99.17
Neigh | 0.00062275 | 0.00091892 | 0.0013313 | 0.0 | 0.16
Comm | 0.002604 | 0.0028289 | 0.0031538 | 0.3 | 0.48
Output | 0.0002265 | 0.0002434 | 0.00029039 | 0.0 | 0.04
Modify | 0.00016117 | 0.00017747 | 0.00019884 | 0.0 | 0.03
Other | | 0.00065 | | | 0.11
Nlocal: 37.5 ave 46 max 31 min
Histogram: 2 0 0 2 1 0 2 0 0 1
Nghost: 203.875 ave 212 max 192 min
Histogram: 1 0 1 0 0 2 1 0 0 3
Neighs: 321.625 ave 599 max 112 min
Histogram: 1 2 0 1 1 0 1 1 0 1
Total # of neighbors = 2573
Ave neighs/atom = 8.57667
Neighbor list builds = 23
Dangerous builds = 16
Total wall time: 0:00:00

View File

@ -0,0 +1,99 @@
LAMMPS (2 Aug 2018)
OMP_NUM_THREADS environment is not set. Defaulting to 1 thread. (src/comm.cpp:87)
using 1 OpenMP thread(s) per MPI task
units lj
atom_style charge
read_data data.cloud_wall
orthogonal box = (0 0 0) to (10 10 10)
1 by 1 by 1 MPI processor grid
reading atoms ...
300 atoms
velocity all set 0.0 0.0 0.0 mom no
pair_style zero 1.0
pair_coeff * *
neighbor 1.0 bin
neigh_modify delay 0
fix 1 all nve
#pair_style coul/long 3.0
#pair_coeff * *
#kspace_style pppm 1.0e-3
kspace_style scafacos fmm 1.0e-3
kspace_modify scafacos tolerance energy_rel
kspace_modify scafacos fmm_tuning 1
ScaFaCoS setting fmm inhomogen tuning ...
timestep 0.005
thermo_style custom step atoms cpu temp pe ke etotal ecoul press
run_style verlet
#dump simple all custom 1000 id x y z vx vy vz
#dump dmp all custom 1000 part.dump id mol x y z vx vy vz fx fy fz q mass
#dump dmpvtk all vtk 1000 vtk/part_*.vtk id mol x y z vx vy vz fx fy fz q mass
#dump_modify dmpvtk pad 7
thermo 10
run 100
Setting up ScaFaCoS with solver fmm ...
Neighbor list info ...
update every 1 steps, delay 0 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 2
ghost atom cutoff = 2
binsize = 1, bins = 10 10 10
1 neighbor lists, perpetual/occasional/extra = 1 0 0
(1) pair zero, perpetual
attributes: half, newton on
pair build: half/bin/atomonly/newton
stencil: half/bin/3d/newton
bin: standard
Per MPI rank memory allocation (min/avg/max) = 3.34 | 3.34 | 3.34 Mbytes
Step Atoms CPU Temp PotEng KinEng TotEng E_coul Press
0 300 0 0 0.49646402 0 0.49646402 0 0.016548801
10 300 0.063865185 0.015455559 0.47335833 0.02310606 0.49646439 0 0.020399823
20 300 0.12760854 0.06229069 0.40334177 0.093124582 0.49646635 0 0.032069642
30 300 0.19143319 0.14310163 0.28254277 0.21393694 0.49647971 0 0.05220548
40 300 0.25553131 0.52929788 0.089669015 0.79130033 0.88096934 0 0.16124903
50 300 0.31961966 1.1963022 -0.082792461 1.7884718 1.7056794 0 0.35493462
60 300 0.38388991 1.3928167 -0.37659239 2.082261 1.7056686 0 0.40389911
70 300 0.44797421 1.7069009 -0.84571914 2.5518169 1.7060978 0 0.48217274
80 300 0.50961447 15.358343 -3.368063 22.960722 19.592659 0 4.4798757
90 300 0.57181501 42.280432 -2.1623864 63.209247 61.04686 0 12.56977
100 300 0.63501096 41.48079 -0.89904529 62.013782 61.114736 0 12.372788
Loop time of 0.635022 on 1 procs for 100 steps with 300 atoms
Performance: 68029.122 tau/day, 157.475 timesteps/s
99.7% CPU use with 1 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 9.0837e-05 | 9.0837e-05 | 9.0837e-05 | 0.0 | 0.01
Kspace | 0.62877 | 0.62877 | 0.62877 | 0.0 | 99.01
Neigh | 0.0035319 | 0.0035319 | 0.0035319 | 0.0 | 0.56
Comm | 0.0010211 | 0.0010211 | 0.0010211 | 0.0 | 0.16
Output | 0.00014758 | 0.00014758 | 0.00014758 | 0.0 | 0.02
Modify | 0.0010428 | 0.0010428 | 0.0010428 | 0.0 | 0.16
Other | | 0.0004218 | | | 0.07
Nlocal: 300 ave 300 max 300 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 374 ave 374 max 374 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 2459 ave 2459 max 2459 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 2459
Ave neighs/atom = 8.19667
Neighbor list builds = 15
Dangerous builds = 0
Total wall time: 0:00:00

View File

@ -0,0 +1,99 @@
LAMMPS (2 Aug 2018)
OMP_NUM_THREADS environment is not set. Defaulting to 1 thread. (src/comm.cpp:87)
using 1 OpenMP thread(s) per MPI task
units lj
atom_style charge
read_data data.cloud_wall
orthogonal box = (0 0 0) to (10 10 10)
2 by 2 by 4 MPI processor grid
reading atoms ...
300 atoms
velocity all set 0.0 0.0 0.0 mom no
pair_style zero 1.0
pair_coeff * *
neighbor 1.0 bin
neigh_modify delay 0
fix 1 all nve
#pair_style coul/long 3.0
#pair_coeff * *
#kspace_style pppm 1.0e-3
kspace_style scafacos fmm 1.0e-3
kspace_modify scafacos tolerance energy_rel
kspace_modify scafacos fmm_tuning 1
ScaFaCoS setting fmm inhomogen tuning ...
timestep 0.005
thermo_style custom step atoms cpu temp pe ke etotal ecoul press
run_style verlet
#dump simple all custom 1000 id x y z vx vy vz
#dump dmp all custom 1000 part.dump id mol x y z vx vy vz fx fy fz q mass
#dump dmpvtk all vtk 1000 vtk/part_*.vtk id mol x y z vx vy vz fx fy fz q mass
#dump_modify dmpvtk pad 7
thermo 10
run 100
Setting up ScaFaCoS with solver fmm ...
Neighbor list info ...
update every 1 steps, delay 0 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 2
ghost atom cutoff = 2
binsize = 1, bins = 10 10 10
1 neighbor lists, perpetual/occasional/extra = 1 0 0
(1) pair zero, perpetual
attributes: half, newton on
pair build: half/bin/atomonly/newton
stencil: half/bin/3d/newton
bin: standard
Per MPI rank memory allocation (min/avg/max) = 3.313 | 3.501 | 3.689 Mbytes
Step Atoms CPU Temp PotEng KinEng TotEng E_coul Press
0 300 0 0 0.49646402 0 0.49646402 0 0.016548801
10 300 0.023007393 0.015455559 0.47335833 0.02310606 0.49646439 0 0.020399823
20 300 0.045746088 0.06229069 0.40334177 0.093124582 0.49646635 0 0.032069642
30 300 0.068123341 0.14310163 0.28254277 0.21393694 0.49647971 0 0.05220548
40 300 0.090359211 0.52929788 0.089669015 0.79130033 0.88096934 0 0.16124903
50 300 0.11304998 1.1963022 -0.082792461 1.7884718 1.7056794 0 0.35493462
60 300 0.13585806 1.3928167 -0.37659239 2.082261 1.7056686 0 0.40389911
70 300 0.15867376 1.7069009 -0.84571914 2.5518169 1.7060978 0 0.48217274
80 300 0.18324137 15.358343 -3.368063 22.960722 19.592659 0 4.4798757
90 300 0.20960689 42.280432 -2.1623864 63.209247 61.04686 0 12.56977
100 300 0.23539281 41.48079 -0.89904529 62.013782 61.114736 0 12.372788
Loop time of 0.235411 on 16 procs for 100 steps with 300 atoms
Performance: 183509.107 tau/day, 424.790 timesteps/s
97.9% CPU use with 16 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 3.2425e-05 | 4.4718e-05 | 6.1274e-05 | 0.0 | 0.02
Kspace | 0.23097 | 0.23143 | 0.2318 | 0.1 | 98.31
Neigh | 0.00015116 | 0.00035347 | 0.00075746 | 0.0 | 0.15
Comm | 0.0020316 | 0.002282 | 0.0025339 | 0.3 | 0.97
Output | 0.00034404 | 0.00037053 | 0.00042701 | 0.0 | 0.16
Modify | 9.3937e-05 | 0.00014532 | 0.00018811 | 0.0 | 0.06
Other | | 0.0007878 | | | 0.33
Nlocal: 18.75 ave 36 max 6 min
Histogram: 4 3 1 0 0 1 2 1 2 2
Nghost: 127 ave 196 max 71 min
Histogram: 8 0 0 0 0 0 0 1 6 1
Neighs: 153.688 ave 491 max 10 min
Histogram: 8 1 1 1 1 1 0 0 0 3
Total # of neighbors = 2459
Ave neighs/atom = 8.19667
Neighbor list builds = 15
Dangerous builds = 0
Total wall time: 0:00:01

View File

@ -0,0 +1,99 @@
LAMMPS (2 Aug 2018)
OMP_NUM_THREADS environment is not set. Defaulting to 1 thread. (src/comm.cpp:87)
using 1 OpenMP thread(s) per MPI task
units lj
atom_style charge
read_data data.cloud_wall
orthogonal box = (0 0 0) to (10 10 10)
1 by 1 by 2 MPI processor grid
reading atoms ...
300 atoms
velocity all set 0.0 0.0 0.0 mom no
pair_style zero 1.0
pair_coeff * *
neighbor 1.0 bin
neigh_modify delay 0
fix 1 all nve
#pair_style coul/long 3.0
#pair_coeff * *
#kspace_style pppm 1.0e-3
kspace_style scafacos fmm 1.0e-3
kspace_modify scafacos tolerance energy_rel
kspace_modify scafacos fmm_tuning 1
ScaFaCoS setting fmm inhomogen tuning ...
timestep 0.005
thermo_style custom step atoms cpu temp pe ke etotal ecoul press
run_style verlet
#dump simple all custom 1000 id x y z vx vy vz
#dump dmp all custom 1000 part.dump id mol x y z vx vy vz fx fy fz q mass
#dump dmpvtk all vtk 1000 vtk/part_*.vtk id mol x y z vx vy vz fx fy fz q mass
#dump_modify dmpvtk pad 7
thermo 10
run 100
Setting up ScaFaCoS with solver fmm ...
Neighbor list info ...
update every 1 steps, delay 0 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 2
ghost atom cutoff = 2
binsize = 1, bins = 10 10 10
1 neighbor lists, perpetual/occasional/extra = 1 0 0
(1) pair zero, perpetual
attributes: half, newton on
pair build: half/bin/atomonly/newton
stencil: half/bin/3d/newton
bin: standard
Per MPI rank memory allocation (min/avg/max) = 3.354 | 3.354 | 3.355 Mbytes
Step Atoms CPU Temp PotEng KinEng TotEng E_coul Press
0 300 0 0 0.49646402 0 0.49646402 0 0.016548801
10 300 0.038181543 0.015455559 0.47335833 0.02310606 0.49646439 0 0.020399823
20 300 0.076276302 0.06229069 0.40334177 0.093124582 0.49646635 0 0.032069642
30 300 0.11437607 0.14310163 0.28254277 0.21393694 0.49647971 0 0.05220548
40 300 0.15244293 0.52929788 0.089669015 0.79130033 0.88096934 0 0.16124903
50 300 0.19081283 1.1963022 -0.082792461 1.7884718 1.7056794 0 0.35493462
60 300 0.22923493 1.3928167 -0.37659239 2.082261 1.7056686 0 0.40389911
70 300 0.26754427 1.7069009 -0.84571914 2.5518169 1.7060978 0 0.48217274
80 300 0.30721259 15.358343 -3.368063 22.960722 19.592659 0 4.4798757
90 300 0.34865618 42.280432 -2.1623864 63.209247 61.04686 0 12.56977
100 300 0.39100981 41.48079 -0.89904529 62.013782 61.114736 0 12.372788
Loop time of 0.391022 on 2 procs for 100 steps with 300 atoms
Performance: 110479.760 tau/day, 255.740 timesteps/s
99.6% CPU use with 2 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 8.0109e-05 | 8.1539e-05 | 8.297e-05 | 0.0 | 0.02
Kspace | 0.38534 | 0.38582 | 0.3863 | 0.1 | 98.67
Neigh | 0.0014851 | 0.0019699 | 0.0024548 | 1.1 | 0.50
Comm | 0.0019314 | 0.0020101 | 0.0020888 | 0.2 | 0.51
Output | 0.00014496 | 0.00017297 | 0.00020099 | 0.0 | 0.04
Modify | 0.0005033 | 0.00052273 | 0.00054216 | 0.0 | 0.13
Other | | 0.0004461 | | | 0.11
Nlocal: 150 ave 159 max 141 min
Histogram: 1 0 0 0 0 0 0 0 0 1
Nghost: 392 ave 395 max 389 min
Histogram: 1 0 0 0 0 0 0 0 0 1
Neighs: 1229.5 ave 1773 max 686 min
Histogram: 1 0 0 0 0 0 0 0 0 1
Total # of neighbors = 2459
Ave neighs/atom = 8.19667
Neighbor list builds = 15
Dangerous builds = 0
Total wall time: 0:00:01

View File

@ -0,0 +1,99 @@
LAMMPS (2 Aug 2018)
OMP_NUM_THREADS environment is not set. Defaulting to 1 thread. (src/comm.cpp:87)
using 1 OpenMP thread(s) per MPI task
units lj
atom_style charge
read_data data.cloud_wall
orthogonal box = (0 0 0) to (10 10 10)
1 by 2 by 2 MPI processor grid
reading atoms ...
300 atoms
velocity all set 0.0 0.0 0.0 mom no
pair_style zero 1.0
pair_coeff * *
neighbor 1.0 bin
neigh_modify delay 0
fix 1 all nve
#pair_style coul/long 3.0
#pair_coeff * *
#kspace_style pppm 1.0e-3
kspace_style scafacos fmm 1.0e-3
kspace_modify scafacos tolerance energy_rel
kspace_modify scafacos fmm_tuning 1
ScaFaCoS setting fmm inhomogen tuning ...
timestep 0.005
thermo_style custom step atoms cpu temp pe ke etotal ecoul press
run_style verlet
#dump simple all custom 1000 id x y z vx vy vz
#dump dmp all custom 1000 part.dump id mol x y z vx vy vz fx fy fz q mass
#dump dmpvtk all vtk 1000 vtk/part_*.vtk id mol x y z vx vy vz fx fy fz q mass
#dump_modify dmpvtk pad 7
thermo 10
run 100
Setting up ScaFaCoS with solver fmm ...
Neighbor list info ...
update every 1 steps, delay 0 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 2
ghost atom cutoff = 2
binsize = 1, bins = 10 10 10
1 neighbor lists, perpetual/occasional/extra = 1 0 0
(1) pair zero, perpetual
attributes: half, newton on
pair build: half/bin/atomonly/newton
stencil: half/bin/3d/newton
bin: standard
Per MPI rank memory allocation (min/avg/max) = 3.333 | 3.333 | 3.333 Mbytes
Step Atoms CPU Temp PotEng KinEng TotEng E_coul Press
0 300 0 0 0.49646402 0 0.49646402 0 0.016548801
10 300 0.029414415 0.015455559 0.47335833 0.02310606 0.49646439 0 0.020399823
20 300 0.058616877 0.06229069 0.40334177 0.093124582 0.49646635 0 0.032069642
30 300 0.087769508 0.14310163 0.28254277 0.21393694 0.49647971 0 0.05220548
40 300 0.1168611 0.52929788 0.089669015 0.79130033 0.88096934 0 0.16124903
50 300 0.14482284 1.1963022 -0.082792461 1.7884718 1.7056794 0 0.35493462
60 300 0.17198443 1.3928167 -0.37659239 2.082261 1.7056686 0 0.40389911
70 300 0.19868851 1.7069009 -0.84571914 2.5518169 1.7060978 0 0.48217274
80 300 0.22835517 15.358343 -3.368063 22.960722 19.592659 0 4.4798757
90 300 0.26023602 42.280432 -2.1623864 63.209247 61.04686 0 12.56977
100 300 0.29043221 41.48079 -0.89904529 62.013782 61.114736 0 12.372788
Loop time of 0.290448 on 4 procs for 100 steps with 300 atoms
Performance: 148735.741 tau/day, 344.296 timesteps/s
99.0% CPU use with 4 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 5.9605e-05 | 6.187e-05 | 6.4135e-05 | 0.0 | 0.02
Kspace | 0.28551 | 0.28584 | 0.28604 | 0.0 | 98.41
Neigh | 0.00077796 | 0.0010615 | 0.0013225 | 0.7 | 0.37
Comm | 0.002372 | 0.0024325 | 0.002497 | 0.1 | 0.84
Output | 0.00025368 | 0.0002659 | 0.00029516 | 0.0 | 0.09
Modify | 0.00030279 | 0.00031865 | 0.00033021 | 0.0 | 0.11
Other | | 0.0004706 | | | 0.16
Nlocal: 75 ave 81 max 70 min
Histogram: 2 0 0 0 0 0 0 1 0 1
Nghost: 282.5 ave 290 max 274 min
Histogram: 1 0 0 1 0 0 0 0 1 1
Neighs: 614.75 ave 981 max 285 min
Histogram: 1 1 0 0 0 0 0 1 0 1
Total # of neighbors = 2459
Ave neighs/atom = 8.19667
Neighbor list builds = 15
Dangerous builds = 0
Total wall time: 0:00:01

View File

@ -0,0 +1,99 @@
LAMMPS (2 Aug 2018)
OMP_NUM_THREADS environment is not set. Defaulting to 1 thread. (src/comm.cpp:87)
using 1 OpenMP thread(s) per MPI task
units lj
atom_style charge
read_data data.cloud_wall
orthogonal box = (0 0 0) to (10 10 10)
2 by 2 by 2 MPI processor grid
reading atoms ...
300 atoms
velocity all set 0.0 0.0 0.0 mom no
pair_style zero 1.0
pair_coeff * *
neighbor 1.0 bin
neigh_modify delay 0
fix 1 all nve
#pair_style coul/long 3.0
#pair_coeff * *
#kspace_style pppm 1.0e-3
kspace_style scafacos fmm 1.0e-3
kspace_modify scafacos tolerance energy_rel
kspace_modify scafacos fmm_tuning 1
ScaFaCoS setting fmm inhomogen tuning ...
timestep 0.005
thermo_style custom step atoms cpu temp pe ke etotal ecoul press
run_style verlet
#dump simple all custom 1000 id x y z vx vy vz
#dump dmp all custom 1000 part.dump id mol x y z vx vy vz fx fy fz q mass
#dump dmpvtk all vtk 1000 vtk/part_*.vtk id mol x y z vx vy vz fx fy fz q mass
#dump_modify dmpvtk pad 7
thermo 10
run 100
Setting up ScaFaCoS with solver fmm ...
Neighbor list info ...
update every 1 steps, delay 0 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 2
ghost atom cutoff = 2
binsize = 1, bins = 10 10 10
1 neighbor lists, perpetual/occasional/extra = 1 0 0
(1) pair zero, perpetual
attributes: half, newton on
pair build: half/bin/atomonly/newton
stencil: half/bin/3d/newton
bin: standard
Per MPI rank memory allocation (min/avg/max) = 3.317 | 3.317 | 3.317 Mbytes
Step Atoms CPU Temp PotEng KinEng TotEng E_coul Press
0 300 0 0 0.49646402 0 0.49646402 0 0.016548801
10 300 0.026465416 0.015455559 0.47335833 0.02310606 0.49646439 0 0.020399823
20 300 0.057377338 0.06229069 0.40334177 0.093124582 0.49646635 0 0.032069642
30 300 0.088356495 0.14310163 0.28254277 0.21393694 0.49647971 0 0.05220548
40 300 0.11900806 0.52929788 0.089669015 0.79130033 0.88096934 0 0.16124903
50 300 0.15157914 1.1963022 -0.082792461 1.7884718 1.7056794 0 0.35493462
60 300 0.18608141 1.3928167 -0.37659239 2.082261 1.7056686 0 0.40389911
70 300 0.21956491 1.7069009 -0.84571914 2.5518169 1.7060978 0 0.48217274
80 300 0.24269128 15.358343 -3.368063 22.960722 19.592659 0 4.4798757
90 300 0.26847005 42.280432 -2.1623864 63.209247 61.04686 0 12.56977
100 300 0.29283834 41.48079 -0.89904529 62.013782 61.114736 0 12.372788
Loop time of 0.292855 on 8 procs for 100 steps with 300 atoms
Performance: 147513.337 tau/day, 341.466 timesteps/s
98.4% CPU use with 8 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 4.7207e-05 | 5.5045e-05 | 6.9618e-05 | 0.0 | 0.02
Kspace | 0.28739 | 0.28773 | 0.2881 | 0.0 | 98.25
Neigh | 0.00040698 | 0.00060901 | 0.00082922 | 0.0 | 0.21
Comm | 0.0029533 | 0.0031788 | 0.0034056 | 0.3 | 1.09
Output | 0.00029063 | 0.00030866 | 0.00035119 | 0.0 | 0.11
Modify | 0.00018978 | 0.00022188 | 0.00026703 | 0.0 | 0.08
Other | | 0.0007486 | | | 0.26
Nlocal: 37.5 ave 45 max 31 min
Histogram: 1 1 1 1 1 0 1 0 1 1
Nghost: 200 ave 209 max 189 min
Histogram: 1 0 0 0 1 4 0 0 0 2
Neighs: 307.375 ave 514 max 115 min
Histogram: 2 1 0 1 1 0 0 0 1 2
Total # of neighbors = 2459
Ave neighs/atom = 8.19667
Neighbor list builds = 15
Dangerous builds = 0
Total wall time: 0:00:01

View File

@ -0,0 +1,92 @@
LAMMPS (2 Aug 2018)
OMP_NUM_THREADS environment is not set. Defaulting to 1 thread. (src/comm.cpp:87)
using 1 OpenMP thread(s) per MPI task
units lj
atom_style charge
read_data data.cloud_wall
orthogonal box = (0 0 0) to (10 10 10)
1 by 1 by 1 MPI processor grid
reading atoms ...
300 atoms
velocity all set 0.0 0.0 0.0 mom no
pair_style zero 1.0
pair_coeff * *
neighbor 1.0 bin
neigh_modify delay 0
fix 1 all nve
kspace_style scafacos p2nfft 0.001
kspace_modify scafacos tolerance field
timestep 0.005
thermo_style custom step atoms cpu temp pe ke etotal ecoul press
run_style verlet
#dump simple all custom 1000 id x y z vx vy vz
#dump dmp all custom 1000 part.dump id mol x y z vx vy vz fx fy fz q mass
#dump dmpvtk all vtk 1000 vtk/part_*.vtk id mol x y z vx vy vz fx fy fz q mass
#dump_modify dmpvtk pad 7
thermo 10
run 100
Setting up ScaFaCoS with solver p2nfft ...
Neighbor list info ...
update every 1 steps, delay 0 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 2
ghost atom cutoff = 2
binsize = 1, bins = 10 10 10
1 neighbor lists, perpetual/occasional/extra = 1 0 0
(1) pair zero, perpetual
attributes: half, newton on
pair build: half/bin/atomonly/newton
stencil: half/bin/3d/newton
bin: standard
Per MPI rank memory allocation (min/avg/max) = 3.34 | 3.34 | 3.34 Mbytes
Step Atoms CPU Temp PotEng KinEng TotEng E_coul Press
0 300 0 0 0.49683273 0 0.49683273 0 0.016561091
10 300 0.071435928 0.015479312 0.47369009 0.023141571 0.49683166 0 0.020417984
20 300 0.14302707 0.062386358 0.40356181 0.093267605 0.49682941 0 0.032105581
30 300 0.21480989 0.14331637 0.2825636 0.21425798 0.49682157 0 0.052270382
40 300 0.28638172 0.53041843 0.089505208 0.79297556 0.88248077 0 0.16157862
50 300 0.35810781 1.1948397 -0.083317439 1.7862853 1.7029679 0 0.35447982
60 300 0.42993116 1.3915614 -0.37745551 2.0803842 1.7029287 0 0.40349499
70 300 0.50181961 1.7061978 -0.84746071 2.5507657 1.703305 0 0.48190445
80 300 0.57404566 20.692093 -3.32971 30.93468 27.60497 0 6.0759456
90 300 0.64724708 48.999403 -2.1632167 73.254107 71.090891 0 14.578714
100 300 0.72128963 51.199785 -0.81127924 76.543678 75.732399 0 15.281693
Loop time of 0.721302 on 1 procs for 100 steps with 300 atoms
Performance: 59891.733 tau/day, 138.638 timesteps/s
100.0% CPU use with 1 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 8.893e-05 | 8.893e-05 | 8.893e-05 | 0.0 | 0.01
Kspace | 0.71502 | 0.71502 | 0.71502 | 0.0 | 99.13
Neigh | 0.0035415 | 0.0035415 | 0.0035415 | 0.0 | 0.49
Comm | 0.001024 | 0.001024 | 0.001024 | 0.0 | 0.14
Output | 0.00015044 | 0.00015044 | 0.00015044 | 0.0 | 0.02
Modify | 0.0010409 | 0.0010409 | 0.0010409 | 0.0 | 0.14
Other | | 0.0004385 | | | 0.06
Nlocal: 300 ave 300 max 300 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 381 ave 381 max 381 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 2461 ave 2461 max 2461 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 2461
Ave neighs/atom = 8.20333
Neighbor list builds = 15
Dangerous builds = 0
Total wall time: 0:00:00

View File

@ -0,0 +1,92 @@
LAMMPS (2 Aug 2018)
OMP_NUM_THREADS environment is not set. Defaulting to 1 thread. (src/comm.cpp:87)
using 1 OpenMP thread(s) per MPI task
units lj
atom_style charge
read_data data.cloud_wall
orthogonal box = (0 0 0) to (10 10 10)
2 by 2 by 4 MPI processor grid
reading atoms ...
300 atoms
velocity all set 0.0 0.0 0.0 mom no
pair_style zero 1.0
pair_coeff * *
neighbor 1.0 bin
neigh_modify delay 0
fix 1 all nve
kspace_style scafacos p2nfft 0.001
kspace_modify scafacos tolerance field
timestep 0.005
thermo_style custom step atoms cpu temp pe ke etotal ecoul press
run_style verlet
#dump simple all custom 1000 id x y z vx vy vz
#dump dmp all custom 1000 part.dump id mol x y z vx vy vz fx fy fz q mass
#dump dmpvtk all vtk 1000 vtk/part_*.vtk id mol x y z vx vy vz fx fy fz q mass
#dump_modify dmpvtk pad 7
thermo 10
run 100
Setting up ScaFaCoS with solver p2nfft ...
Neighbor list info ...
update every 1 steps, delay 0 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 2
ghost atom cutoff = 2
binsize = 1, bins = 10 10 10
1 neighbor lists, perpetual/occasional/extra = 1 0 0
(1) pair zero, perpetual
attributes: half, newton on
pair build: half/bin/atomonly/newton
stencil: half/bin/3d/newton
bin: standard
Per MPI rank memory allocation (min/avg/max) = 3.313 | 3.501 | 3.689 Mbytes
Step Atoms CPU Temp PotEng KinEng TotEng E_coul Press
0 300 0 0 0.49683273 0 0.49683273 0 0.016561091
10 300 0.015678644 0.015479312 0.47369009 0.023141571 0.49683166 0 0.020417984
20 300 0.031283855 0.062386358 0.40356181 0.093267605 0.49682941 0 0.032105581
30 300 0.046878099 0.14331637 0.2825636 0.21425798 0.49682157 0 0.052270382
40 300 0.062416077 0.53041843 0.089505208 0.79297556 0.88248077 0 0.16157862
50 300 0.078029871 1.1948397 -0.083317439 1.7862853 1.7029679 0 0.35447982
60 300 0.093806505 1.3915614 -0.37745551 2.0803842 1.7029287 0 0.40349499
70 300 0.1096344 1.7061978 -0.84746071 2.5507657 1.703305 0 0.48190445
80 300 0.12532592 20.692093 -3.32971 30.93468 27.60497 0 6.0759456
90 300 0.14175463 48.999403 -2.1632167 73.254107 71.090891 0 14.578714
100 300 0.15838337 51.199785 -0.81127924 76.543678 75.732399 0 15.281693
Loop time of 0.158406 on 16 procs for 100 steps with 300 atoms
Performance: 272716.448 tau/day, 631.288 timesteps/s
99.4% CPU use with 16 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 2.718e-05 | 3.7491e-05 | 5.6744e-05 | 0.0 | 0.02
Kspace | 0.15435 | 0.15482 | 0.15516 | 0.1 | 97.74
Neigh | 0.00014806 | 0.0003508 | 0.00074744 | 0.0 | 0.22
Comm | 0.0016866 | 0.0019967 | 0.0023787 | 0.5 | 1.26
Output | 0.00027871 | 0.00033027 | 0.00038028 | 0.0 | 0.21
Modify | 8.0347e-05 | 0.00011933 | 0.00016522 | 0.0 | 0.08
Other | | 0.0007506 | | | 0.47
Nlocal: 18.75 ave 33 max 6 min
Histogram: 2 6 0 0 0 0 2 1 2 3
Nghost: 128.875 ave 198 max 71 min
Histogram: 7 1 0 0 0 0 0 1 5 2
Neighs: 153.812 ave 490 max 14 min
Histogram: 8 0 3 0 1 1 0 0 1 2
Total # of neighbors = 2461
Ave neighs/atom = 8.20333
Neighbor list builds = 15
Dangerous builds = 0
Total wall time: 0:00:00

View File

@ -0,0 +1,92 @@
LAMMPS (2 Aug 2018)
OMP_NUM_THREADS environment is not set. Defaulting to 1 thread. (src/comm.cpp:87)
using 1 OpenMP thread(s) per MPI task
units lj
atom_style charge
read_data data.cloud_wall
orthogonal box = (0 0 0) to (10 10 10)
1 by 1 by 2 MPI processor grid
reading atoms ...
300 atoms
velocity all set 0.0 0.0 0.0 mom no
pair_style zero 1.0
pair_coeff * *
neighbor 1.0 bin
neigh_modify delay 0
fix 1 all nve
kspace_style scafacos p2nfft 0.001
kspace_modify scafacos tolerance field
timestep 0.005
thermo_style custom step atoms cpu temp pe ke etotal ecoul press
run_style verlet
#dump simple all custom 1000 id x y z vx vy vz
#dump dmp all custom 1000 part.dump id mol x y z vx vy vz fx fy fz q mass
#dump dmpvtk all vtk 1000 vtk/part_*.vtk id mol x y z vx vy vz fx fy fz q mass
#dump_modify dmpvtk pad 7
thermo 10
run 100
Setting up ScaFaCoS with solver p2nfft ...
Neighbor list info ...
update every 1 steps, delay 0 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 2
ghost atom cutoff = 2
binsize = 1, bins = 10 10 10
1 neighbor lists, perpetual/occasional/extra = 1 0 0
(1) pair zero, perpetual
attributes: half, newton on
pair build: half/bin/atomonly/newton
stencil: half/bin/3d/newton
bin: standard
Per MPI rank memory allocation (min/avg/max) = 3.354 | 3.354 | 3.355 Mbytes
Step Atoms CPU Temp PotEng KinEng TotEng E_coul Press
0 300 0 0 0.49683273 0 0.49683273 0 0.016561091
10 300 0.044083834 0.015479312 0.47369009 0.023141571 0.49683166 0 0.020417984
20 300 0.088379145 0.062386358 0.40356181 0.093267605 0.49682941 0 0.032105581
30 300 0.13264704 0.14331637 0.2825636 0.21425798 0.49682157 0 0.052270382
40 300 0.17687225 0.53041843 0.089505208 0.79297556 0.88248077 0 0.16157862
50 300 0.22116137 1.1948397 -0.083317439 1.7862853 1.7029679 0 0.35447982
60 300 0.26515126 1.3915614 -0.37745551 2.0803842 1.7029287 0 0.40349499
70 300 0.30891085 1.7061978 -0.84746071 2.5507657 1.703305 0 0.48190445
80 300 0.35292292 20.692093 -3.32971 30.93468 27.60497 0 6.0759456
90 300 0.39845228 48.999403 -2.1632167 73.254107 71.090891 0 14.578714
100 300 0.44492316 51.199785 -0.81127924 76.543678 75.732399 0 15.281693
Loop time of 0.444937 on 2 procs for 100 steps with 300 atoms
Performance: 97092.373 tau/day, 224.751 timesteps/s
100.0% CPU use with 2 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 7.7248e-05 | 7.844e-05 | 7.9632e-05 | 0.0 | 0.02
Kspace | 0.43932 | 0.43979 | 0.44026 | 0.1 | 98.84
Neigh | 0.0014915 | 0.0019662 | 0.0024409 | 1.1 | 0.44
Comm | 0.0019331 | 0.0019941 | 0.0020552 | 0.1 | 0.45
Output | 0.00013781 | 0.00016308 | 0.00018835 | 0.0 | 0.04
Modify | 0.00050378 | 0.00050449 | 0.00050521 | 0.0 | 0.11
Other | | 0.0004425 | | | 0.10
Nlocal: 150 ave 157 max 143 min
Histogram: 1 0 0 0 0 0 0 0 0 1
Nghost: 399 ave 402 max 396 min
Histogram: 1 0 0 0 0 0 0 0 0 1
Neighs: 1230.5 ave 1756 max 705 min
Histogram: 1 0 0 0 0 0 0 0 0 1
Total # of neighbors = 2461
Ave neighs/atom = 8.20333
Neighbor list builds = 15
Dangerous builds = 0
Total wall time: 0:00:00

View File

@ -0,0 +1,92 @@
LAMMPS (2 Aug 2018)
OMP_NUM_THREADS environment is not set. Defaulting to 1 thread. (src/comm.cpp:87)
using 1 OpenMP thread(s) per MPI task
units lj
atom_style charge
read_data data.cloud_wall
orthogonal box = (0 0 0) to (10 10 10)
1 by 2 by 2 MPI processor grid
reading atoms ...
300 atoms
velocity all set 0.0 0.0 0.0 mom no
pair_style zero 1.0
pair_coeff * *
neighbor 1.0 bin
neigh_modify delay 0
fix 1 all nve
kspace_style scafacos p2nfft 0.001
kspace_modify scafacos tolerance field
timestep 0.005
thermo_style custom step atoms cpu temp pe ke etotal ecoul press
run_style verlet
#dump simple all custom 1000 id x y z vx vy vz
#dump dmp all custom 1000 part.dump id mol x y z vx vy vz fx fy fz q mass
#dump dmpvtk all vtk 1000 vtk/part_*.vtk id mol x y z vx vy vz fx fy fz q mass
#dump_modify dmpvtk pad 7
thermo 10
run 100
Setting up ScaFaCoS with solver p2nfft ...
Neighbor list info ...
update every 1 steps, delay 0 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 2
ghost atom cutoff = 2
binsize = 1, bins = 10 10 10
1 neighbor lists, perpetual/occasional/extra = 1 0 0
(1) pair zero, perpetual
attributes: half, newton on
pair build: half/bin/atomonly/newton
stencil: half/bin/3d/newton
bin: standard
Per MPI rank memory allocation (min/avg/max) = 3.333 | 3.333 | 3.333 Mbytes
Step Atoms CPU Temp PotEng KinEng TotEng E_coul Press
0 300 0 0 0.49683273 0 0.49683273 0 0.016561091
10 300 0.02743125 0.015479312 0.47369009 0.023141571 0.49683166 0 0.020417984
20 300 0.05494833 0.062386358 0.40356181 0.093267605 0.49682941 0 0.032105581
30 300 0.082517862 0.14331637 0.2825636 0.21425798 0.49682157 0 0.052270382
40 300 0.11015558 0.53041843 0.089505208 0.79297556 0.88248077 0 0.16157862
50 300 0.13790298 1.1948397 -0.083317439 1.7862853 1.7029679 0 0.35447982
60 300 0.1660006 1.3915614 -0.37745551 2.0803842 1.7029287 0 0.40349499
70 300 0.1937964 1.7061978 -0.84746071 2.5507657 1.703305 0 0.48190445
80 300 0.22181106 20.692093 -3.32971 30.93468 27.60497 0 6.0759456
90 300 0.25105524 48.999403 -2.1632167 73.254107 71.090891 0 14.578714
100 300 0.28086019 51.199785 -0.81127924 76.543678 75.732399 0 15.281693
Loop time of 0.280875 on 4 procs for 100 steps with 300 atoms
Performance: 153805.254 tau/day, 356.031 timesteps/s
99.7% CPU use with 4 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 5.6744e-05 | 6.0022e-05 | 6.4135e-05 | 0.0 | 0.02
Kspace | 0.27651 | 0.27682 | 0.27714 | 0.0 | 98.56
Neigh | 0.00079465 | 0.001082 | 0.0014107 | 0.8 | 0.39
Comm | 0.0019372 | 0.002014 | 0.0020835 | 0.1 | 0.72
Output | 0.00018406 | 0.00019914 | 0.00023413 | 0.0 | 0.07
Modify | 0.0002749 | 0.00028563 | 0.00029325 | 0.0 | 0.10
Other | | 0.0004173 | | | 0.15
Nlocal: 75 ave 81 max 69 min
Histogram: 1 0 0 0 1 1 0 0 0 1
Nghost: 287 ave 296 max 278 min
Histogram: 1 0 1 0 0 0 0 1 0 1
Neighs: 615.25 ave 964 max 286 min
Histogram: 1 1 0 0 0 0 0 1 0 1
Total # of neighbors = 2461
Ave neighs/atom = 8.20333
Neighbor list builds = 15
Dangerous builds = 0
Total wall time: 0:00:00

View File

@ -0,0 +1,92 @@
LAMMPS (2 Aug 2018)
OMP_NUM_THREADS environment is not set. Defaulting to 1 thread. (src/comm.cpp:87)
using 1 OpenMP thread(s) per MPI task
units lj
atom_style charge
read_data data.cloud_wall
orthogonal box = (0 0 0) to (10 10 10)
2 by 2 by 2 MPI processor grid
reading atoms ...
300 atoms
velocity all set 0.0 0.0 0.0 mom no
pair_style zero 1.0
pair_coeff * *
neighbor 1.0 bin
neigh_modify delay 0
fix 1 all nve
kspace_style scafacos p2nfft 0.001
kspace_modify scafacos tolerance field
timestep 0.005
thermo_style custom step atoms cpu temp pe ke etotal ecoul press
run_style verlet
#dump simple all custom 1000 id x y z vx vy vz
#dump dmp all custom 1000 part.dump id mol x y z vx vy vz fx fy fz q mass
#dump dmpvtk all vtk 1000 vtk/part_*.vtk id mol x y z vx vy vz fx fy fz q mass
#dump_modify dmpvtk pad 7
thermo 10
run 100
Setting up ScaFaCoS with solver p2nfft ...
Neighbor list info ...
update every 1 steps, delay 0 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 2
ghost atom cutoff = 2
binsize = 1, bins = 10 10 10
1 neighbor lists, perpetual/occasional/extra = 1 0 0
(1) pair zero, perpetual
attributes: half, newton on
pair build: half/bin/atomonly/newton
stencil: half/bin/3d/newton
bin: standard
Per MPI rank memory allocation (min/avg/max) = 3.317 | 3.317 | 3.317 Mbytes
Step Atoms CPU Temp PotEng KinEng TotEng E_coul Press
0 300 0 0 0.49683273 0 0.49683273 0 0.016561091
10 300 0.01961565 0.015479312 0.47369009 0.023141571 0.49683166 0 0.020417984
20 300 0.039346695 0.062386358 0.40356181 0.093267605 0.49682941 0 0.032105581
30 300 0.059037447 0.14331637 0.2825636 0.21425798 0.49682157 0 0.052270382
40 300 0.078732729 0.53041843 0.089505208 0.79297556 0.88248077 0 0.16157862
50 300 0.098586798 1.1948397 -0.083317439 1.7862853 1.7029679 0 0.35447982
60 300 0.11857247 1.3915614 -0.37745551 2.0803842 1.7029287 0 0.40349499
70 300 0.1385541 1.7061978 -0.84746071 2.5507657 1.703305 0 0.48190445
80 300 0.15850091 20.692093 -3.32971 30.93468 27.60497 0 6.0759456
90 300 0.17892075 48.999403 -2.1632167 73.254107 71.090891 0 14.578714
100 300 0.19964767 51.199785 -0.81127924 76.543678 75.732399 0 15.281693
Loop time of 0.199664 on 8 procs for 100 steps with 300 atoms
Performance: 216363.074 tau/day, 500.840 timesteps/s
99.4% CPU use with 8 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 4.5061e-05 | 4.7535e-05 | 5.3167e-05 | 0.0 | 0.02
Kspace | 0.19551 | 0.19584 | 0.19611 | 0.0 | 98.08
Neigh | 0.00041366 | 0.00060952 | 0.00082064 | 0.0 | 0.31
Comm | 0.0021496 | 0.0022282 | 0.0024025 | 0.2 | 1.12
Output | 0.0002346 | 0.00024167 | 0.00027847 | 0.0 | 0.12
Modify | 0.00016665 | 0.00017652 | 0.0001924 | 0.0 | 0.09
Other | | 0.0005245 | | | 0.26
Nlocal: 37.5 ave 42 max 33 min
Histogram: 2 1 0 1 0 0 1 0 1 2
Nghost: 202.25 ave 212 max 194 min
Histogram: 1 0 2 1 0 2 0 1 0 1
Neighs: 307.625 ave 505 max 129 min
Histogram: 3 0 0 1 1 0 0 0 1 2
Total # of neighbors = 2461
Ave neighs/atom = 8.20333
Neighbor list builds = 15
Dangerous builds = 0
Total wall time: 0:00:00

View File

@ -0,0 +1,102 @@
LAMMPS (2 Aug 2018)
OMP_NUM_THREADS environment is not set. Defaulting to 1 thread. (src/comm.cpp:87)
using 1 OpenMP thread(s) per MPI task
# Point dipoles in a 2d box
units lj
atom_style charge
read_data data.NaCl
orthogonal box = (0 0 0) to (1 1 1)
1 by 1 by 1 MPI processor grid
reading atoms ...
8 atoms
replicate 8 8 8
orthogonal box = (0 0 0) to (8 8 8)
1 by 1 by 1 MPI processor grid
4096 atoms
Time spent = 0.000498772 secs
velocity all create 1.5 49893
neighbor 1.0 bin
neigh_modify delay 0
fix 1 all nve
# LAMMPS computes pairwise and long-range Coulombics
#pair_style coul/long 3.0
#pair_coeff * *
#kspace_style pppm 1.0e-3
# Scafacos computes entire long-range Coulombics
# use dummy pair style to perform atom sorting
pair_style zero 1.0
pair_coeff * *
#fix 2 all scafacos p3m tolerance field 0.001
kspace_style scafacos ewald 0.001
kspace_modify scafacos tolerance field
timestep 0.005
thermo 10
run 100
Setting up ScaFaCoS with solver ewald ...
Neighbor list info ...
update every 1 steps, delay 0 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 2
ghost atom cutoff = 2
binsize = 1, bins = 8 8 8
1 neighbor lists, perpetual/occasional/extra = 1 0 0
(1) pair zero, perpetual
attributes: half, newton on
pair build: half/bin/atomonly/newton
stencil: half/bin/3d/newton
bin: standard
Per MPI rank memory allocation (min/avg/max) = 5.813 | 5.813 | 5.813 Mbytes
Step Temp E_pair E_mol TotEng Press
0 1.5 -1.7475938 0 0.50185691 11.99707
10 1.500011 -1.747529 0 0.50193816 11.997158
20 1.5000023 -1.7475152 0 0.50193898 11.997089
30 1.4999308 -1.747404 0 0.50194285 11.996517
40 1.4997722 -1.7471622 0 0.50194686 11.995248
50 1.4995835 -1.746878 0 0.50194808 11.993739
60 1.4996054 -1.7469114 0 0.50194749 11.993914
70 1.5004341 -1.7481558 0 0.50194592 12.000543
80 1.5033218 -1.7524875 0 0.50194458 12.023638
90 1.5108306 -1.7637462 0 0.50194636 12.083694
100 1.5292479 -1.7913449 0 0.50196695 12.230996
Loop time of 1121.22 on 1 procs for 100 steps with 4096 atoms
Performance: 38.530 tau/day, 0.089 timesteps/s
100.0% CPU use with 1 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0.0015197 | 0.0015197 | 0.0015197 | 0.0 | 0.00
Kspace | 1121.2 | 1121.2 | 1121.2 | 0.0 |100.00
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0.013699 | 0.013699 | 0.013699 | 0.0 | 0.00
Output | 0.00038314 | 0.00038314 | 0.00038314 | 0.0 | 0.00
Modify | 0.011126 | 0.011126 | 0.011126 | 0.0 | 0.00
Other | | 0.00418 | | | 0.00
Nlocal: 4096 ave 4096 max 4096 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 9728 ave 9728 max 9728 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 524288 ave 524288 max 524288 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 524288
Ave neighs/atom = 128
Neighbor list builds = 0
Dangerous builds = 0
Total wall time: 0:18:57

View File

@ -0,0 +1,102 @@
LAMMPS (2 Aug 2018)
OMP_NUM_THREADS environment is not set. Defaulting to 1 thread. (src/comm.cpp:87)
using 1 OpenMP thread(s) per MPI task
# Point dipoles in a 2d box
units lj
atom_style charge
read_data data.NaCl
orthogonal box = (0 0 0) to (1 1 1)
2 by 2 by 4 MPI processor grid
reading atoms ...
8 atoms
replicate 8 8 8
orthogonal box = (0 0 0) to (8 8 8)
2 by 2 by 4 MPI processor grid
4096 atoms
Time spent = 0.000462294 secs
velocity all create 1.5 49893
neighbor 1.0 bin
neigh_modify delay 0
fix 1 all nve
# LAMMPS computes pairwise and long-range Coulombics
#pair_style coul/long 3.0
#pair_coeff * *
#kspace_style pppm 1.0e-3
# Scafacos computes entire long-range Coulombics
# use dummy pair style to perform atom sorting
pair_style zero 1.0
pair_coeff * *
#fix 2 all scafacos p3m tolerance field 0.001
kspace_style scafacos ewald 0.001
kspace_modify scafacos tolerance field
timestep 0.005
thermo 10
run 100
Setting up ScaFaCoS with solver ewald ...
Neighbor list info ...
update every 1 steps, delay 0 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 2
ghost atom cutoff = 2
binsize = 1, bins = 8 8 8
1 neighbor lists, perpetual/occasional/extra = 1 0 0
(1) pair zero, perpetual
attributes: half, newton on
pair build: half/bin/atomonly/newton
stencil: half/bin/3d/newton
bin: standard
Per MPI rank memory allocation (min/avg/max) = 3.501 | 3.501 | 3.501 Mbytes
Step Temp E_pair E_mol TotEng Press
0 1.5 -1.7475938 0 0.50185691 -nan
10 1.500011 -1.747529 0 0.50193816 -nan
20 1.5000023 -1.7475152 0 0.50193898 -nan
30 1.4999308 -1.747404 0 0.50194285 -nan
40 1.4997722 -1.7471622 0 0.50194686 -nan
50 1.4995835 -1.746878 0 0.50194808 -nan
60 1.4996054 -1.7469114 0 0.50194749 -nan
70 1.5004341 -1.7481558 0 0.50194592 -nan
80 1.5033218 -1.7524875 0 0.50194458 -nan
90 1.5108306 -1.7637462 0 0.50194636 -nan
100 1.5292479 -1.7913449 0 0.50196695 -nan
Loop time of 80.2777 on 16 procs for 100 steps with 4096 atoms
Performance: 538.132 tau/day, 1.246 timesteps/s
99.8% CPU use with 16 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0.0003705 | 0.00039807 | 0.00048542 | 0.0 | 0.00
Kspace | 80.262 | 80.263 | 80.264 | 0.0 | 99.98
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0.010191 | 0.011419 | 0.012416 | 0.6 | 0.01
Output | 0.00028253 | 0.00033158 | 0.0004065 | 0.0 | 0.00
Modify | 0.00082541 | 0.0008464 | 0.00087833 | 0.0 | 0.00
Other | | 0.001511 | | | 0.00
Nlocal: 256 ave 256 max 256 min
Histogram: 16 0 0 0 0 0 0 0 0 0
Nghost: 2816 ave 2816 max 2816 min
Histogram: 16 0 0 0 0 0 0 0 0 0
Neighs: 32768 ave 32768 max 32768 min
Histogram: 16 0 0 0 0 0 0 0 0 0
Total # of neighbors = 524288
Ave neighs/atom = 128
Neighbor list builds = 0
Dangerous builds = 0
Total wall time: 0:01:22

View File

@ -0,0 +1,102 @@
LAMMPS (2 Aug 2018)
OMP_NUM_THREADS environment is not set. Defaulting to 1 thread. (src/comm.cpp:87)
using 1 OpenMP thread(s) per MPI task
# Point dipoles in a 2d box
units lj
atom_style charge
read_data data.NaCl
orthogonal box = (0 0 0) to (1 1 1)
1 by 1 by 2 MPI processor grid
reading atoms ...
8 atoms
replicate 8 8 8
orthogonal box = (0 0 0) to (8 8 8)
1 by 1 by 2 MPI processor grid
4096 atoms
Time spent = 0.000344753 secs
velocity all create 1.5 49893
neighbor 1.0 bin
neigh_modify delay 0
fix 1 all nve
# LAMMPS computes pairwise and long-range Coulombics
#pair_style coul/long 3.0
#pair_coeff * *
#kspace_style pppm 1.0e-3
# Scafacos computes entire long-range Coulombics
# use dummy pair style to perform atom sorting
pair_style zero 1.0
pair_coeff * *
#fix 2 all scafacos p3m tolerance field 0.001
kspace_style scafacos ewald 0.001
kspace_modify scafacos tolerance field
timestep 0.005
thermo 10
run 100
Setting up ScaFaCoS with solver ewald ...
Neighbor list info ...
update every 1 steps, delay 0 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 2
ghost atom cutoff = 2
binsize = 1, bins = 8 8 8
1 neighbor lists, perpetual/occasional/extra = 1 0 0
(1) pair zero, perpetual
attributes: half, newton on
pair build: half/bin/atomonly/newton
stencil: half/bin/3d/newton
bin: standard
Per MPI rank memory allocation (min/avg/max) = 4.574 | 4.574 | 4.574 Mbytes
Step Temp E_pair E_mol TotEng Press
0 1.5 -1.7475938 0 0.50185691 11.99707
10 1.500011 -1.747529 0 0.50193816 11.997158
20 1.5000023 -1.7475152 0 0.50193898 11.997089
30 1.4999308 -1.747404 0 0.50194285 11.996517
40 1.4997722 -1.7471622 0 0.50194686 11.995248
50 1.4995835 -1.746878 0 0.50194808 11.993739
60 1.4996054 -1.7469114 0 0.50194749 11.993914
70 1.5004341 -1.7481558 0 0.50194592 12.000543
80 1.5033218 -1.7524875 0 0.50194458 12.023638
90 1.5108306 -1.7637462 0 0.50194636 12.083694
100 1.5292479 -1.7913449 0 0.50196695 12.230996
Loop time of 566.796 on 2 procs for 100 steps with 4096 atoms
Performance: 76.218 tau/day, 0.176 timesteps/s
100.0% CPU use with 2 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0.0010231 | 0.0010413 | 0.0010595 | 0.1 | 0.00
Kspace | 566.77 | 566.77 | 566.77 | 0.0 | 99.99
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0.019707 | 0.01982 | 0.019932 | 0.1 | 0.00
Output | 0.0002656 | 0.00029266 | 0.00031972 | 0.0 | 0.00
Modify | 0.0055575 | 0.0055707 | 0.0055838 | 0.0 | 0.00
Other | | 0.002497 | | | 0.00
Nlocal: 2048 ave 2048 max 2048 min
Histogram: 2 0 0 0 0 0 0 0 0 0
Nghost: 7168 ave 7168 max 7168 min
Histogram: 2 0 0 0 0 0 0 0 0 0
Neighs: 262144 ave 262144 max 262144 min
Histogram: 2 0 0 0 0 0 0 0 0 0
Total # of neighbors = 524288
Ave neighs/atom = 128
Neighbor list builds = 0
Dangerous builds = 0
Total wall time: 0:09:38

View File

@ -0,0 +1,102 @@
LAMMPS (2 Aug 2018)
OMP_NUM_THREADS environment is not set. Defaulting to 1 thread. (src/comm.cpp:87)
using 1 OpenMP thread(s) per MPI task
# Point dipoles in a 2d box
units lj
atom_style charge
read_data data.NaCl
orthogonal box = (0 0 0) to (1 1 1)
1 by 2 by 2 MPI processor grid
reading atoms ...
8 atoms
replicate 8 8 8
orthogonal box = (0 0 0) to (8 8 8)
1 by 2 by 2 MPI processor grid
4096 atoms
Time spent = 0.000261068 secs
velocity all create 1.5 49893
neighbor 1.0 bin
neigh_modify delay 0
fix 1 all nve
# LAMMPS computes pairwise and long-range Coulombics
#pair_style coul/long 3.0
#pair_coeff * *
#kspace_style pppm 1.0e-3
# Scafacos computes entire long-range Coulombics
# use dummy pair style to perform atom sorting
pair_style zero 1.0
pair_coeff * *
#fix 2 all scafacos p3m tolerance field 0.001
kspace_style scafacos ewald 0.001
kspace_modify scafacos tolerance field
timestep 0.005
thermo 10
run 100
Setting up ScaFaCoS with solver ewald ...
Neighbor list info ...
update every 1 steps, delay 0 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 2
ghost atom cutoff = 2
binsize = 1, bins = 8 8 8
1 neighbor lists, perpetual/occasional/extra = 1 0 0
(1) pair zero, perpetual
attributes: half, newton on
pair build: half/bin/atomonly/newton
stencil: half/bin/3d/newton
bin: standard
Per MPI rank memory allocation (min/avg/max) = 4.008 | 4.008 | 4.008 Mbytes
Step Temp E_pair E_mol TotEng Press
0 1.5 -1.7475938 0 0.50185691 -nan
10 1.500011 -1.747529 0 0.50193816 -nan
20 1.5000023 -1.7475152 0 0.50193898 -nan
30 1.4999308 -1.747404 0 0.50194285 -nan
40 1.4997722 -1.7471622 0 0.50194686 -nan
50 1.4995835 -1.746878 0 0.50194808 -nan
60 1.4996054 -1.7469114 0 0.50194749 -nan
70 1.5004341 -1.7481558 0 0.50194592 -nan
80 1.5033218 -1.7524875 0 0.50194458 -nan
90 1.5108306 -1.7637462 0 0.50194636 -nan
100 1.5292479 -1.7913449 0 0.50196695 -nan
Loop time of 295.996 on 4 procs for 100 steps with 4096 atoms
Performance: 145.948 tau/day, 0.338 timesteps/s
99.9% CPU use with 4 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0.00071096 | 0.00071985 | 0.00072813 | 0.0 | 0.00
Kspace | 295.98 | 295.98 | 295.98 | 0.0 | 99.99
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0.013666 | 0.013736 | 0.013795 | 0.0 | 0.00
Output | 0.00023484 | 0.00025135 | 0.00029254 | 0.0 | 0.00
Modify | 0.0029099 | 0.002973 | 0.0030224 | 0.1 | 0.00
Other | | 0.001821 | | | 0.00
Nlocal: 1024 ave 1024 max 1024 min
Histogram: 4 0 0 0 0 0 0 0 0 0
Nghost: 5120 ave 5120 max 5120 min
Histogram: 4 0 0 0 0 0 0 0 0 0
Neighs: 131072 ave 131072 max 131072 min
Histogram: 4 0 0 0 0 0 0 0 0 0
Total # of neighbors = 524288
Ave neighs/atom = 128
Neighbor list builds = 0
Dangerous builds = 0
Total wall time: 0:05:02

View File

@ -0,0 +1,102 @@
LAMMPS (2 Aug 2018)
OMP_NUM_THREADS environment is not set. Defaulting to 1 thread. (src/comm.cpp:87)
using 1 OpenMP thread(s) per MPI task
# Point dipoles in a 2d box
units lj
atom_style charge
read_data data.NaCl
orthogonal box = (0 0 0) to (1 1 1)
2 by 2 by 2 MPI processor grid
reading atoms ...
8 atoms
replicate 8 8 8
orthogonal box = (0 0 0) to (8 8 8)
2 by 2 by 2 MPI processor grid
4096 atoms
Time spent = 0.000232935 secs
velocity all create 1.5 49893
neighbor 1.0 bin
neigh_modify delay 0
fix 1 all nve
# LAMMPS computes pairwise and long-range Coulombics
#pair_style coul/long 3.0
#pair_coeff * *
#kspace_style pppm 1.0e-3
# Scafacos computes entire long-range Coulombics
# use dummy pair style to perform atom sorting
pair_style zero 1.0
pair_coeff * *
#fix 2 all scafacos p3m tolerance field 0.001
kspace_style scafacos ewald 0.001
kspace_modify scafacos tolerance field
timestep 0.005
thermo 10
run 100
Setting up ScaFaCoS with solver ewald ...
Neighbor list info ...
update every 1 steps, delay 0 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 2
ghost atom cutoff = 2
binsize = 1, bins = 8 8 8
1 neighbor lists, perpetual/occasional/extra = 1 0 0
(1) pair zero, perpetual
attributes: half, newton on
pair build: half/bin/atomonly/newton
stencil: half/bin/3d/newton
bin: standard
Per MPI rank memory allocation (min/avg/max) = 3.508 | 3.508 | 3.508 Mbytes
Step Temp E_pair E_mol TotEng Press
0 1.5 -1.7475938 0 0.50185691 -nan
10 1.500011 -1.747529 0 0.50193816 -nan
20 1.5000023 -1.7475152 0 0.50193898 -nan
30 1.4999308 -1.747404 0 0.50194285 -nan
40 1.4997722 -1.7471622 0 0.50194686 -nan
50 1.4995835 -1.746878 0 0.50194808 -nan
60 1.4996054 -1.7469114 0 0.50194749 -nan
70 1.5004341 -1.7481558 0 0.50194592 -nan
80 1.5033218 -1.7524875 0 0.50194458 -nan
90 1.5108306 -1.7637462 0 0.50194636 -nan
100 1.5292479 -1.7913449 0 0.50196695 -nan
Loop time of 154.44 on 8 procs for 100 steps with 4096 atoms
Performance: 279.720 tau/day, 0.647 timesteps/s
99.9% CPU use with 8 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0.00049257 | 0.00051311 | 0.00059295 | 0.0 | 0.00
Kspace | 154.42 | 154.42 | 154.42 | 0.0 | 99.99
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0.012076 | 0.013177 | 0.014308 | 0.8 | 0.01
Output | 0.00025177 | 0.00028065 | 0.00030136 | 0.0 | 0.00
Modify | 0.0015776 | 0.0017182 | 0.0018268 | 0.2 | 0.00
Other | | 0.001309 | | | 0.00
Nlocal: 512 ave 512 max 512 min
Histogram: 8 0 0 0 0 0 0 0 0 0
Nghost: 3584 ave 3584 max 3584 min
Histogram: 8 0 0 0 0 0 0 0 0 0
Neighs: 65536 ave 65536 max 65536 min
Histogram: 8 0 0 0 0 0 0 0 0 0
Total # of neighbors = 524288
Ave neighs/atom = 128
Neighbor list builds = 0
Dangerous builds = 0
Total wall time: 0:02:38

View File

@ -0,0 +1,102 @@
LAMMPS (2 Aug 2018)
OMP_NUM_THREADS environment is not set. Defaulting to 1 thread. (src/comm.cpp:87)
using 1 OpenMP thread(s) per MPI task
# Point dipoles in a 2d box
units lj
atom_style charge
read_data data.NaCl
orthogonal box = (0 0 0) to (1 1 1)
1 by 1 by 1 MPI processor grid
reading atoms ...
8 atoms
replicate 8 8 8
orthogonal box = (0 0 0) to (8 8 8)
1 by 1 by 1 MPI processor grid
4096 atoms
Time spent = 0.000518799 secs
velocity all create 1.5 49893
neighbor 1.0 bin
neigh_modify delay 0
fix 1 all nve
# LAMMPS computes pairwise and long-range Coulombics
#pair_style coul/long 3.0
#pair_coeff * *
#kspace_style pppm 1.0e-3
# Scafacos computes entire long-range Coulombics
# use dummy pair style to perform atom sorting
pair_style zero 1.0
pair_coeff * *
#fix 2 all scafacos p3m tolerance field 0.001
kspace_style scafacos fmm 0.001
kspace_modify scafacos tolerance energy
timestep 0.005
thermo 10
run 100
Setting up ScaFaCoS with solver fmm ...
Neighbor list info ...
update every 1 steps, delay 0 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 2
ghost atom cutoff = 2
binsize = 1, bins = 8 8 8
1 neighbor lists, perpetual/occasional/extra = 1 0 0
(1) pair zero, perpetual
attributes: half, newton on
pair build: half/bin/atomonly/newton
stencil: half/bin/3d/newton
bin: standard
Per MPI rank memory allocation (min/avg/max) = 5.813 | 5.813 | 5.813 Mbytes
Step Temp E_pair E_mol TotEng Press
0 1.5 -1.7475646 0 0.50188608 10.44368
10 1.5000016 -1.7475671 0 0.50188602 10.44369
20 1.4999827 -1.7475388 0 0.50188592 10.443564
30 1.4999016 -1.7474173 0 0.5018858 10.443023
40 1.4997356 -1.7471685 0 0.50188572 10.441917
50 1.4995414 -1.7468771 0 0.5018858 10.440623
60 1.4995587 -1.7469027 0 0.50188622 10.440739
70 1.5003837 -1.7481389 0 0.50188727 10.446238
80 1.5032684 -1.7524625 0 0.50188958 10.465466
90 1.5107749 -1.763714 0 0.50189507 10.515502
100 1.52919 -1.791306 0 0.50191895 10.638261
Loop time of 34.7058 on 1 procs for 100 steps with 4096 atoms
Performance: 1244.749 tau/day, 2.881 timesteps/s
100.0% CPU use with 1 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0.0015228 | 0.0015228 | 0.0015228 | 0.0 | 0.00
Kspace | 34.675 | 34.675 | 34.675 | 0.0 | 99.91
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0.013741 | 0.013741 | 0.013741 | 0.0 | 0.04
Output | 0.00041246 | 0.00041246 | 0.00041246 | 0.0 | 0.00
Modify | 0.01107 | 0.01107 | 0.01107 | 0.0 | 0.03
Other | | 0.004232 | | | 0.01
Nlocal: 4096 ave 4096 max 4096 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 9728 ave 9728 max 9728 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 524288 ave 524288 max 524288 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 524288
Ave neighs/atom = 128
Neighbor list builds = 0
Dangerous builds = 0
Total wall time: 0:00:35
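The log above and the four NaCl logs that follow (16, 2, 4, and 8 MPI tasks) switch to the fmm solver. Relative to the ewald sketch shown earlier, only the solver selection and the tolerance type change, as echoed in each log:

kspace_style    scafacos fmm 0.001
kspace_modify   scafacos tolerance energy     # fmm runs use an energy tolerance instead of the field tolerance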

View File

@ -0,0 +1,102 @@
LAMMPS (2 Aug 2018)
OMP_NUM_THREADS environment is not set. Defaulting to 1 thread. (src/comm.cpp:87)
using 1 OpenMP thread(s) per MPI task
# Point dipoles in a 2d box
units lj
atom_style charge
read_data data.NaCl
orthogonal box = (0 0 0) to (1 1 1)
2 by 2 by 4 MPI processor grid
reading atoms ...
8 atoms
replicate 8 8 8
orthogonal box = (0 0 0) to (8 8 8)
2 by 2 by 4 MPI processor grid
4096 atoms
Time spent = 0.000400543 secs
velocity all create 1.5 49893
neighbor 1.0 bin
neigh_modify delay 0
fix 1 all nve
# LAMMPS computes pairwise and long-range Coulombics
#pair_style coul/long 3.0
#pair_coeff * *
#kspace_style pppm 1.0e-3
# Scafacos computes entire long-range Coulombics
# use dummy pair style to perform atom sorting
pair_style zero 1.0
pair_coeff * *
#fix 2 all scafacos p3m tolerance field 0.001
kspace_style scafacos fmm 0.001
kspace_modify scafacos tolerance energy
timestep 0.005
thermo 10
run 100
Setting up ScaFaCoS with solver fmm ...
Neighbor list info ...
update every 1 steps, delay 0 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 2
ghost atom cutoff = 2
binsize = 1, bins = 8 8 8
1 neighbor lists, perpetual/occasional/extra = 1 0 0
(1) pair zero, perpetual
attributes: half, newton on
pair build: half/bin/atomonly/newton
stencil: half/bin/3d/newton
bin: standard
Per MPI rank memory allocation (min/avg/max) = 3.501 | 3.501 | 3.501 Mbytes
Step Temp E_pair E_mol TotEng Press
0 1.5 -1.7475646 0 0.50188608 10.44368
10 1.5000016 -1.7475671 0 0.50188602 10.44369
20 1.4999827 -1.7475388 0 0.50188592 10.443564
30 1.4999016 -1.7474173 0 0.5018858 10.443023
40 1.4997356 -1.7471685 0 0.50188572 10.441917
50 1.4995414 -1.7468771 0 0.5018858 10.440623
60 1.4995587 -1.7469027 0 0.50188622 10.440739
70 1.5003837 -1.7481389 0 0.50188727 10.446238
80 1.5032684 -1.7524625 0 0.50188958 10.465466
90 1.5107749 -1.763714 0 0.50189507 10.515502
100 1.52919 -1.791306 0 0.50191895 10.638261
Loop time of 4.23774 on 16 procs for 100 steps with 4096 atoms
Performance: 10194.102 tau/day, 23.597 timesteps/s
99.6% CPU use with 16 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0.00038028 | 0.00040729 | 0.00046206 | 0.0 | 0.01
Kspace | 4.2206 | 4.2211 | 4.2216 | 0.0 | 99.61
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0.011439 | 0.012491 | 0.013172 | 0.4 | 0.29
Output | 0.00042915 | 0.000489 | 0.00061274 | 0.0 | 0.01
Modify | 0.00093102 | 0.00099151 | 0.0010982 | 0.0 | 0.02
Other | | 0.002255 | | | 0.05
Nlocal: 256 ave 256 max 256 min
Histogram: 16 0 0 0 0 0 0 0 0 0
Nghost: 2816 ave 2816 max 2816 min
Histogram: 16 0 0 0 0 0 0 0 0 0
Neighs: 32768 ave 32768 max 32768 min
Histogram: 16 0 0 0 0 0 0 0 0 0
Total # of neighbors = 524288
Ave neighs/atom = 128
Neighbor list builds = 0
Dangerous builds = 0
Total wall time: 0:00:06

View File

@ -0,0 +1,102 @@
LAMMPS (2 Aug 2018)
OMP_NUM_THREADS environment is not set. Defaulting to 1 thread. (src/comm.cpp:87)
using 1 OpenMP thread(s) per MPI task
# Point dipoles in a 2d box
units lj
atom_style charge
read_data data.NaCl
orthogonal box = (0 0 0) to (1 1 1)
1 by 1 by 2 MPI processor grid
reading atoms ...
8 atoms
replicate 8 8 8
orthogonal box = (0 0 0) to (8 8 8)
1 by 1 by 2 MPI processor grid
4096 atoms
Time spent = 0.0003407 secs
velocity all create 1.5 49893
neighbor 1.0 bin
neigh_modify delay 0
fix 1 all nve
# LAMMPS computes pairwise and long-range Coulombics
#pair_style coul/long 3.0
#pair_coeff * *
#kspace_style pppm 1.0e-3
# Scafacos computes entire long-range Coulombics
# use dummy pair style to perform atom sorting
pair_style zero 1.0
pair_coeff * *
#fix 2 all scafacos p3m tolerance field 0.001
kspace_style scafacos fmm 0.001
kspace_modify scafacos tolerance energy
timestep 0.005
thermo 10
run 100
Setting up ScaFaCoS with solver fmm ...
Neighbor list info ...
update every 1 steps, delay 0 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 2
ghost atom cutoff = 2
binsize = 1, bins = 8 8 8
1 neighbor lists, perpetual/occasional/extra = 1 0 0
(1) pair zero, perpetual
attributes: half, newton on
pair build: half/bin/atomonly/newton
stencil: half/bin/3d/newton
bin: standard
Per MPI rank memory allocation (min/avg/max) = 4.574 | 4.574 | 4.574 Mbytes
Step Temp E_pair E_mol TotEng Press
0 1.5 -1.7475646 0 0.50188608 10.44368
10 1.5000016 -1.7475671 0 0.50188602 10.44369
20 1.4999827 -1.7475388 0 0.50188592 10.443564
30 1.4999016 -1.7474173 0 0.5018858 10.443023
40 1.4997356 -1.7471685 0 0.50188572 10.441917
50 1.4995414 -1.7468771 0 0.5018858 10.440623
60 1.4995587 -1.7469027 0 0.50188622 10.440739
70 1.5003837 -1.7481389 0 0.50188727 10.446238
80 1.5032684 -1.7524625 0 0.50188958 10.465466
90 1.5107749 -1.763714 0 0.50189507 10.515502
100 1.52919 -1.791306 0 0.50191895 10.638261
Loop time of 17.9401 on 2 procs for 100 steps with 4096 atoms
Performance: 2408.014 tau/day, 5.574 timesteps/s
99.9% CPU use with 2 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0.0010042 | 0.0010235 | 0.0010428 | 0.1 | 0.01
Kspace | 17.912 | 17.912 | 17.912 | 0.0 | 99.84
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0.018252 | 0.018528 | 0.018804 | 0.2 | 0.10
Output | 0.00034094 | 0.00035989 | 0.00037885 | 0.0 | 0.00
Modify | 0.0055602 | 0.0056567 | 0.0057533 | 0.1 | 0.03
Other | | 0.002716 | | | 0.02
Nlocal: 2048 ave 2048 max 2048 min
Histogram: 2 0 0 0 0 0 0 0 0 0
Nghost: 7168 ave 7168 max 7168 min
Histogram: 2 0 0 0 0 0 0 0 0 0
Neighs: 262144 ave 262144 max 262144 min
Histogram: 2 0 0 0 0 0 0 0 0 0
Total # of neighbors = 524288
Ave neighs/atom = 128
Neighbor list builds = 0
Dangerous builds = 0
Total wall time: 0:00:19

View File

@ -0,0 +1,102 @@
LAMMPS (2 Aug 2018)
OMP_NUM_THREADS environment is not set. Defaulting to 1 thread. (src/comm.cpp:87)
using 1 OpenMP thread(s) per MPI task
# Point dipoles in a 2d box
units lj
atom_style charge
read_data data.NaCl
orthogonal box = (0 0 0) to (1 1 1)
1 by 2 by 2 MPI processor grid
reading atoms ...
8 atoms
replicate 8 8 8
orthogonal box = (0 0 0) to (8 8 8)
1 by 2 by 2 MPI processor grid
4096 atoms
Time spent = 0.000270367 secs
velocity all create 1.5 49893
neighbor 1.0 bin
neigh_modify delay 0
fix 1 all nve
# LAMMPS computes pairwise and long-range Coulombics
#pair_style coul/long 3.0
#pair_coeff * *
#kspace_style pppm 1.0e-3
# Scafacos computes entire long-range Coulombics
# use dummy pair style to perform atom sorting
pair_style zero 1.0
pair_coeff * *
#fix 2 all scafacos p3m tolerance field 0.001
kspace_style scafacos fmm 0.001
kspace_modify scafacos tolerance energy
timestep 0.005
thermo 10
run 100
Setting up ScaFaCoS with solver fmm ...
Neighbor list info ...
update every 1 steps, delay 0 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 2
ghost atom cutoff = 2
binsize = 1, bins = 8 8 8
1 neighbor lists, perpetual/occasional/extra = 1 0 0
(1) pair zero, perpetual
attributes: half, newton on
pair build: half/bin/atomonly/newton
stencil: half/bin/3d/newton
bin: standard
Per MPI rank memory allocation (min/avg/max) = 4.008 | 4.008 | 4.008 Mbytes
Step Temp E_pair E_mol TotEng Press
0 1.5 -1.7475646 0 0.50188608 10.44368
10 1.5000016 -1.7475671 0 0.50188602 10.44369
20 1.4999827 -1.7475388 0 0.50188592 10.443564
30 1.4999016 -1.7474173 0 0.5018858 10.443023
40 1.4997356 -1.7471685 0 0.50188572 10.441917
50 1.4995414 -1.7468771 0 0.5018858 10.440623
60 1.4995587 -1.7469027 0 0.50188622 10.440739
70 1.5003837 -1.7481389 0 0.50188727 10.446238
80 1.5032684 -1.7524625 0 0.50188958 10.465466
90 1.5107749 -1.763714 0 0.50189507 10.515502
100 1.52919 -1.791306 0 0.50191895 10.638261
Loop time of 10.0781 on 4 procs for 100 steps with 4096 atoms
Performance: 4286.533 tau/day, 9.923 timesteps/s
99.9% CPU use with 4 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0.00071096 | 0.00073177 | 0.00075269 | 0.0 | 0.01
Kspace | 10.056 | 10.057 | 10.057 | 0.0 | 99.79
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0.01492 | 0.015036 | 0.015207 | 0.1 | 0.15
Output | 0.00036311 | 0.00039428 | 0.00046515 | 0.0 | 0.00
Modify | 0.002944 | 0.0030704 | 0.0033708 | 0.3 | 0.03
Other | | 0.002214 | | | 0.02
Nlocal: 1024 ave 1024 max 1024 min
Histogram: 4 0 0 0 0 0 0 0 0 0
Nghost: 5120 ave 5120 max 5120 min
Histogram: 4 0 0 0 0 0 0 0 0 0
Neighs: 131072 ave 131072 max 131072 min
Histogram: 4 0 0 0 0 0 0 0 0 0
Total # of neighbors = 524288
Ave neighs/atom = 128
Neighbor list builds = 0
Dangerous builds = 0
Total wall time: 0:00:11

View File

@ -0,0 +1,102 @@
LAMMPS (2 Aug 2018)
OMP_NUM_THREADS environment is not set. Defaulting to 1 thread. (src/comm.cpp:87)
using 1 OpenMP thread(s) per MPI task
# Point dipoles in a 2d box
units lj
atom_style charge
read_data data.NaCl
orthogonal box = (0 0 0) to (1 1 1)
2 by 2 by 2 MPI processor grid
reading atoms ...
8 atoms
replicate 8 8 8
orthogonal box = (0 0 0) to (8 8 8)
2 by 2 by 2 MPI processor grid
4096 atoms
Time spent = 0.000236988 secs
velocity all create 1.5 49893
neighbor 1.0 bin
neigh_modify delay 0
fix 1 all nve
# LAMMPS computes pairwise and long-range Coulombics
#pair_style coul/long 3.0
#pair_coeff * *
#kspace_style pppm 1.0e-3
# Scafacos computes entire long-range Coulombics
# use dummy pair style to perform atom sorting
pair_style zero 1.0
pair_coeff * *
#fix 2 all scafacos p3m tolerance field 0.001
kspace_style scafacos fmm 0.001
kspace_modify scafacos tolerance energy
timestep 0.005
thermo 10
run 100
Setting up ScaFaCoS with solver fmm ...
Neighbor list info ...
update every 1 steps, delay 0 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 2
ghost atom cutoff = 2
binsize = 1, bins = 8 8 8
1 neighbor lists, perpetual/occasional/extra = 1 0 0
(1) pair zero, perpetual
attributes: half, newton on
pair build: half/bin/atomonly/newton
stencil: half/bin/3d/newton
bin: standard
Per MPI rank memory allocation (min/avg/max) = 3.508 | 3.508 | 3.508 Mbytes
Step Temp E_pair E_mol TotEng Press
0 1.5 -1.7475646 0 0.50188608 10.44368
10 1.5000016 -1.7475671 0 0.50188602 10.44369
20 1.4999827 -1.7475388 0 0.50188592 10.443564
30 1.4999016 -1.7474173 0 0.5018858 10.443023
40 1.4997356 -1.7471685 0 0.50188572 10.441917
50 1.4995414 -1.7468771 0 0.5018858 10.440623
60 1.4995587 -1.7469027 0 0.50188622 10.440739
70 1.5003837 -1.7481389 0 0.50188727 10.446238
80 1.5032684 -1.7524625 0 0.50188958 10.465466
90 1.5107749 -1.763714 0 0.50189507 10.515502
100 1.52919 -1.791306 0 0.50191895 10.638261
Loop time of 5.96037 on 8 procs for 100 steps with 4096 atoms
Performance: 7247.876 tau/day, 16.777 timesteps/s
99.8% CPU use with 8 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0.00049591 | 0.0005368 | 0.00056005 | 0.0 | 0.01
Kspace | 5.94 | 5.941 | 5.9419 | 0.0 | 99.68
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0.013702 | 0.014631 | 0.015768 | 0.6 | 0.25
Output | 0.00044751 | 0.00048846 | 0.00058961 | 0.0 | 0.01
Modify | 0.0016675 | 0.0017205 | 0.0017893 | 0.1 | 0.03
Other | | 0.001971 | | | 0.03
Nlocal: 512 ave 512 max 512 min
Histogram: 8 0 0 0 0 0 0 0 0 0
Nghost: 3584 ave 3584 max 3584 min
Histogram: 8 0 0 0 0 0 0 0 0 0
Neighs: 65536 ave 65536 max 65536 min
Histogram: 8 0 0 0 0 0 0 0 0 0
Total # of neighbors = 524288
Ave neighs/atom = 128
Neighbor list builds = 0
Dangerous builds = 0
Total wall time: 0:00:07

View File

@ -0,0 +1,102 @@
LAMMPS (2 Aug 2018)
OMP_NUM_THREADS environment is not set. Defaulting to 1 thread. (src/comm.cpp:87)
using 1 OpenMP thread(s) per MPI task
# Point dipoles in a 2d box
units lj
atom_style charge
read_data data.NaCl
orthogonal box = (0 0 0) to (1 1 1)
1 by 1 by 1 MPI processor grid
reading atoms ...
8 atoms
replicate 8 8 8
orthogonal box = (0 0 0) to (8 8 8)
1 by 1 by 1 MPI processor grid
4096 atoms
Time spent = 0.00049448 secs
velocity all create 1.5 49893
neighbor 1.0 bin
neigh_modify delay 0
fix 1 all nve
# LAMMPS computes pairwise and long-range Coulombics
#pair_style coul/long 3.0
#pair_coeff * *
#kspace_style pppm 1.0e-3
# Scafacos computes entire long-range Coulombics
# use dummy pair style to perform atom sorting
pair_style zero 1.0
pair_coeff * *
#fix 2 all scafacos p3m tolerance field 0.001
kspace_style scafacos p2nfft 0.001
kspace_modify scafacos tolerance field
timestep 0.005
thermo 10
run 100
Setting up ScaFaCoS with solver p2nfft ...
Neighbor list info ...
update every 1 steps, delay 0 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 2
ghost atom cutoff = 2
binsize = 1, bins = 8 8 8
1 neighbor lists, perpetual/occasional/extra = 1 0 0
(1) pair zero, perpetual
attributes: half, newton on
pair build: half/bin/atomonly/newton
stencil: half/bin/3d/newton
bin: standard
Per MPI rank memory allocation (min/avg/max) = 5.813 | 5.813 | 5.813 Mbytes
Step Temp E_pair E_mol TotEng Press
0 1.5 -1.7477245 0 0.50172614 10.443537
10 1.5000176 -1.7475898 0 0.50188725 10.443798
20 1.5000161 -1.7475262 0 0.50194874 10.443843
30 1.4999486 -1.7474019 0 0.50197176 10.443413
40 1.4997889 -1.7471525 0 0.50198161 10.442357
50 1.4995945 -1.7468614 0 0.50198122 10.441061
60 1.499609 -1.7468813 0 0.50198309 10.44116
70 1.5004314 -1.7481179 0 0.50197962 10.446638
80 1.5033149 -1.7524495 0 0.50197233 10.46585
90 1.5108219 -1.7637095 0 0.50197005 10.515883
100 1.529239 -1.7913105 0 0.501988 10.638649
Loop time of 18.1113 on 1 procs for 100 steps with 4096 atoms
Performance: 2385.257 tau/day, 5.521 timesteps/s
100.0% CPU use with 1 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0.0014985 | 0.0014985 | 0.0014985 | 0.0 | 0.01
Kspace | 18.079 | 18.079 | 18.079 | 0.0 | 99.82
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0.014229 | 0.014229 | 0.014229 | 0.0 | 0.08
Output | 0.0004642 | 0.0004642 | 0.0004642 | 0.0 | 0.00
Modify | 0.011227 | 0.011227 | 0.011227 | 0.0 | 0.06
Other | | 0.004455 | | | 0.02
Nlocal: 4096 ave 4096 max 4096 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 9728 ave 9728 max 9728 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 524288 ave 524288 max 524288 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 524288
Ave neighs/atom = 128
Neighbor list builds = 0
Dangerous builds = 0
Total wall time: 0:00:21
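This log and the following p2nfft runs (16, 2, 4, and 8 MPI tasks) reuse the same NaCl input with the solver switched once more; the lines that differ from the ewald sketch, as echoed in the log:

kspace_style    scafacos p2nfft 0.001
kspace_modify   scafacos tolerance field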

View File

@ -0,0 +1,102 @@
LAMMPS (2 Aug 2018)
OMP_NUM_THREADS environment is not set. Defaulting to 1 thread. (src/comm.cpp:87)
using 1 OpenMP thread(s) per MPI task
# Point dipoles in a 2d box
units lj
atom_style charge
read_data data.NaCl
orthogonal box = (0 0 0) to (1 1 1)
2 by 2 by 4 MPI processor grid
reading atoms ...
8 atoms
replicate 8 8 8
orthogonal box = (0 0 0) to (8 8 8)
2 by 2 by 4 MPI processor grid
4096 atoms
Time spent = 0.000361443 secs
velocity all create 1.5 49893
neighbor 1.0 bin
neigh_modify delay 0
fix 1 all nve
# LAMMPS computes pairwise and long-range Coulombics
#pair_style coul/long 3.0
#pair_coeff * *
#kspace_style pppm 1.0e-3
# Scafacos computes entire long-range Coulombics
# use dummy pair style to perform atom sorting
pair_style zero 1.0
pair_coeff * *
#fix 2 all scafacos p3m tolerance field 0.001
kspace_style scafacos p2nfft 0.001
kspace_modify scafacos tolerance field
timestep 0.005
thermo 10
run 100
Setting up ScaFaCoS with solver p2nfft ...
Neighbor list info ...
update every 1 steps, delay 0 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 2
ghost atom cutoff = 2
binsize = 1, bins = 8 8 8
1 neighbor lists, perpetual/occasional/extra = 1 0 0
(1) pair zero, perpetual
attributes: half, newton on
pair build: half/bin/atomonly/newton
stencil: half/bin/3d/newton
bin: standard
Per MPI rank memory allocation (min/avg/max) = 3.501 | 3.501 | 3.501 Mbytes
Step Temp E_pair E_mol TotEng Press
0 1.5 -1.7477245 0 0.50172614 10.443537
10 1.5000176 -1.7475898 0 0.50188725 10.443798
20 1.5000161 -1.7475262 0 0.50194874 10.443843
30 1.4999486 -1.7474019 0 0.50197176 10.443413
40 1.4997889 -1.7471525 0 0.50198161 10.442357
50 1.4995945 -1.7468614 0 0.50198122 10.441061
60 1.499609 -1.7468813 0 0.50198309 10.44116
70 1.5004314 -1.7481179 0 0.50197962 10.446638
80 1.5033149 -1.7524495 0 0.50197233 10.46585
90 1.5108219 -1.7637095 0 0.50197005 10.515883
100 1.529239 -1.7913105 0 0.501988 10.638649
Loop time of 1.56685 on 16 procs for 100 steps with 4096 atoms
Performance: 27571.239 tau/day, 63.822 timesteps/s
99.8% CPU use with 16 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0.00036407 | 0.00040755 | 0.00047517 | 0.0 | 0.03
Kspace | 1.5521 | 1.553 | 1.5536 | 0.0 | 99.12
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0.009537 | 0.010175 | 0.011894 | 0.6 | 0.65
Output | 0.000319 | 0.00039139 | 0.00052881 | 0.0 | 0.02
Modify | 0.00086999 | 0.00097834 | 0.0010362 | 0.0 | 0.06
Other | | 0.001859 | | | 0.12
Nlocal: 256 ave 256 max 256 min
Histogram: 16 0 0 0 0 0 0 0 0 0
Nghost: 2816 ave 2816 max 2816 min
Histogram: 16 0 0 0 0 0 0 0 0 0
Neighs: 32768 ave 32768 max 32768 min
Histogram: 16 0 0 0 0 0 0 0 0 0
Total # of neighbors = 524288
Ave neighs/atom = 128
Neighbor list builds = 0
Dangerous builds = 0
Total wall time: 0:00:01

View File

@ -0,0 +1,102 @@
LAMMPS (2 Aug 2018)
OMP_NUM_THREADS environment is not set. Defaulting to 1 thread. (src/comm.cpp:87)
using 1 OpenMP thread(s) per MPI task
# Point dipoles in a 2d box
units lj
atom_style charge
read_data data.NaCl
orthogonal box = (0 0 0) to (1 1 1)
1 by 1 by 2 MPI processor grid
reading atoms ...
8 atoms
replicate 8 8 8
orthogonal box = (0 0 0) to (8 8 8)
1 by 1 by 2 MPI processor grid
4096 atoms
Time spent = 0.0003438 secs
velocity all create 1.5 49893
neighbor 1.0 bin
neigh_modify delay 0
fix 1 all nve
# LAMMPS computes pairwise and long-range Coulombics
#pair_style coul/long 3.0
#pair_coeff * *
#kspace_style pppm 1.0e-3
# Scafacos computes entire long-range Coulombics
# use dummy pair style to perform atom sorting
pair_style zero 1.0
pair_coeff * *
#fix 2 all scafacos p3m tolerance field 0.001
kspace_style scafacos p2nfft 0.001
kspace_modify scafacos tolerance field
timestep 0.005
thermo 10
run 100
Setting up ScaFaCoS with solver p2nfft ...
Neighbor list info ...
update every 1 steps, delay 0 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 2
ghost atom cutoff = 2
binsize = 1, bins = 8 8 8
1 neighbor lists, perpetual/occasional/extra = 1 0 0
(1) pair zero, perpetual
attributes: half, newton on
pair build: half/bin/atomonly/newton
stencil: half/bin/3d/newton
bin: standard
Per MPI rank memory allocation (min/avg/max) = 4.574 | 4.574 | 4.574 Mbytes
Step Temp E_pair E_mol TotEng Press
0 1.5 -1.7477245 0 0.50172614 10.443537
10 1.5000176 -1.7475898 0 0.50188725 10.443798
20 1.5000161 -1.7475262 0 0.50194874 10.443843
30 1.4999486 -1.7474019 0 0.50197176 10.443413
40 1.4997889 -1.7471525 0 0.50198161 10.442357
50 1.4995945 -1.7468614 0 0.50198122 10.441061
60 1.499609 -1.7468813 0 0.50198309 10.44116
70 1.5004314 -1.7481179 0 0.50197962 10.446638
80 1.5033149 -1.7524495 0 0.50197233 10.46585
90 1.5108219 -1.7637095 0 0.50197005 10.515883
100 1.529239 -1.7913105 0 0.501988 10.638649
Loop time of 9.38943 on 2 procs for 100 steps with 4096 atoms
Performance: 4600.920 tau/day, 10.650 timesteps/s
99.9% CPU use with 2 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0.0010064 | 0.0010065 | 0.0010066 | 0.0 | 0.01
Kspace | 9.3602 | 9.3603 | 9.3604 | 0.0 | 99.69
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0.019444 | 0.01968 | 0.019916 | 0.2 | 0.21
Output | 0.00033355 | 0.00035357 | 0.0003736 | 0.0 | 0.00
Modify | 0.0055819 | 0.0056176 | 0.0056534 | 0.0 | 0.06
Other | | 0.002495 | | | 0.03
Nlocal: 2048 ave 2048 max 2048 min
Histogram: 2 0 0 0 0 0 0 0 0 0
Nghost: 7168 ave 7168 max 7168 min
Histogram: 2 0 0 0 0 0 0 0 0 0
Neighs: 262144 ave 262144 max 262144 min
Histogram: 2 0 0 0 0 0 0 0 0 0
Total # of neighbors = 524288
Ave neighs/atom = 128
Neighbor list builds = 0
Dangerous builds = 0
Total wall time: 0:00:11

View File

@ -0,0 +1,102 @@
LAMMPS (2 Aug 2018)
OMP_NUM_THREADS environment is not set. Defaulting to 1 thread. (src/comm.cpp:87)
using 1 OpenMP thread(s) per MPI task
# Point dipoles in a 2d box
units lj
atom_style charge
read_data data.NaCl
orthogonal box = (0 0 0) to (1 1 1)
1 by 2 by 2 MPI processor grid
reading atoms ...
8 atoms
replicate 8 8 8
orthogonal box = (0 0 0) to (8 8 8)
1 by 2 by 2 MPI processor grid
4096 atoms
Time spent = 0.000260592 secs
velocity all create 1.5 49893
neighbor 1.0 bin
neigh_modify delay 0
fix 1 all nve
# LAMMPS computes pairwise and long-range Coulombics
#pair_style coul/long 3.0
#pair_coeff * *
#kspace_style pppm 1.0e-3
# Scafacos computes entire long-range Coulombics
# use dummy pair style to perform atom sorting
pair_style zero 1.0
pair_coeff * *
#fix 2 all scafacos p3m tolerance field 0.001
kspace_style scafacos p2nfft 0.001
kspace_modify scafacos tolerance field
timestep 0.005
thermo 10
run 100
Setting up ScaFaCoS with solver p2nfft ...
Neighbor list info ...
update every 1 steps, delay 0 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 2
ghost atom cutoff = 2
binsize = 1, bins = 8 8 8
1 neighbor lists, perpetual/occasional/extra = 1 0 0
(1) pair zero, perpetual
attributes: half, newton on
pair build: half/bin/atomonly/newton
stencil: half/bin/3d/newton
bin: standard
Per MPI rank memory allocation (min/avg/max) = 4.008 | 4.008 | 4.008 Mbytes
Step Temp E_pair E_mol TotEng Press
0 1.5 -1.7477245 0 0.50172614 10.443537
10 1.5000176 -1.7475898 0 0.50188725 10.443798
20 1.5000161 -1.7475262 0 0.50194874 10.443843
30 1.4999486 -1.7474019 0 0.50197176 10.443413
40 1.4997889 -1.7471525 0 0.50198161 10.442357
50 1.4995945 -1.7468614 0 0.50198122 10.441061
60 1.499609 -1.7468813 0 0.50198309 10.44116
70 1.5004314 -1.7481179 0 0.50197962 10.446638
80 1.5033149 -1.7524495 0 0.50197233 10.46585
90 1.5108219 -1.7637095 0 0.50197005 10.515883
100 1.529239 -1.7913105 0 0.501988 10.638649
Loop time of 5.09997 on 4 procs for 100 steps with 4096 atoms
Performance: 8470.643 tau/day, 19.608 timesteps/s
99.8% CPU use with 4 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0.00069928 | 0.00071001 | 0.00073647 | 0.0 | 0.01
Kspace | 5.0795 | 5.0796 | 5.0797 | 0.0 | 99.60
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0.014101 | 0.014216 | 0.014331 | 0.1 | 0.28
Output | 0.00030541 | 0.00033581 | 0.00039625 | 0.0 | 0.01
Modify | 0.0030217 | 0.0030621 | 0.0030868 | 0.0 | 0.06
Other | | 0.002036 | | | 0.04
Nlocal: 1024 ave 1024 max 1024 min
Histogram: 4 0 0 0 0 0 0 0 0 0
Nghost: 5120 ave 5120 max 5120 min
Histogram: 4 0 0 0 0 0 0 0 0 0
Neighs: 131072 ave 131072 max 131072 min
Histogram: 4 0 0 0 0 0 0 0 0 0
Total # of neighbors = 524288
Ave neighs/atom = 128
Neighbor list builds = 0
Dangerous builds = 0
Total wall time: 0:00:05

View File

@ -0,0 +1,102 @@
LAMMPS (2 Aug 2018)
OMP_NUM_THREADS environment is not set. Defaulting to 1 thread. (src/comm.cpp:87)
using 1 OpenMP thread(s) per MPI task
# Point dipoles in a 2d box
units lj
atom_style charge
read_data data.NaCl
orthogonal box = (0 0 0) to (1 1 1)
2 by 2 by 2 MPI processor grid
reading atoms ...
8 atoms
replicate 8 8 8
orthogonal box = (0 0 0) to (8 8 8)
2 by 2 by 2 MPI processor grid
4096 atoms
Time spent = 0.000324488 secs
velocity all create 1.5 49893
neighbor 1.0 bin
neigh_modify delay 0
fix 1 all nve
# LAMMPS computes pairwise and long-range Coulombics
#pair_style coul/long 3.0
#pair_coeff * *
#kspace_style pppm 1.0e-3
# Scafacos computes entire long-range Coulombics
# use dummy pair style to perform atom sorting
pair_style zero 1.0
pair_coeff * *
#fix 2 all scafacos p3m tolerance field 0.001
kspace_style scafacos p2nfft 0.001
kspace_modify scafacos tolerance field
timestep 0.005
thermo 10
run 100
Setting up ScaFaCoS with solver p2nfft ...
Neighbor list info ...
update every 1 steps, delay 0 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 2
ghost atom cutoff = 2
binsize = 1, bins = 8 8 8
1 neighbor lists, perpetual/occasional/extra = 1 0 0
(1) pair zero, perpetual
attributes: half, newton on
pair build: half/bin/atomonly/newton
stencil: half/bin/3d/newton
bin: standard
Per MPI rank memory allocation (min/avg/max) = 3.508 | 3.508 | 3.508 Mbytes
Step Temp E_pair E_mol TotEng Press
0 1.5 -1.7477245 0 0.50172614 10.443537
10 1.5000176 -1.7475898 0 0.50188725 10.443798
20 1.5000161 -1.7475262 0 0.50194874 10.443843
30 1.4999486 -1.7474019 0 0.50197176 10.443413
40 1.4997889 -1.7471525 0 0.50198161 10.442357
50 1.4995945 -1.7468614 0 0.50198122 10.441061
60 1.499609 -1.7468813 0 0.50198309 10.44116
70 1.5004314 -1.7481179 0 0.50197962 10.446638
80 1.5033149 -1.7524495 0 0.50197233 10.46585
90 1.5108219 -1.7637095 0 0.50197005 10.515883
100 1.529239 -1.7913105 0 0.501988 10.638649
Loop time of 2.88506 on 8 procs for 100 steps with 4096 atoms
Performance: 14973.700 tau/day, 34.661 timesteps/s
99.6% CPU use with 8 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0.000489 | 0.00051507 | 0.00052857 | 0.0 | 0.02
Kspace | 2.8657 | 2.866 | 2.8664 | 0.0 | 99.34
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0.014354 | 0.014851 | 0.015097 | 0.2 | 0.51
Output | 0.00037169 | 0.00042769 | 0.00054169 | 0.0 | 0.01
Modify | 0.0015774 | 0.0016578 | 0.0018044 | 0.2 | 0.06
Other | | 0.001645 | | | 0.06
Nlocal: 512 ave 512 max 512 min
Histogram: 8 0 0 0 0 0 0 0 0 0
Nghost: 3584 ave 3584 max 3584 min
Histogram: 8 0 0 0 0 0 0 0 0 0
Neighs: 65536 ave 65536 max 65536 min
Histogram: 8 0 0 0 0 0 0 0 0 0
Total # of neighbors = 524288
Ave neighs/atom = 128
Neighbor list builds = 0
Dangerous builds = 0
Total wall time: 0:00:03

View File

@ -0,0 +1,105 @@
LAMMPS (2 Aug 2018)
OMP_NUM_THREADS environment is not set. Defaulting to 1 thread. (src/comm.cpp:87)
using 1 OpenMP thread(s) per MPI task
# Point dipoles in a 2d box
units lj
atom_style charge
read_data data.hammersley_shphere
orthogonal box = (-50.5 -50.5 -50.5) to (51.5 51.5 51.5)
1 by 1 by 1 MPI processor grid
reading atoms ...
1000 atoms
change_box all boundary f f f
velocity all create 1.5 49893
neighbor 1.0 bin
neigh_modify delay 0
fix 1 all nve
# LAMMPS computes pairwise and long-range Coulombics
#pair_style coul/long 3.0
#pair_coeff * *
#kspace_style pppm 1.0e-3
# Scafacos computes entire long-range Coulombics
# use dummy pair style to perform atom sorting
pair_style zero 1.0
pair_coeff * *
#fix 2 all scafacos p3m tolerance field 0.001
kspace_style scafacos direct 0.001
timestep 0.005
thermo 1
run 20
Setting up ScaFaCoS with solver direct ...
Neighbor list info ...
update every 1 steps, delay 0 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 2
ghost atom cutoff = 2
binsize = 1, bins = 102 102 102
1 neighbor lists, perpetual/occasional/extra = 1 0 0
(1) pair zero, perpetual
attributes: half, newton on
pair build: half/bin/atomonly/newton
stencil: half/bin/3d/newton
bin: standard
Per MPI rank memory allocation (min/avg/max) = 10.3 | 10.3 | 10.3 Mbytes
Step Temp E_pair E_mol TotEng Press
0 1.5 -0.62417787 0 1.6235721 0.0015678854
1 18.780041 -10.770002 0 17.371889 0.016718957
2 65.289192 -11.084705 0 86.751149 0.060353634
3 121.92987 -7.0625759 0 175.64933 0.11404974
4 185.78164 -5.8777512 0 272.51604 0.17462195
5 286.36222 -4.382053 0 424.73173 0.26918926
6 481.42206 -4.3095567 0 717.1014 0.45274088
7 488.59167 -3.8685194 0 728.2861 0.45956866
8 497.85287 -3.0417966 0 742.99073 0.46838116
9 499.61615 -3.419003 0 745.2558 0.46983345
10 502.63684 -2.8360961 0 750.36521 0.47280809
11 504.4846 -2.7628105 0 753.20736 0.47462793
12 506.54485 -2.8460356 0 756.21142 0.47651441
13 508.27211 -2.730935 0 758.91482 0.47813752
14 510.57045 -2.6094877 0 762.48033 0.48031431
15 513.14798 -2.7150827 0 766.23717 0.48275229
16 515.78124 -2.3961811 0 770.50201 0.48526333
17 515.70265 -2.2982683 0 770.48215 0.48526617
18 515.7081 -2.1515983 0 770.63699 0.48530393
19 515.74906 -2.0581436 0 770.79182 0.48530977
20 515.70883 -1.8922577 0 770.89742 0.48527105
Loop time of 0.465839 on 1 procs for 20 steps with 1000 atoms
Performance: 18547.165 tau/day, 42.933 timesteps/s
99.9% CPU use with 1 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0.00021982 | 0.00021982 | 0.00021982 | 0.0 | 0.05
Kspace | 0.3218 | 0.3218 | 0.3218 | 0.0 | 69.08
Neigh | 0.14249 | 0.14249 | 0.14249 | 0.0 | 30.59
Comm | 0.00014853 | 0.00014853 | 0.00014853 | 0.0 | 0.03
Output | 0.00026131 | 0.00026131 | 0.00026131 | 0.0 | 0.06
Modify | 0.00055146 | 0.00055146 | 0.00055146 | 0.0 | 0.12
Other | | 0.0003715 | | | 0.08
Nlocal: 1000 ave 1000 max 1000 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 0 ave 0 max 0 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 247817 ave 247817 max 247817 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 247817
Ave neighs/atom = 247.817
Neighbor list builds = 19
Dangerous builds = 18
Total wall time: 0:00:00
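This log and the following direct-summation runs (2, 4, and 8 MPI tasks) move from the replicated NaCl crystal to a 1000-atom cluster with non-periodic boundaries; the replicate command is dropped and the run is shortened. The lines that differ from the ewald sketch, taken from the commands echoed in the log (data.hammersley_shphere is the cluster data file referenced by the example, filename spelled as in the log):

read_data       data.hammersley_shphere       # finite 1000-atom cluster instead of the replicated NaCl box
change_box      all boundary f f f            # fully non-periodic boundaries
kspace_style    scafacos direct 0.001         # direct summation; the log shows no kspace_modify tolerance line
thermo          1
run             20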

View File

@ -0,0 +1,105 @@
LAMMPS (2 Aug 2018)
OMP_NUM_THREADS environment is not set. Defaulting to 1 thread. (src/comm.cpp:87)
using 1 OpenMP thread(s) per MPI task
# Point dipoles in a 2d box
units lj
atom_style charge
read_data data.hammersley_shphere
orthogonal box = (-50.5 -50.5 -50.5) to (51.5 51.5 51.5)
1 by 1 by 2 MPI processor grid
reading atoms ...
1000 atoms
change_box all boundary f f f
velocity all create 1.5 49893
neighbor 1.0 bin
neigh_modify delay 0
fix 1 all nve
# LAMMPS computes pairwise and long-range Coulombics
#pair_style coul/long 3.0
#pair_coeff * *
#kspace_style pppm 1.0e-3
# Scafacos computes entire long-range Coulombics
# use dummy pair style to perform atom sorting
pair_style zero 1.0
pair_coeff * *
#fix 2 all scafacos p3m tolerance field 0.001
kspace_style scafacos direct 0.001
timestep 0.005
thermo 1
run 20
Setting up ScaFaCoS with solver direct ...
Neighbor list info ...
update every 1 steps, delay 0 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 2
ghost atom cutoff = 2
binsize = 1, bins = 102 102 102
1 neighbor lists, perpetual/occasional/extra = 1 0 0
(1) pair zero, perpetual
attributes: half, newton on
pair build: half/bin/atomonly/newton
stencil: half/bin/3d/newton
bin: standard
Per MPI rank memory allocation (min/avg/max) = 6.48 | 6.861 | 7.243 Mbytes
Step Temp E_pair E_mol TotEng Press
0 1.5 -0.62417787 0 1.6235721 0.0015678854
1 18.780041 -10.770002 0 17.371889 0.016718957
2 65.289192 -11.084705 0 86.751149 0.060353634
3 121.92987 -7.0625759 0 175.64933 0.11404974
4 185.78164 -5.8777512 0 272.51604 0.17462195
5 286.36222 -4.382053 0 424.73173 0.26918926
6 481.42206 -4.3095567 0 717.1014 0.45274088
7 488.59167 -3.8685194 0 728.2861 0.45956866
8 497.85287 -3.0417966 0 742.99073 0.46838116
9 499.61615 -3.419003 0 745.2558 0.46983345
10 502.63684 -2.8360961 0 750.36521 0.47280809
11 504.4846 -2.7628105 0 753.20736 0.47462793
12 506.54485 -2.8460356 0 756.21142 0.47651441
13 508.27211 -2.730935 0 758.91482 0.47813752
14 510.57045 -2.6094877 0 762.48033 0.48031431
15 513.14798 -2.7150827 0 766.23717 0.48275229
16 515.78124 -2.3961811 0 770.50201 0.48526333
17 515.70265 -2.2982683 0 770.48215 0.48526617
18 515.7081 -2.1515983 0 770.63699 0.48530393
19 515.74906 -2.0581436 0 770.79182 0.48530977
20 515.70883 -1.8922577 0 770.89742 0.48527105
Loop time of 0.284007 on 2 procs for 20 steps with 1000 atoms
Performance: 30421.778 tau/day, 70.421 timesteps/s
99.1% CPU use with 2 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0.00022578 | 0.00022626 | 0.00022674 | 0.0 | 0.08
Kspace | 0.18253 | 0.20503 | 0.22752 | 5.0 | 72.19
Neigh | 0.05363 | 0.076239 | 0.098848 | 8.2 | 26.84
Comm | 0.0014737 | 0.0016443 | 0.0018148 | 0.4 | 0.58
Output | 0.000247 | 0.00032353 | 0.00040007 | 0.0 | 0.11
Modify | 0.00029159 | 0.00029731 | 0.00030303 | 0.0 | 0.10
Other | | 0.0002506 | | | 0.09
Nlocal: 500 ave 516 max 484 min
Histogram: 1 0 0 0 0 0 0 0 0 1
Nghost: 456.5 ave 475 max 438 min
Histogram: 1 0 0 0 0 0 0 0 0 1
Neighs: 123908 ave 172139 max 75678 min
Histogram: 1 0 0 0 0 0 0 0 0 1
Total # of neighbors = 247817
Ave neighs/atom = 247.817
Neighbor list builds = 19
Dangerous builds = 18
Total wall time: 0:00:00

View File

@ -0,0 +1,105 @@
LAMMPS (2 Aug 2018)
OMP_NUM_THREADS environment is not set. Defaulting to 1 thread. (src/comm.cpp:87)
using 1 OpenMP thread(s) per MPI task
# Point dipoles in a 2d box
units lj
atom_style charge
read_data data.hammersley_shphere
orthogonal box = (-50.5 -50.5 -50.5) to (51.5 51.5 51.5)
1 by 2 by 2 MPI processor grid
reading atoms ...
1000 atoms
change_box all boundary f f f
velocity all create 1.5 49893
neighbor 1.0 bin
neigh_modify delay 0
fix 1 all nve
# LAMMPS computes pairwise and long-range Coulombics
#pair_style coul/long 3.0
#pair_coeff * *
#kspace_style pppm 1.0e-3
# Scafacos computes entire long-range Coulombics
# use dummy pair style to perform atom sorting
pair_style zero 1.0
pair_coeff * *
#fix 2 all scafacos p3m tolerance field 0.001
kspace_style scafacos direct 0.001
timestep 0.005
thermo 1
run 20
Setting up ScaFaCoS with solver direct ...
Neighbor list info ...
update every 1 steps, delay 0 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 2
ghost atom cutoff = 2
binsize = 1, bins = 102 102 102
1 neighbor lists, perpetual/occasional/extra = 1 0 0
(1) pair zero, perpetual
attributes: half, newton on
pair build: half/bin/atomonly/newton
stencil: half/bin/3d/newton
bin: standard
Per MPI rank memory allocation (min/avg/max) = 4.837 | 5.123 | 5.6 Mbytes
Step Temp E_pair E_mol TotEng Press
0 1.5 -0.62417787 0 1.6235721 0.0015678854
1 18.780041 -10.770002 0 17.371889 0.016718957
2 65.289192 -11.084705 0 86.751149 0.060353634
3 121.92987 -7.0625759 0 175.64933 0.11404974
4 185.78164 -5.8777512 0 272.51604 0.17462195
5 286.36222 -4.382053 0 424.73173 0.26918926
6 481.42206 -4.3095567 0 717.1014 0.45274088
7 488.59167 -3.8685194 0 728.2861 0.45956866
8 497.85287 -3.0417966 0 742.99073 0.46838116
9 499.61615 -3.419003 0 745.2558 0.46983345
10 502.63684 -2.8360961 0 750.36521 0.47280809
11 504.4846 -2.7628105 0 753.20736 0.47462793
12 506.54485 -2.8460356 0 756.21142 0.47651441
13 508.27211 -2.730935 0 758.91482 0.47813752
14 510.57045 -2.6094877 0 762.48033 0.48031431
15 513.14798 -2.7150827 0 766.23717 0.48275229
16 515.78124 -2.3961811 0 770.50201 0.48526333
17 515.70265 -2.2982683 0 770.48215 0.48526617
18 515.7081 -2.1515983 0 770.63699 0.48530393
19 515.74906 -2.0581436 0 770.79182 0.48530977
20 515.70883 -1.8922577 0 770.89742 0.48527105
Loop time of 0.161335 on 4 procs for 20 steps with 1000 atoms
Performance: 53553.228 tau/day, 123.966 timesteps/s
99.5% CPU use with 4 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0.00022721 | 0.00023353 | 0.000247 | 0.0 | 0.14
Kspace | 0.10295 | 0.11808 | 0.13377 | 3.5 | 73.19
Neigh | 0.023849 | 0.039717 | 0.055031 | 6.1 | 24.62
Comm | 0.0023148 | 0.0025774 | 0.0028391 | 0.4 | 1.60
Output | 0.00029063 | 0.00038403 | 0.00050664 | 0.0 | 0.24
Modify | 0.00015664 | 0.00015944 | 0.00016165 | 0.0 | 0.10
Other | | 0.0001805 | | | 0.11
Nlocal: 250 ave 259 max 238 min
Histogram: 1 0 0 1 0 0 0 0 0 2
Nghost: 672.25 ave 683 max 663 min
Histogram: 2 0 0 0 0 0 0 0 1 1
Neighs: 61954.2 ave 97157 max 25016 min
Histogram: 1 0 0 1 0 0 1 0 0 1
Total # of neighbors = 247817
Ave neighs/atom = 247.817
Neighbor list builds = 19
Dangerous builds = 18
Total wall time: 0:00:00

View File

@ -0,0 +1,105 @@
LAMMPS (2 Aug 2018)
OMP_NUM_THREADS environment is not set. Defaulting to 1 thread. (src/comm.cpp:87)
using 1 OpenMP thread(s) per MPI task
# Point dipoles in a 2d box
units lj
atom_style charge
read_data data.hammersley_shphere
orthogonal box = (-50.5 -50.5 -50.5) to (51.5 51.5 51.5)
2 by 2 by 2 MPI processor grid
reading atoms ...
1000 atoms
change_box all boundary f f f
velocity all create 1.5 49893
neighbor 1.0 bin
neigh_modify delay 0
fix 1 all nve
# LAMMPS computes pairwise and long-range Coulombics
#pair_style coul/long 3.0
#pair_coeff * *
#kspace_style pppm 1.0e-3
# Scafacos computes entire long-range Coulombics
# use dummy pair style to perform atom sorting
pair_style zero 1.0
pair_coeff * *
#fix 2 all scafacos p3m tolerance field 0.001
kspace_style scafacos direct 0.001
timestep 0.005
thermo 1
run 20
Setting up ScaFaCoS with solver direct ...
Neighbor list info ...
update every 1 steps, delay 0 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 2
ghost atom cutoff = 2
binsize = 1, bins = 102 102 102
1 neighbor lists, perpetual/occasional/extra = 1 0 0
(1) pair zero, perpetual
attributes: half, newton on
pair build: half/bin/atomonly/newton
stencil: half/bin/3d/newton
bin: standard
Per MPI rank memory allocation (min/avg/max) = 4.164 | 4.26 | 4.546 Mbytes
Step Temp E_pair E_mol TotEng Press
0 1.5 -0.62417787 0 1.6235721 0.0015678854
1 18.780041 -10.770002 0 17.371889 0.016718957
2 65.289192 -11.084705 0 86.751149 0.060353634
3 121.92987 -7.0625759 0 175.64933 0.11404974
4 185.78164 -5.8777512 0 272.51604 0.17462195
5 286.36222 -4.382053 0 424.73173 0.26918926
6 481.42206 -4.3095567 0 717.1014 0.45274088
7 488.59167 -3.8685194 0 728.2861 0.45956866
8 497.85287 -3.0417966 0 742.99073 0.46838116
9 499.61615 -3.419003 0 745.2558 0.46983345
10 502.63684 -2.8360961 0 750.36521 0.47280809
11 504.4846 -2.7628105 0 753.20736 0.47462793
12 506.54485 -2.8460356 0 756.21142 0.47651441
13 508.27211 -2.730935 0 758.91482 0.47813752
14 510.57045 -2.6094877 0 762.48033 0.48031431
15 513.14798 -2.7150827 0 766.23717 0.48275229
16 515.78124 -2.3961811 0 770.50201 0.48526333
17 515.70265 -2.2982683 0 770.48215 0.48526617
18 515.7081 -2.1515983 0 770.63699 0.48530393
19 515.74906 -2.0581436 0 770.79182 0.48530977
20 515.70883 -1.8922577 0 770.89742 0.48527105
Loop time of 0.0883947 on 8 procs for 20 steps with 1000 atoms
Performance: 97743.448 tau/day, 226.258 timesteps/s
99.2% CPU use with 8 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0.0002284 | 0.00024167 | 0.00029922 | 0.0 | 0.27
Kspace | 0.055725 | 0.063153 | 0.071883 | 2.4 | 71.44
Neigh | 0.012251 | 0.021348 | 0.029026 | 4.3 | 24.15
Comm | 0.0025573 | 0.0029825 | 0.0034359 | 0.5 | 3.37
Output | 0.00034451 | 0.00044149 | 0.00057721 | 0.0 | 0.50
Modify | 7.8917e-05 | 8.437e-05 | 8.9407e-05 | 0.0 | 0.10
Other | | 0.0001439 | | | 0.16
Nlocal: 125 ave 133 max 113 min
Histogram: 2 0 0 0 0 1 1 0 2 2
Nghost: 773.625 ave 788 max 764 min
Histogram: 1 1 2 1 1 0 0 0 1 1
Neighs: 30977.1 ave 50690 max 10447 min
Histogram: 1 1 1 0 1 1 0 0 2 1
Total # of neighbors = 247817
Ave neighs/atom = 247.817
Neighbor list builds = 19
Dangerous builds = 18
Total wall time: 0:00:00

View File

@ -0,0 +1,109 @@
LAMMPS (2 Aug 2018)
OMP_NUM_THREADS environment is not set. Defaulting to 1 thread. (src/comm.cpp:87)
using 1 OpenMP thread(s) per MPI task
# Point dipoles in a 2d box
units lj
atom_style charge
read_data data.hammersley_shphere
orthogonal box = (-50.5 -50.5 -50.5) to (51.5 51.5 51.5)
1 by 1 by 1 MPI processor grid
reading atoms ...
1000 atoms
change_box all boundary f f f
velocity all create 1.5 49893
neighbor 1.0 bin
neigh_modify delay 0
fix 1 all nve
# LAMMPS computes pairwise and long-range Coulombics
#pair_style coul/long 3.0
#pair_coeff * *
#kspace_style pppm 1.0e-3
# Scafacos computes entire long-range Coulombics
# use dummy pair style to perform atom sorting
pair_style zero 1.0
pair_coeff * *
#fix 2 all scafacos p3m tolerance field 0.001
kspace_style scafacos fmm 0.001
kspace_modify scafacos tolerance energy_rel
kspace_modify scafacos fmm_tuning 1
ScaFaCoS setting fmm inhomogen tuning ...
timestep 0.005
thermo 1
run 20
Setting up ScaFaCoS with solver fmm ...
Neighbor list info ...
update every 1 steps, delay 0 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 2
ghost atom cutoff = 2
binsize = 1, bins = 102 102 102
1 neighbor lists, perpetual/occasional/extra = 1 0 0
(1) pair zero, perpetual
attributes: half, newton on
pair build: half/bin/atomonly/newton
stencil: half/bin/3d/newton
bin: standard
Per MPI rank memory allocation (min/avg/max) = 10.3 | 10.3 | 10.3 Mbytes
Step Temp E_pair E_mol TotEng Press
0 1.5 -0.62417141 0 1.6235786 0.0015676581
1 18.780412 -10.770009 0 17.372438 0.016719188
2 65.294131 -11.084501 0 86.758754 0.06035827
3 121.92555 -7.0612033 0 175.64423 0.1140457
4 185.71165 -5.8781334 0 272.41077 0.17455524
5 286.28339 -4.3800108 0 424.61565 0.26911306
6 481.28097 -4.3052012 0 716.89433 0.45262045
7 487.26022 -3.8672741 0 726.29216 0.45830216
8 493.65478 -3.0242687 0 736.71742 0.46443761
9 495.66203 -3.4336343 0 739.31592 0.46613014
10 498.41831 -2.8837072 0 743.99613 0.46887706
11 499.20944 -2.7724783 0 745.29287 0.46966875
12 500.97345 -2.8281484 0 747.88057 0.47126462
13 507.46412 -2.7752775 0 757.65971 0.47728761
14 525.35729 -2.5749814 0 784.67292 0.49422171
15 563.9578 -2.9982381 0 842.09253 0.53043696
16 645.47602 -2.5519203 0 964.69389 0.60730795
17 647.09276 -2.2568468 0 967.41166 0.60891914
18 647.12596 -2.2791003 0 967.43915 0.60900309
19 647.24862 -2.2495226 0 967.65253 0.60908339
20 647.51175 -2.0239179 0 968.27244 0.60932598
Loop time of 0.797289 on 1 procs for 20 steps with 1000 atoms
Performance: 10836.721 tau/day, 25.085 timesteps/s
100.0% CPU use with 1 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0.00022364 | 0.00022364 | 0.00022364 | 0.0 | 0.03
Kspace | 0.6524 | 0.6524 | 0.6524 | 0.0 | 81.83
Neigh | 0.14312 | 0.14312 | 0.14312 | 0.0 | 17.95
Comm | 0.00020337 | 0.00020337 | 0.00020337 | 0.0 | 0.03
Output | 0.00036621 | 0.00036621 | 0.00036621 | 0.0 | 0.05
Modify | 0.00058126 | 0.00058126 | 0.00058126 | 0.0 | 0.07
Other | | 0.0003934 | | | 0.05
Nlocal: 1000 ave 1000 max 1000 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 0 ave 0 max 0 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 244342 ave 244342 max 244342 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 244342
Ave neighs/atom = 244.342
Neighbor list builds = 19
Dangerous builds = 18
Total wall time: 0:00:01
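The final two logs (1 and 2 MPI tasks) run the fmm solver on the same cluster, now with a relative energy tolerance and the inhomogeneous-system tuning enabled; the kspace lines, as echoed in the log:

kspace_style    scafacos fmm 0.001
kspace_modify   scafacos tolerance energy_rel  # relative, rather than absolute, energy tolerance
kspace_modify   scafacos fmm_tuning 1          # triggers the "ScaFaCoS setting fmm inhomogen tuning" message above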

View File

@ -0,0 +1,109 @@
LAMMPS (2 Aug 2018)
OMP_NUM_THREADS environment is not set. Defaulting to 1 thread. (src/comm.cpp:87)
using 1 OpenMP thread(s) per MPI task
# Point dipoles in a 2d box
units lj
atom_style charge
read_data data.hammersley_shphere
orthogonal box = (-50.5 -50.5 -50.5) to (51.5 51.5 51.5)
1 by 1 by 2 MPI processor grid
reading atoms ...
1000 atoms
change_box all boundary f f f
velocity all create 1.5 49893
neighbor 1.0 bin
neigh_modify delay 0
fix 1 all nve
# LAMMPS computes pairwise and long-range Coulombics
#pair_style coul/long 3.0
#pair_coeff * *
#kspace_style pppm 1.0e-3
# Scafacos computes entire long-range Coulombics
# use dummy pair style to perform atom sorting
pair_style zero 1.0
pair_coeff * *
#fix 2 all scafacos p3m tolerance field 0.001
kspace_style scafacos fmm 0.001
kspace_modify scafacos tolerance energy_rel
kspace_modify scafacos fmm_tuning 1
ScaFaCoS setting fmm inhomogen tuning ...
timestep 0.005
thermo 1
run 20
Setting up ScaFaCoS with solver fmm ...
Neighbor list info ...
update every 1 steps, delay 0 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 2
ghost atom cutoff = 2
binsize = 1, bins = 102 102 102
1 neighbor lists, perpetual/occasional/extra = 1 0 0
(1) pair zero, perpetual
attributes: half, newton on
pair build: half/bin/atomonly/newton
stencil: half/bin/3d/newton
bin: standard
Per MPI rank memory allocation (min/avg/max) = 6.48 | 6.861 | 7.243 Mbytes
Step Temp E_pair E_mol TotEng Press
0 1.5 -0.62417141 0 1.6235786 0.0015676581
1 18.780412 -10.770009 0 17.372438 0.016719188
2 65.294131 -11.084501 0 86.758754 0.06035827
3 121.92555 -7.0612033 0 175.64423 0.1140457
4 185.71165 -5.8781334 0 272.41077 0.17455524
5 286.28339 -4.3800108 0 424.61565 0.26911306
6 481.28097 -4.3052012 0 716.89433 0.45262045
7 487.26022 -3.8672741 0 726.29216 0.45830216
8 493.65478 -3.0242687 0 736.71742 0.46443761
9 495.66203 -3.4336343 0 739.31592 0.46613014
10 498.41831 -2.8837072 0 743.99613 0.46887706
11 499.20944 -2.7724783 0 745.29287 0.46966875
12 500.97345 -2.8281484 0 747.88057 0.47126462
13 507.46412 -2.7752775 0 757.65971 0.47728761
14 525.35729 -2.5749814 0 784.67292 0.49422171
15 563.9578 -2.9982381 0 842.09253 0.53043696
16 645.47602 -2.5519203 0 964.69389 0.60730795
17 647.09276 -2.2568468 0 967.41166 0.60891914
18 647.12596 -2.2791003 0 967.43915 0.60900309
19 647.24862 -2.2495226 0 967.65253 0.60908339
20 647.51175 -2.0239179 0 968.27244 0.60932598
Loop time of 0.701186 on 2 procs for 20 steps with 1000 atoms
Performance: 12321.981 tau/day, 28.523 timesteps/s
99.7% CPU use with 2 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0.00022388 | 0.00022912 | 0.00023437 | 0.0 | 0.03
Kspace | 0.60189 | 0.62405 | 0.64621 | 2.8 | 89.00
Neigh | 0.051681 | 0.073973 | 0.096265 | 8.2 | 10.55
Comm | 0.0016983 | 0.0018919 | 0.0020854 | 0.4 | 0.27
Output | 0.00034356 | 0.00044572 | 0.00054789 | 0.0 | 0.06
Modify | 0.00031281 | 0.0003171 | 0.00032139 | 0.0 | 0.05
Other | | 0.0002786 | | | 0.04
Nlocal: 500 ave 509 max 491 min
Histogram: 1 0 0 0 0 0 0 0 0 1
Nghost: 455.5 ave 467 max 444 min
Histogram: 1 0 0 0 0 0 0 0 0 1
Neighs: 122171 ave 171834 max 72508 min
Histogram: 1 0 0 0 0 0 0 0 0 1
Total # of neighbors = 244342
Ave neighs/atom = 244.342
Neighbor list builds = 19
Dangerous builds = 18
Total wall time: 0:00:01

Some files were not shown because too many files have changed in this diff.