Merge branch 'master' into kim-v2-update
commit f61f43a56b
@ -17,6 +17,7 @@ src/GPU/* @ndtrung81
src/KOKKOS/* @stanmoore1
src/KIM/* @ellio167
src/LATTE/* @cnegre
src/MESSAGE/* @sjplimp
src/SPIN/* @julient31
src/USER-CGDNA/* @ohenrich
src/USER-CGSDK/* @akohlmey
@ -29,19 +30,86 @@ src/USER-MOFFF/* @hheenen
src/USER-MOLFILE/* @akohlmey
src/USER-NETCDF/* @pastewka
src/USER-PHONON/* @lingtikong
src/USER-PTM/* @pmla
src/USER-OMP/* @akohlmey
src/USER-QMMM/* @akohlmey
src/USER-REAXC/* @hasanmetin
src/USER-SCAFACOS/* @rhalver
src/USER-TALLY/* @akohlmey
src/USER-UEF/* @danicholson
src/USER-VTK/* @rbberger

# individual files in packages
src/GPU/pair_vashishta_gpu.* @andeplane
src/KOKKOS/pair_vashishta_kokkos.* @andeplane
src/MANYBODY/pair_vashishta_table.* @andeplane
src/MANYBODY/pair_atm.* @sergeylishchuk
src/USER-MISC/fix_bond_react.* @jrgissing
src/USER-MISC/*_grem.* @dstelter92
src/USER-MISC/compute_stress_mop*.* @RomainVermorel

# core LAMMPS classes
src/lammps.* @sjplimp
src/pointers.h @sjplimp
src/atom.* @sjplimp
src/atom_vec.* @sjplimp
src/angle.* @sjplimp
src/bond.* @sjplimp
src/comm*.* @sjplimp
src/compute.* @sjplimp
src/dihedral.* @sjplimp
src/domain.* @sjplimp
src/dump*.* @sjplimp
src/error.* @sjplimp
src/finish.* @sjplimp
src/fix.* @sjplimp
src/force.* @sjplimp
src/group.* @sjplimp
src/improper.* @sjplimp
src/kspace.* @sjplimp
src/lmptyp.h @sjplimp
src/library.* @sjplimp
src/main.cpp @sjplimp
src/memory.* @sjplimp
src/modify.* @sjplimp
src/molecule.* @sjplimp
src/my_page.h @sjplimp
src/my_pool_chunk.h @sjplimp
src/npair*.* @sjplimp
src/ntopo*.* @sjplimp
src/nstencil*.* @sjplimp
src/neighbor.* @sjplimp
src/nbin*.* @sjplimp
src/neigh_*.* @sjplimp
src/output.* @sjplimp
src/pair.* @sjplimp
src/rcb.* @sjplimp
src/random_*.* @sjplimp
src/region*.* @sjplimp
src/read*.* @sjplimp
src/rerun.* @sjplimp
src/run.* @sjplimp
src/respa.* @sjplimp
src/set.* @sjplimp
src/special.* @sjplimp
src/suffix.h @sjplimp
src/thermo.* @sjplimp
src/universe.* @sjplimp
src/update.* @sjplimp
src/variable.* @sjplimp
src/verlet.* @sjplimp
src/velocity.* @sjplimp
src/write_data.* @sjplimp
src/write_restart.* @sjplimp

# overrides for specific files
src/dump_movie.* @akohlmey
src/exceptions.h @rbberger
src/fix_nh.* @athomps
src/info.* @akohlmey @rbberger
src/timer.* @akohlmey

# tools
tools/msi2lmp/* @akohlmey

@ -1,6 +1,7 @@
*~
*.o
*.so
*.lo
*.cu_o
*.ptx
*_ptx.h
@ -32,6 +33,7 @@ log.cite
.Trashes
ehthumbs.db
Thumbs.db
.clang-format

#cmake
/build*

@ -13,7 +13,7 @@ get_filename_component(LAMMPS_DOC_DIR ${CMAKE_CURRENT_SOURCE_DIR}/../doc ABSOLUT

# To avoid conflicts with the conventional Makefile build system, we build everything here
file(GLOB LIB_SOURCES ${LAMMPS_SOURCE_DIR}/*.cpp)
file(GLOB LIB_SOURCES ${LAMMPS_SOURCE_DIR}/[^.]*.cpp)
file(GLOB LMP_SOURCES ${LAMMPS_SOURCE_DIR}/main.cpp)
list(REMOVE_ITEM LIB_SOURCES ${LMP_SOURCES})
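[Editorial note] The "*.cpp" to "[^.]*.cpp" glob change above recurs throughout this patch. A short gloss, since the motivation is easy to miss:

# Unlike a POSIX shell, CMake's file(GLOB) also matches dot-files, so a stray
# editor artifact such as ".#pair_lj.cpp" (hypothetical name) would have been
# compiled by the old pattern; "[^.]*" requires a non-dot first character.
file(GLOB LIB_SOURCES ${LAMMPS_SOURCE_DIR}/[^.]*.cpp)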
@ -43,6 +43,29 @@ function(validate_option name values)
endif()
endfunction(validate_option)

function(get_lammps_version version_header variable)
file(READ ${version_header} line)
set(MONTHS x Jan Feb Mar Apr May Jun Jul Aug Sep Oct Nov Dec)
string(REGEX REPLACE "#define LAMMPS_VERSION \"([0-9]+) ([A-Za-z]+) ([0-9]+)\"" "\\1" day "${line}")
string(REGEX REPLACE "#define LAMMPS_VERSION \"([0-9]+) ([A-Za-z]+) ([0-9]+)\"" "\\2" month "${line}")
string(REGEX REPLACE "#define LAMMPS_VERSION \"([0-9]+) ([A-Za-z]+) ([0-9]+)\"" "\\3" year "${line}")
string(STRIP ${day} day)
string(STRIP ${month} month)
string(STRIP ${year} year)
list(FIND MONTHS "${month}" month)
string(LENGTH ${day} day_length)
string(LENGTH ${month} month_length)
if(day_length EQUAL 1)
set(day "0${day}")
endif()
if(month_length EQUAL 1)
set(month "0${month}")
endif()
set(${variable} "${year}${month}${day}" PARENT_SCOPE)
endfunction()

get_lammps_version(${LAMMPS_SOURCE_DIR}/version.h LAMMPS_VERSION)
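What get_lammps_version() produces, as a standalone sketch (editorial; runnable with "cmake -P", header contents hypothetical):

set(MONTHS x Jan Feb Mar Apr May Jun Jul Aug Sep Oct Nov Dec)
set(line "#define LAMMPS_VERSION \"22 Aug 2018\"")
string(REGEX REPLACE "#define LAMMPS_VERSION \"([0-9]+) ([A-Za-z]+) ([0-9]+)\"" "\\2" month "${line}")
list(FIND MONTHS "${month}" month)           # "Aug" -> 8, zero-padded to "08"
message(STATUS "sortable date: 20180822")    # year + month + day, as set above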
# CMake modules/macros are in a subdirectory to keep this file cleaner
set(CMAKE_MODULE_PATH ${CMAKE_CURRENT_SOURCE_DIR}/Modules)

@ -113,6 +136,7 @@ if(BUILD_EXE)
if(LAMMPS_MACHINE)
set(LAMMPS_MACHINE "_${LAMMPS_MACHINE}")
endif()
set(LAMMPS_BINARY lmp${LAMMPS_MACHINE})
endif()

option(BUILD_LIB "Build LAMMPS library" OFF)
@ -121,10 +145,10 @@ if(BUILD_LIB)
if(BUILD_SHARED_LIBS) # for all pkg libs, mpi_stubs and linalg
set(CMAKE_POSITION_INDEPENDENT_CODE ON)
endif()
set(LIB_SUFFIX "" CACHE STRING "Suffix to append to liblammps and pkg-config file")
mark_as_advanced(LIB_SUFFIX)
if(LIB_SUFFIX)
set(LIB_SUFFIX "_${LIB_SUFFIX}")
set(LAMMPS_LIB_SUFFIX "" CACHE STRING "Suffix to append to liblammps and pkg-config file")
mark_as_advanced(LAMMPS_LIB_SUFFIX)
if(LAMMPS_LIB_SUFFIX)
set(LAMMPS_LIB_SUFFIX "_${LAMMPS_LIB_SUFFIX}")
endif()
endif()
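# [Editorial note] LIB_SUFFIX is also a semi-standard cache variable that some
# distributions set (e.g. to "64") to select lib64/ install directories; the
# rename to LAMMPS_LIB_SUFFIX presumably avoids that collision. Hypothetical
# usage:
#   cmake -D BUILD_LIB=on -D LAMMPS_LIB_SUFFIX=mpi ../cmake
# now yields liblammps_mpi.* and liblammps_mpi.pc.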
@ -139,6 +163,35 @@ set(LAMMPS_LINK_LIBS)
set(LAMMPS_DEPS)
set(LAMMPS_API_DEFINES)

set(DEFAULT_PACKAGES ASPHERE BODY CLASS2 COLLOID COMPRESS DIPOLE GRANULAR
KSPACE MANYBODY MC MEAM MESSAGE MISC MOLECULE PERI REAX REPLICA RIGID SHOCK
SPIN SNAP SRD KIM PYTHON MSCG MPIIO VORONOI POEMS LATTE USER-ATC USER-AWPMD
USER-BOCS USER-CGDNA USER-MESO USER-CGSDK USER-COLVARS USER-DIFFRACTION
USER-DPD USER-DRUDE USER-EFF USER-FEP USER-H5MD USER-LB USER-MANIFOLD
USER-MEAMC USER-MGPT USER-MISC USER-MOFFF USER-MOLFILE USER-NETCDF
USER-PHONON USER-PTM USER-QTB USER-REAXC USER-SCAFACOS USER-SMD USER-SMTBQ
USER-SPH USER-TALLY USER-UEF USER-VTK USER-QUIP USER-QMMM)
set(ACCEL_PACKAGES USER-OMP KOKKOS OPT USER-INTEL GPU)
set(OTHER_PACKAGES CORESHELL QEQ)
foreach(PKG ${DEFAULT_PACKAGES})
option(PKG_${PKG} "Build ${PKG} Package" OFF)
endforeach()
foreach(PKG ${ACCEL_PACKAGES} ${OTHER_PACKAGES})
option(PKG_${PKG} "Build ${PKG} Package" OFF)
endforeach()

######################################################
# packages with special compiler needs or external libs
######################################################
if(PKG_REAX OR PKG_MEAM OR PKG_USER-QUIP OR PKG_USER-QMMM OR PKG_LATTE OR PKG_USER-SCAFACOS)
enable_language(Fortran)
endif()

if(PKG_MEAM OR PKG_USER-H5MD OR PKG_USER-QMMM OR PKG_USER-SCAFACOS)
enable_language(C)
endif()

# do MPI detection after language activation, if MPI for these languages is required
find_package(MPI QUIET)
option(BUILD_MPI "Build MPI version" ${MPI_FOUND})
if(BUILD_MPI)
@ -183,25 +236,52 @@ endif()
option(CMAKE_VERBOSE_MAKEFILE "Verbose makefile" OFF)

option(ENABLE_TESTING "Enable testing" OFF)
if(ENABLE_TESTING)
if(ENABLE_TESTING AND BUILD_EXE)
enable_testing()
endif(ENABLE_TESTING)
option(LAMMPS_TESTING_SOURCE_DIR "Location of lammps-testing source directory" "")
option(LAMMPS_TESTING_GIT_TAG "Git tag of lammps-testing" "master")
mark_as_advanced(LAMMPS_TESTING_SOURCE_DIR LAMMPS_TESTING_GIT_TAG)

set(DEFAULT_PACKAGES ASPHERE BODY CLASS2 COLLOID COMPRESS DIPOLE GRANULAR
KSPACE MANYBODY MC MEAM MISC MOLECULE PERI REAX REPLICA RIGID SHOCK SPIN SNAP
SRD KIM PYTHON MSCG MPIIO VORONOI POEMS LATTE USER-ATC USER-AWPMD USER-BOCS
USER-CGDNA USER-MESO USER-CGSDK USER-COLVARS USER-DIFFRACTION USER-DPD USER-DRUDE
USER-EFF USER-FEP USER-H5MD USER-LB USER-MANIFOLD USER-MEAMC USER-MGPT USER-MISC
USER-MOFFF USER-MOLFILE USER-NETCDF USER-PHONON USER-QTB USER-REAXC USER-SMD
USER-SMTBQ USER-SPH USER-TALLY USER-UEF USER-VTK USER-QUIP USER-QMMM)
set(ACCEL_PACKAGES USER-OMP KOKKOS OPT USER-INTEL GPU)
set(OTHER_PACKAGES CORESHELL QEQ)
foreach(PKG ${DEFAULT_PACKAGES})
option(PKG_${PKG} "Build ${PKG} Package" OFF)
endforeach()
foreach(PKG ${ACCEL_PACKAGES} ${OTHER_PACKAGES})
option(PKG_${PKG} "Build ${PKG} Package" OFF)
if (CMAKE_VERSION VERSION_GREATER "3.10.3" AND NOT LAMMPS_TESTING_SOURCE_DIR)
include(FetchContent)

FetchContent_Declare(lammps-testing
GIT_REPOSITORY https://github.com/lammps/lammps-testing.git
GIT_TAG ${LAMMPS_TESTING_GIT_TAG}
)

FetchContent_GetProperties(lammps-testing)
if(NOT lammps-testing_POPULATED)
message(STATUS "Downloading tests...")
FetchContent_Populate(lammps-testing)
endif()

set(LAMMPS_TESTING_SOURCE_DIR ${lammps-testing_SOURCE_DIR})
elseif(NOT LAMMPS_TESTING_SOURCE_DIR)
message(WARNING "Full test-suite requires CMake >= 3.11 or copy of\n"
"https://github.com/lammps/lammps-testing in LAMMPS_TESTING_SOURCE_DIR")
endif()

if(EXISTS ${LAMMPS_TESTING_SOURCE_DIR})
message(STATUS "Running test discovery...")

file(GLOB_RECURSE TEST_SCRIPTS ${LAMMPS_TESTING_SOURCE_DIR}/tests/core/*/in.*)
foreach(script_path ${TEST_SCRIPTS})
get_filename_component(TEST_NAME ${script_path} EXT)
get_filename_component(SCRIPT_NAME ${script_path} NAME)
get_filename_component(PARENT_DIR ${script_path} DIRECTORY)
string(SUBSTRING ${TEST_NAME} 1 -1 TEST_NAME)
string(REPLACE "-" "_" TEST_NAME ${TEST_NAME})
string(REPLACE "+" "_" TEST_NAME ${TEST_NAME})
set(TEST_NAME "test_core_${TEST_NAME}_serial")
add_test(${TEST_NAME} ${CMAKE_BINARY_DIR}/${LAMMPS_BINARY} -in ${SCRIPT_NAME})
set_tests_properties(${TEST_NAME} PROPERTIES WORKING_DIRECTORY ${PARENT_DIR})
endforeach()
list(LENGTH TEST_SCRIPTS NUM_TESTS)

message(STATUS "Found ${NUM_TESTS} tests.")
endif()
endif()
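How a test name is derived from a script, as a standalone trace (editorial; file name hypothetical, runnable with "cmake -P"):

set(TEST_NAME ".melt-2d")                       # EXT of "in.melt-2d"
string(SUBSTRING ${TEST_NAME} 1 -1 TEST_NAME)   # drop the leading dot -> "melt-2d"
string(REPLACE "-" "_" TEST_NAME ${TEST_NAME})  # -> "melt_2d"
message(STATUS "test_core_${TEST_NAME}_serial") # -> test_core_melt_2d_serial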
macro(pkg_depends PKG1 PKG2)
if(PKG_${PKG1} AND NOT (PKG_${PKG2} OR BUILD_${PKG2}))

@ -215,17 +295,7 @@ pkg_depends(MPIIO MPI)
pkg_depends(USER-ATC MANYBODY)
pkg_depends(USER-LB MPI)
pkg_depends(USER-PHONON KSPACE)

######################################################
# packages with special compiler needs or external libs
######################################################
if(PKG_REAX OR PKG_MEAM OR PKG_USER-QUIP OR PKG_USER-QMMM OR PKG_LATTE)
enable_language(Fortran)
endif()

if(PKG_MEAM OR PKG_USER-H5MD OR PKG_USER-QMMM)
enable_language(C)
endif()
pkg_depends(USER-SCAFACOS MPI)

find_package(OpenMP QUIET)
option(BUILD_OMP "Build with OpenMP support" ${OpenMP_FOUND})
@ -279,7 +349,7 @@ if(PKG_MSCG OR PKG_USER-ATC OR PKG_USER-AWPMD OR PKG_USER-QUIP OR PKG_LATTE)
find_package(BLAS)
if(NOT LAPACK_FOUND OR NOT BLAS_FOUND)
enable_language(Fortran)
file(GLOB LAPACK_SOURCES ${LAMMPS_LIB_SOURCE_DIR}/linalg/*.[fF])
file(GLOB LAPACK_SOURCES ${LAMMPS_LIB_SOURCE_DIR}/linalg/[^.]*.[fF])
add_library(linalg STATIC ${LAPACK_SOURCES})
set(LAPACK_LIBRARIES linalg)
else()
@ -403,6 +473,57 @@ if(PKG_LATTE)
list(APPEND LAMMPS_LINK_LIBS ${LATTE_LIBRARIES} ${LAPACK_LIBRARIES})
endif()

if(PKG_USER-SCAFACOS)
find_package(GSL REQUIRED)
option(DOWNLOAD_SCAFACOS "Download ScaFaCoS (instead of using the system's one)" OFF)
if(DOWNLOAD_SCAFACOS)
include(ExternalProject)
ExternalProject_Add(scafacos_build
URL https://github.com/scafacos/scafacos/releases/download/v1.0.1/scafacos-1.0.1.tar.gz
URL_MD5 bd46d74e3296bd8a444d731bb10c1738
CONFIGURE_COMMAND <SOURCE_DIR>/configure --prefix=<INSTALL_DIR>
--disable-doc
--enable-fcs-solvers=fmm,p2nfft,direct,ewald,p3m
--with-internal-fftw
--with-internal-pfft
--with-internal-pnfft
$<$<BOOL:${BUILD_SHARED_LIBS}>:--with-pic>
FC=${CMAKE_MPI_Fortran_COMPILER}
CXX=${CMAKE_MPI_CXX_COMPILER}
CC=${CMAKE_MPI_C_COMPILER}
F77=
)
ExternalProject_get_property(scafacos_build INSTALL_DIR)
set(SCAFACOS_BUILD_DIR ${INSTALL_DIR})
set(SCAFACOS_INCLUDE_DIRS ${SCAFACOS_BUILD_DIR}/include)
list(APPEND LAMMPS_DEPS scafacos_build)
# list and order from pkg_config file of ScaFaCoS build
list(APPEND LAMMPS_LINK_LIBS ${SCAFACOS_BUILD_DIR}/lib/libfcs.a)
list(APPEND LAMMPS_LINK_LIBS ${SCAFACOS_BUILD_DIR}/lib/libfcs_direct.a)
list(APPEND LAMMPS_LINK_LIBS ${SCAFACOS_BUILD_DIR}/lib/libfcs_ewald.a)
list(APPEND LAMMPS_LINK_LIBS ${SCAFACOS_BUILD_DIR}/lib/libfcs_fmm.a)
list(APPEND LAMMPS_LINK_LIBS ${SCAFACOS_BUILD_DIR}/lib/libfcs_p2nfft.a)
list(APPEND LAMMPS_LINK_LIBS ${SCAFACOS_BUILD_DIR}/lib/libfcs_p3m.a)
list(APPEND LAMMPS_LINK_LIBS ${GSL_LIBRARIES})
list(APPEND LAMMPS_LINK_LIBS ${SCAFACOS_BUILD_DIR}/lib/libfcs_near.a)
list(APPEND LAMMPS_LINK_LIBS ${SCAFACOS_BUILD_DIR}/lib/libfcs_gridsort.a)
list(APPEND LAMMPS_LINK_LIBS ${SCAFACOS_BUILD_DIR}/lib/libfcs_resort.a)
list(APPEND LAMMPS_LINK_LIBS ${SCAFACOS_BUILD_DIR}/lib/libfcs_redist.a)
list(APPEND LAMMPS_LINK_LIBS ${SCAFACOS_BUILD_DIR}/lib/libfcs_common.a)
list(APPEND LAMMPS_LINK_LIBS ${SCAFACOS_BUILD_DIR}/lib/libfcs_pnfft.a)
list(APPEND LAMMPS_LINK_LIBS ${SCAFACOS_BUILD_DIR}/lib/libfcs_pfft.a)
list(APPEND LAMMPS_LINK_LIBS ${SCAFACOS_BUILD_DIR}/lib/libfcs_fftw3_mpi.a)
list(APPEND LAMMPS_LINK_LIBS ${SCAFACOS_BUILD_DIR}/lib/libfcs_fftw3.a)
list(APPEND LAMMPS_LINK_LIBS ${MPI_Fortran_LIBRARIES})
list(APPEND LAMMPS_LINK_LIBS ${MPI_C_LIBRARIES})
else()
FIND_PACKAGE(PkgConfig REQUIRED)
PKG_CHECK_MODULES(SCAFACOS scafacos REQUIRED)
list(APPEND LAMMPS_LINK_LIBS ${SCAFACOS_LDFLAGS})
endif()
include_directories(${SCAFACOS_INCLUDE_DIRS})
endif()

if(PKG_USER-MOLFILE)
add_library(molfile INTERFACE)
target_include_directories(molfile INTERFACE ${LAMMPS_LIB_SOURCE_DIR}/molfile)
@ -412,8 +533,8 @@ endif()

if(PKG_USER-NETCDF)
find_package(NetCDF REQUIRED)
include_directories(NETCDF_INCLUDE_DIR)
list(APPEND LAMMPS_LINK_LIBS ${NETCDF_LIBRARY})
include_directories(${NETCDF_INCLUDE_DIRS})
list(APPEND LAMMPS_LINK_LIBS ${NETCDF_LIBRARIES})
add_definitions(-DLMP_HAS_NETCDF -DNC_64BIT_DATA=0x0020)
endif()
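# [Editorial note] The replaced pair above fixed a real bug: the old
# include_directories(NETCDF_INCLUDE_DIR) passed the literal string
# "NETCDF_INCLUDE_DIR" rather than a variable expansion, so the NetCDF
# header path was never actually added to the include path.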
@ -430,8 +551,9 @@ if(PKG_USER-SMD)
set(EIGEN3_INCLUDE_DIR ${SOURCE_DIR})
list(APPEND LAMMPS_DEPS Eigen3_build)
else()
find_package(Eigen3)
if(NOT Eigen3_FOUND)
find_package(Eigen3 NO_MODULE)
mark_as_advanced(Eigen3_DIR)
if(NOT EIGEN3_FOUND)
message(FATAL_ERROR "Eigen3 not found, help CMake to find it by setting EIGEN3_INCLUDE_DIR, or set DOWNLOAD_EIGEN3=ON to download it")
endif()
endif()
@ -481,6 +603,40 @@ if(PKG_KIM)
include_directories(${KIM_INCLUDE_DIRS})
endif()

if(PKG_MESSAGE)
option(MESSAGE_ZMQ "Use ZeroMQ in MESSAGE package" OFF)
file(GLOB_RECURSE cslib_SOURCES ${LAMMPS_LIB_SOURCE_DIR}/message/cslib/[^.]*.F
${LAMMPS_LIB_SOURCE_DIR}/message/cslib/[^.]*.c
${LAMMPS_LIB_SOURCE_DIR}/message/cslib/[^.]*.cpp)

if(BUILD_SHARED_LIBS)
add_library(cslib SHARED ${cslib_SOURCES})
else()
add_library(cslib STATIC ${cslib_SOURCES})
endif()

if(BUILD_MPI)
target_compile_definitions(cslib PRIVATE -DMPI_YES)
set_target_properties(cslib PROPERTIES OUTPUT_NAME "csmpi")
else()
target_compile_definitions(cslib PRIVATE -DMPI_NO)
set_target_properties(cslib PROPERTIES OUTPUT_NAME "csnompi")
endif()

if(MESSAGE_ZMQ)
target_compile_definitions(cslib PRIVATE -DZMQ_YES)
find_package(ZMQ REQUIRED)
target_include_directories(cslib PRIVATE ${ZMQ_INCLUDE_DIRS})
target_link_libraries(cslib PUBLIC ${ZMQ_LIBRARIES})
else()
target_compile_definitions(cslib PRIVATE -DZMQ_NO)
target_include_directories(cslib PRIVATE ${LAMMPS_LIB_SOURCE_DIR}/message/cslib/src/STUBS_ZMQ)
endif()

list(APPEND LAMMPS_LINK_LIBS cslib)
include_directories(${LAMMPS_LIB_SOURCE_DIR}/message/cslib/src)
endif()

if(PKG_MSCG)
find_package(GSL REQUIRED)
option(DOWNLOAD_MSCG "Download MS-CG (instead of using the system's one)" OFF)
@ -567,8 +723,8 @@ RegisterStyles(${LAMMPS_SOURCE_DIR})
foreach(PKG ${DEFAULT_PACKAGES})
set(${PKG}_SOURCES_DIR ${LAMMPS_SOURCE_DIR}/${PKG})

file(GLOB ${PKG}_SOURCES ${${PKG}_SOURCES_DIR}/*.cpp)
file(GLOB ${PKG}_HEADERS ${${PKG}_SOURCES_DIR}/*.h)
file(GLOB ${PKG}_SOURCES ${${PKG}_SOURCES_DIR}/[^.]*.cpp)
file(GLOB ${PKG}_HEADERS ${${PKG}_SOURCES_DIR}/[^.]*.h)

# check for package files in src directory due to old make system
DetectBuildSystemConflict(${LAMMPS_SOURCE_DIR} ${${PKG}_SOURCES} ${${PKG}_HEADERS})
@ -586,8 +742,8 @@ endforeach()
foreach(PKG ${ACCEL_PACKAGES})
set(${PKG}_SOURCES_DIR ${LAMMPS_SOURCE_DIR}/${PKG})

file(GLOB ${PKG}_SOURCES ${${PKG}_SOURCES_DIR}/*.cpp)
file(GLOB ${PKG}_HEADERS ${${PKG}_SOURCES_DIR}/*.h)
file(GLOB ${PKG}_SOURCES ${${PKG}_SOURCES_DIR}/[^.]*.cpp)
file(GLOB ${PKG}_HEADERS ${${PKG}_SOURCES_DIR}/[^.]*.h)

# check for package files in src directory due to old make system
DetectBuildSystemConflict(${LAMMPS_SOURCE_DIR} ${${PKG}_SOURCES} ${${PKG}_HEADERS})
@ -601,8 +757,10 @@ foreach(SIMPLE_LIB REAX MEAM POEMS USER-ATC USER-AWPMD USER-COLVARS USER-H5MD
if(PKG_${SIMPLE_LIB})
string(REGEX REPLACE "^USER-" "" PKG_LIB "${SIMPLE_LIB}")
string(TOLOWER "${PKG_LIB}" PKG_LIB)
file(GLOB_RECURSE ${PKG_LIB}_SOURCES ${LAMMPS_LIB_SOURCE_DIR}/${PKG_LIB}/*.F
${LAMMPS_LIB_SOURCE_DIR}/${PKG_LIB}/*.c ${LAMMPS_LIB_SOURCE_DIR}/${PKG_LIB}/*.cpp)
file(GLOB_RECURSE ${PKG_LIB}_SOURCES
${LAMMPS_LIB_SOURCE_DIR}/${PKG_LIB}/[^.]*.F
${LAMMPS_LIB_SOURCE_DIR}/${PKG_LIB}/[^.]*.c
${LAMMPS_LIB_SOURCE_DIR}/${PKG_LIB}/[^.]*.cpp)
add_library(${PKG_LIB} STATIC ${${PKG_LIB}_SOURCES})
list(APPEND LAMMPS_LINK_LIBS ${PKG_LIB})
if(PKG_LIB STREQUAL awpmd)
@ -677,13 +835,16 @@ if(PKG_USER-OMP)
set(USER-OMP_SOURCES_DIR ${LAMMPS_SOURCE_DIR}/USER-OMP)
set(USER-OMP_SOURCES ${USER-OMP_SOURCES_DIR}/thr_data.cpp
${USER-OMP_SOURCES_DIR}/thr_omp.cpp
${USER-OMP_SOURCES_DIR}/fix_omp.cpp
${USER-OMP_SOURCES_DIR}/fix_nh_omp.cpp
${USER-OMP_SOURCES_DIR}/fix_nh_sphere_omp.cpp)
${USER-OMP_SOURCES_DIR}/fix_nh_sphere_omp.cpp
${USER-OMP_SOURCES_DIR}/domain_omp.cpp)
add_definitions(-DLMP_USER_OMP)
set_property(GLOBAL PROPERTY "OMP_SOURCES" "${USER-OMP_SOURCES}")

# detects styles which have USER-OMP version
RegisterStylesExt(${USER-OMP_SOURCES_DIR} omp OMP_SOURCES)

RegisterFixStyle("${USER-OMP_SOURCES_DIR}/fix_omp.h")

get_property(USER-OMP_SOURCES GLOBAL PROPERTY OMP_SOURCES)

@ -883,7 +1044,7 @@ if(PKG_GPU)
set(GPU_PREC_SETTING "SINGLE_SINGLE")
endif()

file(GLOB GPU_LIB_SOURCES ${LAMMPS_LIB_SOURCE_DIR}/gpu/*.cpp)
file(GLOB GPU_LIB_SOURCES ${LAMMPS_LIB_SOURCE_DIR}/gpu/[^.]*.cpp)
file(MAKE_DIRECTORY ${LAMMPS_LIB_BINARY_DIR}/gpu)

if(GPU_API STREQUAL "CUDA")
@ -896,15 +1057,15 @@ if(PKG_GPU)

set(GPU_ARCH "sm_30" CACHE STRING "LAMMPS GPU CUDA SM architecture (e.g. sm_60)")

file(GLOB GPU_LIB_CU ${LAMMPS_LIB_SOURCE_DIR}/gpu/*.cu ${CMAKE_CURRENT_SOURCE_DIR}/gpu/*.cu)
file(GLOB GPU_LIB_CU ${LAMMPS_LIB_SOURCE_DIR}/gpu/[^.]*.cu ${CMAKE_CURRENT_SOURCE_DIR}/gpu/[^.]*.cu)
list(REMOVE_ITEM GPU_LIB_CU ${LAMMPS_LIB_SOURCE_DIR}/gpu/lal_pppm.cu)

cuda_include_directories(${LAMMPS_LIB_SOURCE_DIR}/gpu ${LAMMPS_LIB_BINARY_DIR}/gpu)

if(CUDPP_OPT)
cuda_include_directories(${LAMMPS_LIB_SOURCE_DIR}/gpu/cudpp_mini)
file(GLOB GPU_LIB_CUDPP_SOURCES ${LAMMPS_LIB_SOURCE_DIR}/gpu/cudpp_mini/*.cpp)
file(GLOB GPU_LIB_CUDPP_CU ${LAMMPS_LIB_SOURCE_DIR}/gpu/cudpp_mini/*.cu)
file(GLOB GPU_LIB_CUDPP_SOURCES ${LAMMPS_LIB_SOURCE_DIR}/gpu/cudpp_mini/[^.]*.cpp)
file(GLOB GPU_LIB_CUDPP_CU ${LAMMPS_LIB_SOURCE_DIR}/gpu/cudpp_mini/[^.]*.cu)
endif()

cuda_compile_cubin(GPU_GEN_OBJS ${GPU_LIB_CU} OPTIONS
@ -953,7 +1114,7 @@ if(PKG_GPU)
include(OpenCLUtils)
set(OCL_COMMON_HEADERS ${LAMMPS_LIB_SOURCE_DIR}/gpu/lal_preprocessor.h ${LAMMPS_LIB_SOURCE_DIR}/gpu/lal_aux_fun1.h)

file(GLOB GPU_LIB_CU ${LAMMPS_LIB_SOURCE_DIR}/gpu/*.cu)
file(GLOB GPU_LIB_CU ${LAMMPS_LIB_SOURCE_DIR}/gpu/[^.]*.cu)
list(REMOVE_ITEM GPU_LIB_CU ${LAMMPS_LIB_SOURCE_DIR}/gpu/lal_gayberne.cu ${LAMMPS_LIB_SOURCE_DIR}/gpu/lal_gayberne_lj.cu)

foreach(GPU_KERNEL ${GPU_LIB_CU})
@ -1011,7 +1172,9 @@ include_directories(${LAMMPS_STYLE_HEADERS_DIR})
######################################
set(temp "#ifndef LMP_INSTALLED_PKGS_H\n#define LMP_INSTALLED_PKGS_H\n")
set(temp "${temp}const char * LAMMPS_NS::LAMMPS::installed_packages[] = {\n")
foreach(PKG ${DEFAULT_PACKAGES} ${ACCEL_PACKAGES} ${OTHER_PACKAGES})
set(temp_PKG_LIST ${DEFAULT_PACKAGES} ${ACCEL_PACKAGES} ${OTHER_PACKAGES})
list(SORT temp_PKG_LIST)
foreach(PKG ${temp_PKG_LIST})
if(PKG_${PKG})
set(temp "${temp} \"${PKG}\",\n")
endif()
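# [Editorial sketch] With, say, only PKG_KIM and PKG_MOLECULE enabled, the
# generated header would contain (list terminated outside this hunk):
#   const char * LAMMPS_NS::LAMMPS::installed_packages[] = {
#     "KIM",
#     "MOLECULE",
# Sorting temp_PKG_LIST first makes the generated file independent of the
# order in which the package lists were declared.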
@ -1036,14 +1199,14 @@ if(BUILD_LIB)
if(LAMMPS_DEPS)
add_dependencies(lammps ${LAMMPS_DEPS})
endif()
set_target_properties(lammps PROPERTIES OUTPUT_NAME lammps${LIB_SUFFIX})
if(BUILD_SHARED_LIBS)
set_target_properties(lammps PROPERTIES OUTPUT_NAME lammps${LAMMPS_LIB_SUFFIX})
set_target_properties(lammps PROPERTIES SOVERSION ${SOVERSION})
install(TARGETS lammps LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR} ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR})
install(FILES ${LAMMPS_SOURCE_DIR}/library.h DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}/lammps)
configure_file(pkgconfig/liblammps.pc.in ${CMAKE_CURRENT_BINARY_DIR}/liblammps${LIB_SUFFIX}.pc @ONLY)
install(FILES ${CMAKE_CURRENT_BINARY_DIR}/liblammps${LIB_SUFFIX}.pc DESTINATION ${CMAKE_INSTALL_LIBDIR}/pkgconfig)
endif()
configure_file(pkgconfig/liblammps.pc.in ${CMAKE_CURRENT_BINARY_DIR}/liblammps${LAMMPS_LIB_SUFFIX}.pc @ONLY)
install(FILES ${CMAKE_CURRENT_BINARY_DIR}/liblammps${LAMMPS_LIB_SUFFIX}.pc DESTINATION ${CMAKE_INSTALL_LIBDIR}/pkgconfig)
configure_file(FindLAMMPS.cmake.in ${CMAKE_CURRENT_BINARY_DIR}/FindLAMMPS${LAMMPS_LIB_SUFFIX}.cmake @ONLY)
install(FILES ${CMAKE_CURRENT_BINARY_DIR}/FindLAMMPS${LAMMPS_LIB_SUFFIX}.cmake DESTINATION ${CMAKE_INSTALL_DATADIR}/cmake/Module)
else()
list(APPEND LMP_SOURCES ${LIB_SOURCES})
endif()
@ -1059,10 +1222,11 @@ if(BUILD_EXE)
endif()
endif()

set_target_properties(lmp PROPERTIES OUTPUT_NAME lmp${LAMMPS_MACHINE})
set_target_properties(lmp PROPERTIES OUTPUT_NAME ${LAMMPS_BINARY})
install(TARGETS lmp DESTINATION ${CMAKE_INSTALL_BINDIR})
install(FILES ${LAMMPS_DOC_DIR}/lammps.1 DESTINATION ${CMAKE_INSTALL_MANDIR}/man1 RENAME ${LAMMPS_BINARY}.1)
if(ENABLE_TESTING)
add_test(ShowHelp lmp${LAMMPS_MACHINE} -help)
add_test(ShowHelp ${LAMMPS_BINARY} -help)
endif()
endif()

@ -1077,7 +1241,7 @@ if(BUILD_DOC)

set(VIRTUALENV ${PYTHON_EXECUTABLE} -m virtualenv)

file(GLOB DOC_SOURCES ${LAMMPS_DOC_DIR}/src/*.txt)
file(GLOB DOC_SOURCES ${LAMMPS_DOC_DIR}/src/[^.]*.txt)
file(GLOB PDF_EXTRA_SOURCES ${LAMMPS_DOC_DIR}/src/lammps_commands*.txt ${LAMMPS_DOC_DIR}/src/lammps_support.txt ${LAMMPS_DOC_DIR}/src/lammps_tutorials.txt)
list(REMOVE_ITEM DOC_SOURCES ${PDF_EXTRA_SOURCES})

@ -1130,7 +1294,7 @@ endif()
# Install potential files in data directory
###############################################################################
set(LAMMPS_POTENTIALS_DIR ${CMAKE_INSTALL_FULL_DATADIR}/lammps/potentials)
install(DIRECTORY ${LAMMPS_SOURCE_DIR}/../potentials DESTINATION ${CMAKE_INSTALL_DATADIR}/lammps/potentials)
install(DIRECTORY ${LAMMPS_SOURCE_DIR}/../potentials/ DESTINATION ${LAMMPS_POTENTIALS_DIR})

configure_file(etc/profile.d/lammps.sh.in ${CMAKE_BINARY_DIR}/etc/profile.d/lammps.sh @ONLY)
configure_file(etc/profile.d/lammps.csh.in ${CMAKE_BINARY_DIR}/etc/profile.d/lammps.csh @ONLY)
@ -1172,7 +1336,7 @@ endif()
###############################################################################
# Print package summary
###############################################################################
foreach(PKG ${DEFAULT_PACKAGES} ${ACCEL_PACKAGES})
foreach(PKG ${DEFAULT_PACKAGES} ${ACCEL_PACKAGES} ${OTHER_PACKAGES})
if(PKG_${PKG})
message(STATUS "Building package: ${PKG}")
endif()

@ -0,0 +1,48 @@
# - Find liblammps
# Find the native liblammps headers and libraries.
#
# The following variables will be set:
# LAMMPS_INCLUDE_DIRS - where to find lammps/library.h, etc.
# LAMMPS_LIBRARIES    - List of libraries when using lammps.
# LAMMPS_API_DEFINES  - lammps library api defines
# LAMMPS_VERSION      - lammps library version
# LAMMPS_FOUND        - True if liblammps found.
#
# In addition a LAMMPS::LAMMPS imported target is created.
#
# LAMMPS - Large-scale Atomic/Molecular Massively Parallel Simulator
# http://lammps.sandia.gov, Sandia National Laboratories
# Steve Plimpton, sjplimp@sandia.gov
#
# Copyright (2003) Sandia Corporation. Under the terms of Contract
# DE-AC04-94AL85000 with Sandia Corporation, the U.S. Government retains
# certain rights in this software. This software is distributed under
# the GNU General Public License.
#
# See the README file in the top-level LAMMPS directory.
#

find_package(PkgConfig)

pkg_check_modules(PC_LAMMPS liblammps@LAMMPS_LIB_SUFFIX@)
find_path(LAMMPS_INCLUDE_DIR lammps/library.h HINTS ${PC_LAMMPS_INCLUDE_DIRS} @CMAKE_INSTALL_FULL_INCLUDEDIR@)

set(LAMMPS_VERSION @LAMMPS_VERSION@)
set(LAMMPS_API_DEFINES @LAMMPS_API_DEFINES@)

find_library(LAMMPS_LIBRARY NAMES lammps@LAMMPS_LIB_SUFFIX@ HINTS ${PC_LAMMPS_LIBRARY_DIRS} @CMAKE_INSTALL_FULL_LIBDIR@)

set(LAMMPS_INCLUDE_DIRS "${LAMMPS_INCLUDE_DIR}")
set(LAMMPS_LIBRARIES "${LAMMPS_LIBRARY}")

include(FindPackageHandleStandardArgs)
# handle the QUIETLY and REQUIRED arguments and set LAMMPS_FOUND to TRUE
# if all listed variables are TRUE
find_package_handle_standard_args(LAMMPS REQUIRED_VARS LAMMPS_LIBRARY LAMMPS_INCLUDE_DIR VERSION_VAR LAMMPS_VERSION)

mark_as_advanced(LAMMPS_INCLUDE_DIR LAMMPS_LIBRARY)

if(LAMMPS_FOUND AND NOT TARGET LAMMPS::LAMMPS)
add_library(LAMMPS::LAMMPS UNKNOWN IMPORTED)
set_target_properties(LAMMPS::LAMMPS PROPERTIES IMPORTED_LOCATION "${LAMMPS_LIBRARY}" INTERFACE_INCLUDE_DIRECTORIES "${LAMMPS_INCLUDE_DIR}" INTERFACE_COMPILE_DEFINITIONS "${LAMMPS_API_DEFINES}")
endif()
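A minimal downstream sketch (editorial; project and source names hypothetical) of consuming the module above:

cmake_minimum_required(VERSION 3.1)
project(lammps_client CXX)
# assumes the installed FindLAMMPS module is on CMAKE_MODULE_PATH
find_package(LAMMPS REQUIRED)
add_executable(client client.cpp)
target_link_libraries(client LAMMPS::LAMMPS)  # imported target defined above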
@ -0,0 +1,8 @@
find_path(ZMQ_INCLUDE_DIR zmq.h)
find_library(ZMQ_LIBRARY NAMES zmq)

set(ZMQ_LIBRARIES ${ZMQ_LIBRARY})
set(ZMQ_INCLUDE_DIRS ${ZMQ_INCLUDE_DIR})

include(FindPackageHandleStandardArgs)
find_package_handle_standard_args(ZMQ DEFAULT_MSG ZMQ_LIBRARY ZMQ_INCLUDE_DIR)

@ -48,8 +48,13 @@ function(CreateStyleHeader path filename)
set(temp "")
if(ARGC GREATER 2)
list(REMOVE_AT ARGV 0 1)
set(header_list)
foreach(FNAME ${ARGV})
get_filename_component(FNAME ${FNAME} NAME)
list(APPEND header_list ${FNAME})
endforeach()
list(SORT header_list)
foreach(FNAME ${header_list})
set(temp "${temp}#include \"${FNAME}\"\n")
endforeach()
endif()
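# [Editorial note] Collecting the names into header_list and sorting before
# emitting the #include lines makes the generated style header deterministic:
# the same set of files now yields byte-identical output regardless of the
# filesystem's glob order, e.g. (hypothetical entries)
#   #include "angle_charmm.h"
#   #include "angle_cosine.h"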
@ -80,19 +85,23 @@ function(RegisterNPairStyle path)
AddStyleHeader(${path} NPAIR)
endfunction(RegisterNPairStyle)

function(RegisterFixStyle path)
AddStyleHeader(${path} FIX)
endfunction(RegisterFixStyle)

function(RegisterStyles search_path)
FindStyleHeaders(${search_path} ANGLE_CLASS     angle_     ANGLE     ) # angle     ) # force
FindStyleHeaders(${search_path} ATOM_CLASS      atom_vec_  ATOM_VEC  ) # atom      ) # atom atom_vec_hybrid
FindStyleHeaders(${search_path} BODY_CLASS      body_      BODY      ) # body      ) # atom_vec_body
FindStyleHeaders(${search_path} BOND_CLASS      bond_      BOND      ) # bond      ) # force
FindStyleHeaders(${search_path} COMMAND_CLASS   ""         COMMAND   ) # command   ) # input
FindStyleHeaders(${search_path} COMMAND_CLASS   "[^.]"     COMMAND   ) # command   ) # input
FindStyleHeaders(${search_path} COMPUTE_CLASS   compute_   COMPUTE   ) # compute   ) # modify
FindStyleHeaders(${search_path} DIHEDRAL_CLASS  dihedral_  DIHEDRAL  ) # dihedral  ) # force
FindStyleHeaders(${search_path} DUMP_CLASS      dump_      DUMP      ) # dump      ) # output write_dump
FindStyleHeaders(${search_path} FIX_CLASS       fix_       FIX       ) # fix       ) # modify
FindStyleHeaders(${search_path} IMPROPER_CLASS  improper_  IMPROPER  ) # improper  ) # force
FindStyleHeaders(${search_path} INTEGRATE_CLASS ""         INTEGRATE ) # integrate ) # update
FindStyleHeaders(${search_path} KSPACE_CLASS    ""         KSPACE    ) # kspace    ) # force
FindStyleHeaders(${search_path} INTEGRATE_CLASS "[^.]"     INTEGRATE ) # integrate ) # update
FindStyleHeaders(${search_path} KSPACE_CLASS    "[^.]"     KSPACE    ) # kspace    ) # force
FindStyleHeaders(${search_path} MINIMIZE_CLASS  min_       MINIMIZE  ) # minimize  ) # update
FindStyleHeaders(${search_path} NBIN_CLASS      nbin_      NBIN      ) # nbin      ) # neighbor
FindStyleHeaders(${search_path} NPAIR_CLASS     npair_     NPAIR     ) # npair     ) # neighbor

@ -4,15 +4,15 @@
# after you added @CMAKE_INSTALL_FULL_LIBDIR@/pkg-config to PKG_CONFIG_PATH,
# e.g. export PKG_CONFIG_PATH=@CMAKE_INSTALL_FULL_LIBDIR@/pkgconfig

prefix=@CMAKE_INSTALL_FULL_PREFIX@
prefix=@CMAKE_INSTALL_PREFIX@
libdir=@CMAKE_INSTALL_FULL_LIBDIR@
includedir=@CMAKE_INSTALL_FULL_INCLUDEDIR@

Name: liblammps@LAMMPS_MACHINE@
Description: Large-scale Atomic/Molecular Massively Parallel Simulator Library
URL: http://lammps.sandia.gov
Version:
Version: @LAMMPS_VERSION@
Requires:
Libs: -L${libdir} -llammps@LIB_SUFFIX@@
Libs: -L${libdir} -llammps@LAMMPS_LIB_SUFFIX@
Libs.private: -lm
Cflags: -I${includedir} @LAMMPS_API_DEFINES@
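# [Editorial note] Illustrative use of the file above: once installed and
# PKG_CONFIG_PATH is set as described in its header comment, a client can be
# built with (file name hypothetical)
#   cc client.c $(pkg-config --cflags --libs liblammps)
# where the pkg-config module name gains the configured LAMMPS_LIB_SUFFIX,
# if any.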
@ -0,0 +1,45 @@
.TH LAMMPS "2018-08-22"
.SH NAME
.B LAMMPS
\- Molecular Dynamics Simulator.

.SH SYNOPSIS
.B lmp
-in in.file

or

mpirun \-np 2
.B lmp
-in in.file

.SH DESCRIPTION
.B LAMMPS
LAMMPS is a classical molecular dynamics code, and an acronym for Large-scale
Atomic/Molecular Massively Parallel Simulator. LAMMPS has potentials for soft
materials (biomolecules, polymers) and solid-state materials (metals,
semiconductors) and coarse-grained or mesoscopic systems. It can be used to
model atoms or, more generically, as a parallel particle simulator at the
atomic, meso, or continuum scale.

See http://lammps.sandia.gov/ for documentation.

.SH OPTIONS
See https://lammps.sandia.gov/doc/Run_options.html for details on
command-line options.

.SH COPYRIGHT
© 2003--2018 Sandia Corporation

This package is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.

This package is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.

On Debian systems, the complete text of the GNU General
Public License can be found in `/usr/share/common-licenses/GPL-2'.

@ -292,6 +292,10 @@ This will create a lammps/doc/html dir with the HTML doc pages so that
you can browse them locally on your system. Type "make" from the
lammps/doc dir to see other options.

NOTE: You can also download a tarball of the documentation for the
current LAMMPS version (HTML and PDF files), from the website
"download page"_http://lammps.sandia.gov/download.html.

:line

Install LAMMPS after a build :h4,link(install)

@ -31,6 +31,7 @@ This is the list of packages that may require additional steps.
"KOKKOS"_#kokkos,
"LATTE"_#latte,
"MEAM"_#meam,
"MESSAGE"_#message,
"MSCG"_#mscg,
"OPT"_#opt,
"POEMS"_#poems,

@ -47,6 +48,7 @@ This is the list of packages that may require additional steps.
"USER-OMP"_#user-omp,
"USER-QMMM"_#user-qmmm,
"USER-QUIP"_#user-quip,
"USER-SCAFACOS"_#user-scafacos,
"USER-SMD"_#user-smd,
"USER-VTK"_#user-vtk :tb(c=6,ea=c,a=l)

@ -361,6 +363,10 @@ make lib-meam args="-m mpi" # build with default Fortran compiler compatible
make lib-meam args="-m serial" # build with compiler compatible with "make serial" (GNU Fortran)
make lib-meam args="-m ifort" # build with Intel Fortran compiler using Makefile.ifort :pre

NOTE: You should test building the MEAM library with both the Intel
and GNU compilers to see if a simulation runs faster with one versus
the other on your system.

The build should produce two files: lib/meam/libmeam.a and
lib/meam/Makefile.lammps. The latter is copied from an existing
Makefile.lammps.* and has settings needed to link C++ (LAMMPS) with
@ -373,6 +379,35 @@ file.

:line

MESSAGE package :h4,link(message)

This package can optionally include support for messaging via sockets,
using the open-source "ZeroMQ library"_http://zeromq.org, which must
be installed on your system.

[CMake build]:

-D MESSAGE_ZMQ=value # build with ZeroMQ support, value = no (default) or yes
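For instance (editorial example, build directory hypothetical), a full CMake configure line enabling the package with ZeroMQ support could be:

cmake -D PKG_MESSAGE=yes -D MESSAGE_ZMQ=yes ../cmake :pre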
[Traditional make]:

Before building LAMMPS, you must build the CSlib library in
lib/message. You can build the CSlib library manually if you prefer;
follow the instructions in lib/message/README. You can also do it in
one step from the lammps/src dir, using commands like these, which
simply invoke the lib/message/Install.py script with the specified args:

make lib-message               # print help message
make lib-message args="-m -z"  # build with MPI and socket (ZMQ) support
make lib-message args="-s"     # build as serial lib with no ZMQ support

The build should produce two files: lib/message/cslib/src/libmessage.a
and lib/message/Makefile.lammps. The latter is copied from an
existing Makefile.lammps.* and has settings to link with the ZeroMQ
library if requested in the build.

:line

MSCG package :h4,link(mscg)

To build with this package, you must download and build the MS-CG
@ -894,6 +929,45 @@ successfully build on your system.

:line

USER-SCAFACOS package :h4,link(user-scafacos)

To build with this package, you must download and build the "ScaFaCoS
Coulomb solver library"_scafacos_home

:link(scafacos_home,http://www.scafacos.de)

[CMake build]:

-D DOWNLOAD_SCAFACOS=value    # download ScaFaCoS for build, value = no (default) or yes
-D SCAFACOS_LIBRARY=path      # ScaFaCoS library file (only needed if at custom location)
-D SCAFACOS_INCLUDE_DIR=path  # ScaFaCoS include directory (only needed if at custom location) :pre

If DOWNLOAD_SCAFACOS is set, the ScaFaCoS library will be downloaded
and built inside the CMake build directory. If the ScaFaCoS library
is already on your system (in a location where CMake cannot find it),
SCAFACOS_LIBRARY is the filename (plus path) of the ScaFaCoS library
file, not the directory the library file is in. SCAFACOS_INCLUDE_DIR
is the directory the ScaFaCoS include file is in.
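For instance (editorial example, paths hypothetical), pointing the build at a pre-installed copy could look like:

cmake -D PKG_USER-SCAFACOS=yes -D SCAFACOS_LIBRARY=$HOME/scafacos/lib/libfcs.a -D SCAFACOS_INCLUDE_DIR=$HOME/scafacos/include ../cmake :pre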
[Traditional make]:

You can download and build the ScaFaCoS library manually if you
prefer; follow the instructions in lib/scafacos/README. You can also
do it in one step from the lammps/src dir, using commands like these,
which simply invoke the lib/scafacos/Install.py script with the
specified args:

make lib-scafacos                            # print help message
make lib-scafacos args="-b"                  # download and build in lib/scafacos/scafacos-<version>
make lib-scafacos args="-p $HOME/scafacos"   # use existing ScaFaCoS installation in $HOME/scafacos :pre

Note that 2 symbolic (soft) links, "includelink" and "liblink", are
created in lib/scafacos to point to the ScaFaCoS src dir. When LAMMPS
builds in src it will use these links. You should not need to edit
the lib/scafacos/Makefile.lammps file.

:line

USER-SMD package :h4,link(user-smd)

To build with this package, you must download the Eigen3 library.

@ -42,6 +42,7 @@ packages:
"KOKKOS"_Build_extras.html#kokkos,
"LATTE"_Build_extras.html#latte,
"MEAM"_Build_extras.html#meam,
"MESSAGE"_Build_extras.html#message,
"MSCG"_Build_extras.html#mscg,
"OPT"_Build_extras.html#opt,
"POEMS"_Build_extras.html#poems,

@ -58,6 +59,7 @@ packages:
"USER-OMP"_Build_extras.html#user-omp,
"USER-QMMM"_Build_extras.html#user-qmmm,
"USER-QUIP"_Build_extras.html#user-quip,
"USER-SCAFACOS"_Build_extras.html#user-scafacos,
"USER-SMD"_Build_extras.html#user-smd,
"USER-VTK"_Build_extras.html#user-vtk :tb(c=6,ea=c,a=l)

@ -71,6 +71,7 @@ An alphabetic list of all LAMMPS commands.
"lattice"_lattice.html,
"log"_log.html,
"mass"_mass.html,
"message"_message.html,
"minimize"_minimize.html,
"min_modify"_min_modify.html,
"min_style"_min_style.html,

@ -103,6 +104,7 @@ An alphabetic list of all LAMMPS commands.
"restart"_restart.html,
"run"_run.html,
"run_style"_run_style.html,
"server"_server.html,
"set"_set.html,
"shell"_shell.html,
"special_bonds"_special_bonds.html,

@ -35,6 +35,7 @@ KOKKOS, o = USER-OMP, t = OPT.
"bond/local"_compute_bond_local.html,
"centro/atom"_compute_centro_atom.html,
"chunk/atom"_compute_chunk_atom.html,
"chunk/spread/atom"_compute_chunk_spread_atom.html,
"cluster/atom"_compute_cluster_atom.html,
"cna/atom"_compute_cna_atom.html,
"cnp/atom"_compute_cnp_atom.html,

@ -95,8 +96,10 @@ KOKKOS, o = USER-OMP, t = OPT.
"property/atom"_compute_property_atom.html,
"property/chunk"_compute_property_chunk.html,
"property/local"_compute_property_local.html,
"ptm/atom"_compute_ptm_atom.html,
"rdf"_compute_rdf.html,
"reduce"_compute_reduce.html,
"reduce/chunk"_compute_reduce_chunk.html,
"reduce/region"_compute_reduce.html,
"rigid/local"_compute_rigid_local.html,
"saed"_compute_saed.html,

@ -33,4 +33,5 @@ OPT.
"pppm/disp (i)"_kspace_style.html,
"pppm/disp/tip4p"_kspace_style.html,
"pppm/stagger"_kspace_style.html,
"pppm/tip4p (o)"_kspace_style.html :tb(c=4,ea=c)
"pppm/tip4p (o)"_kspace_style.html,
"scafacos"_kspace_style.html :tb(c=4,ea=c)

@ -33,6 +33,7 @@ OPT.
"agni (o)"_pair_agni.html,
"airebo (oi)"_pair_airebo.html,
"airebo/morse (oi)"_pair_airebo.html,
"atm"_pair_atm.html,
"awpmd/cut"_pair_awpmd.html,
"beck (go)"_pair_beck.html,
"body/nparticle"_pair_body_nparticle.html,

Binary file not shown (new image added, 5.3 KiB).
@ -0,0 +1,9 @@
\documentclass[12pt]{article}

\begin{document}

\begin{equation}
E=\nu\frac{1+3\cos\gamma_1\cos\gamma_2\cos\gamma_3}{r_{12}^3r_{23}^3r_{31}^3}
\end{equation}

\end{document}
Binary file not shown (new image added, 6.7 KiB).
@ -0,0 +1,21 @@
\documentclass[12pt,article]{article}

\usepackage{indentfirst}
\usepackage{amsmath}

\newcommand{\set}[1]{\ensuremath{\mathbf{#1}}}
\newcommand{\mean}[1]{\ensuremath{\overline{#1}}}
\newcommand{\norm}[1]{\ensuremath{\left|\left|{#1}\right|\right|}}

\begin{document}

\begin{equation*}
\text{RMSD}(\set{u}, \set{v}) = \min_{s, \set{Q}} \sqrt{\frac{1}{N} \sum\limits_{i=1}^{N}
\norm{
s[\vec{u_i} - \mean{\set{u}}]
-
\set{Q} \vec{v_i}
}^2}
\end{equation*}

\end{document}

@ -54,6 +54,7 @@ General howto :h3
Howto_replica
Howto_library
Howto_couple
Howto_client_server

END_RST -->

@ -64,7 +65,8 @@ END_RST -->
"Run multiple simulations from one input script"_Howto_multiple.html
"Multi-replica simulations"_Howto_replica.html
"Library interface to LAMMPS"_Howto_library.html
"Couple LAMMPS to other codes"_Howto_couple.html :all(b)
"Couple LAMMPS to other codes"_Howto_couple.html
"Using LAMMPS in client/server mode"_Howto_client_server.html :all(b)

<!-- END_HTML_ONLY -->

@ -22,7 +22,7 @@ commands, to calculate various properties of a system:
"fix ave/chunk"_fix_ave_chunk.html
any of the "compute */chunk"_compute.html commands :ul

Here, each of the 3 kinds of chunk-related commands is briefly
Here, each of the 4 kinds of chunk-related commands is briefly
overviewed. Then some examples are given of how to compute different
properties with chunk commands.

@ -83,8 +83,9 @@ chunk.

Compute */chunk commands: :h4

Currently the following computes operate on chunks of atoms to produce
per-chunk values.
The following computes operate on chunks of atoms to produce per-chunk
values. Any compute whose style name ends in "/chunk" is in this
category:

"compute com/chunk"_compute_com_chunk.html
"compute gyration/chunk"_compute_gyration_chunk.html
@ -111,8 +112,8 @@ of a center of mass, which requires summing mass*position over the
atoms and then dividing by summed mass.

All of these computes produce a global vector or global array as
output, with one or more values per chunk. They can be used
in various ways:
output, with one or more values per chunk. The output can be used in
various ways:

As input to the "fix ave/time"_fix_ave_time.html command, which can
write the values to a file and optionally time average them. :ulb,l
@ -122,9 +123,27 @@ histogram values across chunks. E.g. a histogram of cluster sizes or
molecule diffusion rates. :l

As input to special functions of "equal-style
variables"_variable.html, like sum() and max(). E.g. to find the
largest cluster or fastest diffusing molecule. :l
:ule
variables"_variable.html, like sum(), max(), and ave(). E.g. to
find the largest cluster or fastest diffusing molecule or average
radius-of-gyration of a set of molecules (chunks). :l,ule
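For instance (editorial example), with the c_size vector from example (5) below, the largest cluster size could be captured as:

variable big equal max(c_size) :pre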
Other chunk commands: :h4

"compute chunk/spread/atom"_compute_chunk_spread_atom.html
"compute reduce/chunk"_compute_reduce_chunk.html :ul

The "compute chunk/spread/atom"_compute_chunk_spread_atom.html command
spreads per-chunk values to each atom in the chunk, producing per-atom
values as its output. This can be useful for outputting per-chunk
values to a per-atom "dump file"_dump.html. Or for using an atom's
associated chunk value in an "atom-style variable"_variable.html.

The "compute reduce/chunk"_compute_reduce_chunk.html command reduces a
per-atom value across the atoms in each chunk to produce a value per
chunk. When used with the "compute
chunk/spread/atom"_compute_chunk_spread_atom.html command it can
create per-atom values that induce a new set of chunks with a second
"compute chunk/atom"_compute_chunk_atom.html command.

Example calculations with chunks :h4

@ -164,3 +183,13 @@ compute cluster all cluster/atom 1.0
compute cc1 all chunk/atom c_cluster compress yes
compute size all property/chunk cc1 count
fix 1 all ave/histo 100 1 100 0 20 20 c_size mode vector ave running beyond ignore file tmp.histo :pre

(6) An example of using a per-chunk value to apply per-atom forces to
compress individual polymer chains (molecules) in a mixture is
explained on the "compute
chunk/spread/atom"_compute_chunk_spread_atom.html command doc page.

(7) An example of using one set of per-chunk values for molecule
chunks, to create a 2nd set of micelle-scale chunks (clustered
molecules, due to hydrophobicity), is explained on the "compute
reduce/chunk"_compute_reduce_chunk.html command doc page.

@ -0,0 +1,131 @@
"Higher level section"_Howto.html - "LAMMPS WWW Site"_lws - "LAMMPS
Documentation"_ld - "LAMMPS Commands"_lc :c

:link(lws,http://lammps.sandia.gov)
:link(ld,Manual.html)
:link(lc,Commands_all.html)

:line

Using LAMMPS in client/server mode :h3

Client/server coupling of two codes is where one code is the "client"
and sends request messages to a "server" code. The server responds to
each request with a reply message. This enables the two codes to work
in tandem to perform a simulation. LAMMPS can act as either a client
or server code.

Some advantages of client/server coupling are that the two codes run
as stand-alone executables; they are not linked together. Thus
neither code needs to have a library interface. This often makes it
easier to run the two codes on different numbers of processors. If a
message protocol (format and content) is defined for a particular kind
of simulation, then in principle any code that implements the
client-side protocol can be used in tandem with any code that
implements the server-side protocol, without the two codes needing to
know anything more specific about each other.

A simple example of client/server coupling is where LAMMPS is the
client code performing MD timestepping. Each timestep it sends a
message to a server quantum code containing current coords of all the
atoms. The quantum code computes energy and forces based on the
coords. It returns them as a message to LAMMPS, which completes the
timestep.

Alternate methods for code coupling with LAMMPS are described on
the "Howto couple"_Howto_couple.html doc page.

LAMMPS support for client/server coupling is in its "MESSAGE
package"_Packages_details.html#PKG-MESSAGE which implements several
commands that enable LAMMPS to act as a client or server, as discussed
below. The MESSAGE package also wraps a client/server library called
CSlib which enables two codes to exchange messages in different ways,
either via files, sockets, or MPI. The CSlib is provided with LAMMPS
in the lib/message dir. The CSlib has its own
"website"_http://cslib.sandia.gov with documentation and test
programs.

NOTE: For client/server coupling to work between LAMMPS and another
code, the other code also has to use the CSlib. This can sometimes be
done without any modifications to the other code by simply wrapping it
with a Python script that exchanges CSlib messages with LAMMPS and
prepares input for or processes output from the other code. The other
code also has to implement a matching protocol for the format and
content of messages that LAMMPS exchanges with it.

These are the commands currently in the MESSAGE package for two
protocols, MD and MC (Monte Carlo). New protocols can easily be
defined and added to this directory, where LAMMPS acts as either the
client or server.

"message"_message.html
"fix client md"_fix_client_md.html = LAMMPS is a client for running MD
"server md"_server_md.html = LAMMPS is a server for computing MD forces
"server mc"_server_mc.html = LAMMPS is a server for computing a Monte Carlo energy :ul

The server doc files give details of the message protocols
for data that is exchanged between the client and server.

These example directories illustrate how to use LAMMPS as either a
client or server code:

examples/message
examples/COUPLE/README
examples/COUPLE/lammps_mc
examples/COUPLE/lammps_vasp :ul

The examples/message dir couples a client instance of LAMMPS to a
server instance of LAMMPS.

The lammps_mc dir shows how to couple LAMMPS as a server to a simple
Monte Carlo client code as the driver.

The lammps_vasp dir shows how to couple LAMMPS as a client code
running MD timestepping to VASP acting as a server providing quantum
DFT forces, through a Python wrapper script on VASP.

Here is how to launch a client and server code together for any of the
4 modes of message exchange that the "message"_message.html command
and the CSlib support. Here LAMMPS is used as both the client and
server code. Another code could be substituted for either.

The examples below show launching both codes from the same window (or
batch script), using the "&" character to launch the first code in the
background. For all modes except {mpi/one}, you could also launch the
codes in separate windows on your desktop machine. It does not
matter whether you launch the client or server first.

In these examples either code can be run on one or more processors.
If running in a non-MPI mode (file or zmq) you can launch a code on a
single processor without using mpirun.

IMPORTANT: If you run in mpi/two mode, you must launch both codes via
mpirun, even if one or both of them runs on a single processor. This
is so that MPI can figure out how to connect both MPI processes
together to exchange MPI messages between them.

For message exchange in {file}, {zmq}, or {mpi/two} modes:

% mpirun -np 1 lmp_mpi -log log.client < in.client &
% mpirun -np 2 lmp_mpi -log log.server < in.server :pre

% mpirun -np 4 lmp_mpi -log log.client < in.client &
% mpirun -np 1 lmp_mpi -log log.server < in.server :pre

% mpirun -np 2 lmp_mpi -log log.client < in.client &
% mpirun -np 4 lmp_mpi -log log.server < in.server :pre

For message exchange in {mpi/one} mode:

Launch both codes in a single mpirun command:

mpirun -np 2 lmp_mpi -mpicolor 0 -in in.message.client -log log.client : -np 4 lmp_mpi -mpicolor 1 -in in.message.server -log log.server :pre

The two -np values determine how many procs the client and the server
run on.

A LAMMPS executable run in this manner must use the -mpicolor color
command-line option as its first option, where color is an integer
label that will be used to distinguish one executable from another in
the multiple executables that the mpirun command launches. In this
example the client was colored with a 0, and the server with a 1.

@ -16,10 +16,12 @@ atoms and pass those forces to LAMMPS. Or a continuum finite element
nodal points, compute a FE solution, and return interpolated forces on
MD atoms.

LAMMPS can be coupled to other codes in at least 3 ways. Each has
LAMMPS can be coupled to other codes in at least 4 ways. Each has
advantages and disadvantages, which you'll have to think about in the
context of your application.

:line

(1) Define a new "fix"_fix.html command that calls the other code. In
this scenario, LAMMPS is the driver code. During its timestepping,
the fix is invoked, and can make library calls to the other code,
@ -32,6 +34,8 @@ LAMMPS.

:link(poems,http://www.rpi.edu/~anderk5/lab)

:line

(2) Define a new LAMMPS command that calls the other code. This is
conceptually similar to method (1), but in this case LAMMPS and the
other code are on a more equal footing. Note that now the other code
@ -52,6 +56,8 @@ command writes and reads.

See the "Modify command"_Modify_command.html doc page for info on how
to add a new command to LAMMPS.

:line

(3) Use LAMMPS as a library called by another code. In this case the
other code is the driver and calls LAMMPS as needed. Or a wrapper
code could link and call both LAMMPS and another code as libraries.
@ -102,3 +108,9 @@ on all the processors. Or it might allocate half the processors to
LAMMPS and half to the other code and run both codes simultaneously
before syncing them up periodically. Or it might instantiate multiple
instances of LAMMPS to perform different calculations.

:line

(4) Couple LAMMPS with another code in a client/server mode. This is
described on the "Howto client/server"_Howto_client_server.html doc
page.


@ -24,6 +24,11 @@ by subtracting out the streaming velocity of the shearing atoms. The
velocity profile or other properties of the fluid can be monitored via
the "fix ave/chunk"_fix_ave_chunk.html command.

NOTE: A recent (2017) book by "(Daivis and Todd)"_#Daivis-nemd
discusses use of the SLLOD method and non-equilibrium MD (NEMD)
thermostatting generally, for both simple and complex fluids,
e.g. molecular systems. The latter can be tricky to do correctly.

As discussed in the previous section on non-orthogonal simulation
boxes, the amount of tilt or skew that can be applied is limited by
LAMMPS for computational efficiency to be 1/2 of the parallel box

@ -46,3 +51,9 @@ An alternative method for calculating viscosities is provided via the
NEMD simulations can also be used to measure transport properties of a fluid
through a pore or channel. Simulations of steady-state flow can be performed
using the "fix flow/gauss"_fix_flow_gauss.html command.

:line

:link(Daivis-nemd)
[(Daivis and Todd)] Daivis and Todd, Nonequilibrium Molecular Dynamics (book),
Cambridge University Press, https://doi.org/10.1017/9781139017848, (2017).

@ -43,6 +43,11 @@ nvt/asphere"_fix_nvt_asphere.html thermostat not only translation
velocities but also rotational velocities for spherical and aspherical
particles.

NOTE: A recent (2017) book by "(Daivis and Todd)"_#Daivis-thermostat
discusses use of the SLLOD method and non-equilibrium MD (NEMD)
thermostatting generally, for both simple and complex fluids,
e.g. molecular systems. The latter can be tricky to do correctly.

DPD thermostatting alters pairwise interactions in a manner analogous
to the per-particle thermostatting of "fix
langevin"_fix_langevin.html.

@ -87,3 +92,9 @@ specify them explicitly via the "thermo_style
custom"_thermo_style.html command. Or you can use the
"thermo_modify"_thermo_modify.html command to re-define what
temperature compute is used for default thermodynamic output.
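
As a minimal sketch of that last point (the compute ID is
illustrative), a bias-aware temperature compute can be swapped in for
the default one used by thermodynamic output:

compute mytemp all temp/deform
thermo_modify temp mytemp :pre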

:line

:link(Daivis-thermostat)
[(Daivis and Todd)] Daivis and Todd, Nonequilibrium Molecular Dynamics (book),
Cambridge University Press, https://doi.org/10.1017/9781139017848, (2017).

@ -37,6 +37,11 @@ used to shear the fluid in between them, again with some kind of
thermostat that modifies only the thermal (non-shearing) components of
velocity to prevent the fluid from heating up.

NOTE: A recent (2017) book by "(Daivis and Todd)"_#Daivis-viscosity
discusses use of the SLLOD method and non-equilibrium MD (NEMD)
thermostatting generally, for both simple and complex fluids,
e.g. molecular systems. The latter can be tricky to do correctly.

In both cases, the velocity profile set up in the fluid by this
procedure can be monitored by the "fix ave/chunk"_fix_ave_chunk.html
command, which determines grad(Vstream) in the equation above.

@ -131,3 +136,9 @@ mean-square-displacement formulation for self-diffusivity. The
time-integrated momentum fluxes play the role of Cartesian
coordinates, whose mean-square displacement increases linearly
with time at sufficiently long times.

:line

:link(Daivis-viscosity)
[(Daivis and Todd)] Daivis and Todd, Nonequilibrium Molecular Dynamics (book),
Cambridge University Press, https://doi.org/10.1017/9781139017848, (2017).

@ -7,7 +7,7 @@ Documentation"_ld - "LAMMPS Commands"_lc :c

:line

Download source as a tarball :h3
Download source and documentation as a tarball :h3

You can download a current LAMMPS tarball from the "download page"_download
of the "LAMMPS website"_lws.

@ -22,6 +22,10 @@ few times per year, and undergo more testing before release. Patch
releases occur a couple times per month. The new contents in all
releases are listed on the "bug and feature page"_bug of the website.

Both tarballs include LAMMPS documentation (HTML and PDF files)
corresponding to that version. The download page also has an option
to download the current-version LAMMPS documentation by itself.

Older versions of LAMMPS can also be downloaded from "this
page"_older.
Binary file not shown.
After Width: | Height: | Size: 31 KiB |

@ -1,7 +1,7 @@
<!-- HTML_ONLY -->
<HEAD>
<TITLE>LAMMPS Users Manual</TITLE>
<META NAME="docnumber" CONTENT="22 Aug 2018 version">
<META NAME="docnumber" CONTENT="18 Sep 2018 version">
<META NAME="author" CONTENT="http://lammps.sandia.gov - Sandia National Laboratories">
<META NAME="copyright" CONTENT="Copyright (2003) Sandia Corporation. This software and manual is distributed under the GNU General Public License.">
</HEAD>

@ -21,7 +21,7 @@

:line

LAMMPS Documentation :c,h1
22 Aug 2018 version :c,h2
18 Sep 2018 version :c,h2

"What is a LAMMPS version?"_Manual_version.html

@ -46,6 +46,7 @@ as contained in the file name.
"MANYBODY"_#PKG-MANYBODY,
"MC"_#PKG-MC,
"MEAM"_#PKG-MEAM,
"MESSAGE"_#PKG-MESSAGE,
"MISC"_#PKG-MISC,
"MOLECULE"_#PKG-MOLECULE,
"MPIIO"_#PKG-MPIIO,

@ -88,10 +89,12 @@ as contained in the file name.
"USER-NETCDF"_#PKG-USER-NETCDF,
"USER-OMP"_#PKG-USER-OMP,
"USER-PHONON"_#PKG-USER-PHONON,
"USER-PTM"_#PKG-USER-PTM,
"USER-QMMM"_#PKG-USER-QMMM,
"USER-QTB"_#PKG-USER-QTB,
"USER-QUIP"_#PKG-USER-QUIP,
"USER-REAXC"_#PKG-USER-REAXC,
"USER-SCAFACOS"_#PKG-USER-SCAFACOS,
"USER-SMD"_#PKG-USER-SMD,
"USER-SMTBQ"_#PKG-USER-SMTBQ,
"USER-SPH"_#PKG-USER-SPH,

@ -549,10 +552,6 @@ This package has "specific installation
instructions"_Build_extras.html#gpu on the "Build
extras"_Build_extras.html doc page.

NOTE: You should test building the MEAM library with both the Intel
and GNU compilers to see if a simulation runs faster with one versus
the other on your system.

[Supporting info:]

src/MEAM: filenames -> commands

@ -563,6 +562,31 @@ examples/meam :ul

:line

MESSAGE package :link(PKG-MESSAGE),h4

[Contents:]

Commands to use LAMMPS as either a client or server and couple it to
another application.

[Install:]

This package has "specific installation
instructions"_Build_extras.html#message on the "Build
extras"_Build_extras.html doc page.

[Supporting info:]

src/MESSAGE: filenames -> commands
lib/message/README
"message"_message.html
"fix client/md"_fix_client_md.html
"server md"_server_md.html
"server mc"_server_mc.html
examples/message :ul

:line

MISC package :link(PKG-MISC),h4

[Contents:]

@ -1721,6 +1745,25 @@ examples/USER/phonon :ul

:line

USER-PTM package :link(PKG-USER-PTM),h4

[Contents:]

A "compute ptm/atom"_compute_ptm.html command that calculates
local structure characterization using the Polyhedral Template
Matching methodology.

[Author:] Peter Mahler Larsen (MIT).

[Supporting info:]

src/USER-PTM: filenames -> commands
src/USER-PTM/README
"compute ptm/atom"_compute_ptm.html :ul

:line

USER-QMMM package :link(PKG-USER-QMMM),h4

[Contents:]

@ -1838,6 +1881,41 @@ examples/reax :ul

:line

USER-SCAFACOS package :link(PKG-USER-SCAFACOS),h4

[Contents:]

A KSpace style which wraps the "ScaFaCoS Coulomb solver
library"_http://www.scafacos.de to compute long-range Coulombic
interactions.

To use this package you must have the ScaFaCoS library available on
your system.

[Author:] Rene Halver (JSC) wrote the scafacos LAMMPS command.

ScaFaCoS itself was developed by a consortium of German research
facilities with a BMBF (German Ministry of Science and Education)
funded project in 2009-2012. Participants of the consortium were the
Universities of Bonn, Chemnitz, Stuttgart, and Wuppertal as well as
the Forschungszentrum Juelich.

[Install:]

This package has "specific installation
instructions"_Build_extras.html#user-scafacos on the "Build
extras"_Build_extras.html doc page.

[Supporting info:]

src/USER-SCAFACOS: filenames -> commands
src/USER-SCAFACOS/README
"kspace_style scafacos"_kspace_style.html
"kspace_modify"_kspace_modify.html
examples/USER/scafacos :ul

:line

USER-SMD package :link(PKG-USER-SMD),h4

[Contents:]

@ -47,6 +47,7 @@ Package, Description, Doc page, Example, Library
"MANYBODY"_Packages_details.html#PKG-MANYBODY, many-body potentials, "pair_style tersoff"_pair_tersoff.html, shear, no
"MC"_Packages_details.html#PKG-MC, Monte Carlo options, "fix gcmc"_fix_gcmc.html, n/a, no
"MEAM"_Packages_details.html#PKG-MEAM, modified EAM potential, "pair_style meam"_pair_meam.html, meam, int
"MESSAGE"_Packages_details.html#PKG-MESSAGE, client/server messaging, "message"_message.html, message, int
"MISC"_Packages_details.html#PKG-MISC, miscellaneous single-file commands, n/a, no, no
"MOLECULE"_Packages_details.html#PKG-MOLECULE, molecular system force fields, "Howto bioFF"_Howto_bioFF.html, peptide, no
"MPIIO"_Packages_details.html#PKG-MPIIO, MPI parallel I/O dump and restart, "dump"_dump.html, n/a, no

@ -62,10 +62,12 @@ Package, Description, Doc page, Example, Library
"USER-NETCDF"_Packages_details.html#PKG-USER-NETCDF, dump output via NetCDF,"dump netcdf"_dump_netcdf.html, n/a, ext
"USER-OMP"_Packages_details.html#PKG-USER-OMP, OpenMP-enabled styles,"Speed omp"_Speed_omp.html, "Benchmarks"_http://lammps.sandia.gov/bench.html, no
"USER-PHONON"_Packages_details.html#PKG-USER-PHONON, phonon dynamical matrix,"fix phonon"_fix_phonon.html, USER/phonon, no
"USER-PTM"_Packages_details.html#PKG-USER-PTM, Polyhedral Template Matching,"compute ptm/atom"_compute_ptm.html, n/a, no
"USER-QMMM"_Packages_details.html#PKG-USER-QMMM, QM/MM coupling,"fix qmmm"_fix_qmmm.html, USER/qmmm, ext
"USER-QTB"_Packages_details.html#PKG-USER-QTB, quantum nuclear effects,"fix qtb"_fix_qtb.html "fix qbmsst"_fix_qbmsst.html, qtb, no
"USER-QUIP"_Packages_details.html#PKG-USER-QUIP, QUIP/libatoms interface,"pair_style quip"_pair_quip.html, USER/quip, ext
"USER-REAXC"_Packages_details.html#PKG-USER-REAXC, ReaxFF potential (C/C++),"pair_style reaxc"_pair_reaxc.html, reax, no
"USER-SCAFACOS"_Packages_details.html#PKG-USER-SCAFACOS, wrapper on ScaFaCoS solver,"kspace_style scafacos"_kspace_style.html, USER/scafacos, ext
"USER-SMD"_Packages_details.html#PKG-USER-SMD, smoothed Mach dynamics,"SMD User Guide"_PDF/SMD_LAMMPS_userguide.pdf, USER/smd, ext
"USER-SMTBQ"_Packages_details.html#PKG-USER-SMTBQ, second moment tight binding QEq potential,"pair_style smtbq"_pair_smtbq.html, USER/smtbq, no
"USER-SPH"_Packages_details.html#PKG-USER-SPH, smoothed particle hydrodynamics,"SPH User Guide"_PDF/SPH_LAMMPS_userguide.pdf, USER/sph, no

@ -18,6 +18,7 @@ letter abbreviation can be used:
"-i or -in"_#file
"-k or -kokkos"_#run-kokkos
"-l or -log"_#log
"-m or -mpicolor"_#mpicolor
"-nc or -nocite"_#nocite
"-pk or -package"_#package
"-p or -partition"_#partition

@ -175,6 +176,30 @@ Option -plog will override the name of the partition log files file.N.

:line

[-mpicolor] color :link(mpicolor)

If used, this must be the first command-line argument after the LAMMPS
executable name. It is only used when LAMMPS is launched by an mpirun
command which also launches one or more other executables at the same
time. (The other executables could be LAMMPS as well.) The color is an
integer value which should be different for each executable (another
application may set this value in a different way). LAMMPS and the
other executable(s) perform an MPI_Comm_split() with their own colors
to shrink the MPI_COMM_WORLD communication to be the subset of
processors they are actually running on.

Currently, this is only used in LAMMPS to perform client/server
messaging with another application. LAMMPS can act as either a client
or server (or both). More details are given on the "Howto
client/server"_Howto_client_server.html doc page.

Specifically, this refers to the "mpi/one" mode of messaging provided
by the "message"_message.html command and the CSlib library LAMMPS
links with from the lib/message directory. See the
"message"_message.html command for more details.

:line

[-nocite] :link(nocite)

Disable writing the log.cite file which is normally written to list

@ -106,6 +106,11 @@ modification to the input script is needed. Alternatively, one can run
with the KOKKOS package by editing the input script as described
below.

NOTE: When using a single OpenMP thread, the Kokkos Serial backend (i.e.
Makefile.kokkos_mpi_only) will give better performance than the OpenMP
backend (i.e. Makefile.kokkos_omp) because some of the overhead to make
the code thread-safe is removed.

NOTE: The default for the "package kokkos"_package.html command is to
use "full" neighbor lists and set the Newton flag to "off" for both
pairwise and bonded interactions. However, when running on CPUs, it

@ -122,6 +127,22 @@ mpirun -np 16 lmp_kokkos_mpi_only -k on -sf kk -pk kokkos newton on neigh half c
If the "newton"_newton.html command is used in the input
script, it can also override the Newton flag defaults.

For half neighbor lists and OpenMP, the KOKKOS package uses data
duplication (i.e. thread-private arrays) by default to avoid
thread-level write conflicts in the force arrays (and other data
structures as necessary). Data duplication is typically fastest for
small numbers of threads (i.e. 8 or less) but does increase memory
footprint and is not scalable to large numbers of threads. An
alternative to data duplication is to use thread-level atomics, which
don't require duplication. The use of atomics can be forced by compiling
with the "-DLMP_KOKKOS_USE_ATOMICS" compile switch. Most but not all
Kokkos-enabled pair_styles support data duplication. Alternatively, full
neighbor lists avoid the need for duplication or atomics but require
more compute operations per atom. When using the Kokkos Serial backend
or the OpenMP backend with a single thread, no duplication or atomics are
used. For CUDA and half neighbor lists, the KOKKOS package always uses
atomics.
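
For reference, a typical OpenMP-backend launch that exercises these
settings is shown below; the process and thread counts are
illustrative, and the "package kokkos"_package.html doc page remains
the authoritative source for the -pk options:

mpirun -np 4 lmp_kokkos_omp -k on t 4 -sf kk -pk kokkos newton on neigh half -in in.lj :pre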

[Core and Thread Affinity:]

When using multi-threading, it is important for performance to bind

@ -56,6 +56,7 @@ Commands :h1

lattice
log
mass
message
min_modify
min_style
minimize

@ -87,6 +88,9 @@ Commands :h1

restart
run
run_style
server
server_mc
server_md
set
shell
special_bonds

@ -183,6 +183,7 @@ compute"_Commands_compute.html doc page are followed by one or more of
"bond/local"_compute_bond_local.html - distance and energy of each bond
"centro/atom"_compute_centro_atom.html - centro-symmetry parameter for each atom
"chunk/atom"_compute_chunk_atom.html - assign chunk IDs to each atom
"chunk/spread/atom"_compute_chunk_spread_atom.html - spreads chunk values to each atom in chunk
"cluster/atom"_compute_cluster_atom.html - cluster ID for each atom
"cna/atom"_compute_cna_atom.html - common neighbor analysis (CNA) for each atom
"com"_compute_com.html - center-of-mass of group of atoms

@ -225,6 +226,7 @@ compute"_Commands_compute.html doc page are followed by one or more of
"property/chunk"_compute_property_chunk.html - extract various per-chunk attributes
"rdf"_compute_rdf.html - radial distribution function g(r) histogram of group of atoms
"reduce"_compute_reduce.html - combine per-atom quantities into a single global value
"reduce/chunk"_compute_reduce_chunk.html - reduce per-atom quantities within each chunk
"reduce/region"_compute_reduce.html - same as compute reduce, within a region
"rigid/local"_compute_rigid_local.html - extract rigid body attributes
"slice"_compute_slice.html - extract values from global vector or array

@ -10,20 +10,27 @@ compute angle/local command :h3

[Syntax:]

compute ID group-ID angle/local value1 value2 ... :pre
compute ID group-ID angle/local value1 value2 ... keyword args ... :pre

ID, group-ID are documented in "compute"_compute.html command :ulb,l
angle/local = style name of this compute command :l
one or more values may be appended :l
value = {theta} or {eng} :l
value = {theta} or {eng} or {v_name} :l
{theta} = tabulate angles
{eng} = tabulate angle energies :pre
{eng} = tabulate angle energies
{v_name} = equal-style variable with name (see below) :pre
zero or more keyword/args pairs may be appended :l
keyword = {set} :l
{set} args = theta name
theta = only currently allowed arg
name = name of variable to set with theta :pre
:ule

[Examples:]

compute 1 all angle/local theta
compute 1 all angle/local eng theta :pre
compute 1 all angle/local eng theta
compute 1 all angle/local theta v_cos set theta t :pre

[Description:]

@ -36,6 +43,47 @@ The value {theta} is the angle for the 3 atoms in the interaction.

The value {eng} is the interaction energy for the angle.

The value {v_name} can be used together with the {set} keyword to
compute a user-specified function of the angle theta. The {name}
specified for the {v_name} value is the name of an "equal-style
variable"_variable.html which should evaluate a formula based on a
variable which will store the angle theta. This other variable must
be an "internal-style variable"_variable.html defined in the input
script; its initial numeric value can be anything. It must be an
internal-style variable, because this command resets its value
directly. The {set} keyword is used to identify the name of this
other variable associated with theta.

Note that the value of theta for each angle which is stored in the
internal variable is in radians, not degrees.

As an example, these commands can be added to the bench/in.rhodo
script to compute the cosine and cosine^2 of every angle in the system
and output the statistics in various ways:

variable t internal 0.0
variable cos equal cos(v_t)
variable cossq equal cos(v_t)*cos(v_t) :pre

compute 1 all property/local aatom1 aatom2 aatom3 atype
compute 2 all angle/local eng theta v_cos v_cossq set theta t
dump 1 all local 100 tmp.dump c_1[*] c_2[*] :pre

compute 3 all reduce ave c_2[*]
thermo_style custom step temp press c_3[*] :pre

fix 10 all ave/histo 10 10 100 -1 1 20 c_2[3] mode vector file tmp.histo :pre

The "dump local"_dump.html command will output the energy, angle,
cosine(angle), cosine^2(angle) for every angle in the system. The
"thermo_style"_thermo_style.html command will print the average of
those quantities via the "compute reduce"_compute_reduce.html command
with thermo output. And the "fix ave/histo"_fix_ave_histo.html
command will histogram the cosine(angle) values and write them to a
file.

:line

The local data stored by this command is generated by looping over all
the atoms owned on a processor and their angles. An angle will only
be included if all 3 atoms in the angle are in the specified compute

@ -65,12 +113,12 @@ dump 1 all local 1000 tmp.dump index c_1\[1\] c_1\[2\] c_1\[3\] c_1\[4\] c_2\[1\
[Output info:]

This compute calculates a local vector or local array depending on the
number of keywords. The length of the vector or number of rows in the
array is the number of angles. If a single keyword is specified, a
local vector is produced. If two or more keywords are specified, a
number of values. The length of the vector or number of rows in the
array is the number of angles. If a single value is specified, a
local vector is produced. If two or more values are specified, a
local array is produced where the number of columns = the number of
keywords. The vector or array can be accessed by any command that
uses local values from a compute as input. See the "Howto
values. The vector or array can be accessed by any command that uses
local values from a compute as input. See the "Howto
output"_Howto_output.html doc page for an overview of LAMMPS output
options.

@ -10,12 +10,12 @@ compute bond/local command :h3

[Syntax:]

compute ID group-ID bond/local value1 value2 ... :pre
compute ID group-ID bond/local value1 value2 ... keyword args ... :pre

ID, group-ID are documented in "compute"_compute.html command :ulb,l
bond/local = style name of this compute command :l
one or more values may be appended :l
value = {dist} or {engpot} or {force} or {engvib} or {engrot} or {engtrans} or {omega} or {velvib} :l
value = {dist} or {engpot} or {force} or {engvib} or {engrot} or {engtrans} or {omega} or {velvib} or {v_name} :l
{dist} = bond distance
{engpot} = bond potential energy
{force} = bond force :pre

@ -23,13 +23,22 @@ value = {dist} or {engpot} or {force} or {engvib} or {engrot} or {engtrans} or {
{engrot} = bond kinetic energy of rotation
{engtrans} = bond kinetic energy of translation
{omega} = magnitude of bond angular velocity
{velvib} = vibrational velocity along the bond length :pre
{velvib} = vibrational velocity along the bond length
{v_name} = equal-style variable with name (see below) :pre
zero or more keyword/args pairs may be appended :l
keyword = {set} :l
{set} args = dist name
dist = only currently allowed arg
name = name of variable to set with distance (dist) :pre
:ule

:ule

[Examples:]

compute 1 all bond/local engpot
compute 1 all bond/local dist engpot force :pre
compute 1 all bond/local dist v_distsq set dist d :pre

[Description:]

@ -38,6 +47,10 @@ interactions. The number of datums generated, aggregated across all
processors, equals the number of bonds in the system, modified by the
group parameter as explained below.

All these properties are computed for the pair of atoms in a bond,
whether the 2 atoms represent a simple diatomic molecule, or are part
of some larger molecule.

The value {dist} is the current length of the bond.

The value {engpot} is the potential energy for the bond,

@ -79,9 +92,41 @@ two atoms in the bond towards each other. A negative value means the
2 atoms are moving toward each other; a positive value means they are
moving apart.

Note that all these properties are computed for the pair of atoms in a
bond, whether the 2 atoms represent a simple diatomic molecule, or are
part of some larger molecule.
The value {v_name} can be used together with the {set} keyword to
compute a user-specified function of the bond distance. The {name}
specified for the {v_name} value is the name of an "equal-style
variable"_variable.html which should evaluate a formula based on a
variable which will store the bond distance. This other variable must
be an "internal-style variable"_variable.html defined in the input
script; its initial numeric value can be anything. It must be an
internal-style variable, because this command resets its value
directly. The {set} keyword is used to identify the name of this
other variable associated with the distance (dist).

As an example, these commands can be added to the bench/in.rhodo
script to compute the distance^2 of every bond in the system and
output the statistics in various ways:

variable d internal 0.0
variable dsq equal v_d*v_d :pre

compute 1 all property/local batom1 batom2 btype
compute 2 all bond/local engpot dist v_dsq set dist d
dump 1 all local 100 tmp.dump c_1[*] c_2[*] :pre

compute 3 all reduce ave c_2[*]
thermo_style custom step temp press c_3[*] :pre

fix 10 all ave/histo 10 10 100 0 6 20 c_2[3] mode vector file tmp.histo :pre

The "dump local"_dump.html command will output the energy, distance,
distance^2 for every bond in the system. The
"thermo_style"_thermo_style.html command will print the average of
those quantities via the "compute reduce"_compute_reduce.html command
with thermo output. And the "fix ave/histo"_fix_ave_histo.html
command will histogram the distance^2 values and write them to a file.

:line

The local data stored by this command is generated by looping over all
the atoms owned on a processor and their bonds. A bond will only be

@ -111,12 +156,12 @@ dump 1 all local 1000 tmp.dump index c_1\[*\] c_2\[*\] :pre
[Output info:]

This compute calculates a local vector or local array depending on the
number of keywords. The length of the vector or number of rows in the
array is the number of bonds. If a single keyword is specified, a
local vector is produced. If two or more keywords are specified, a
local array is produced where the number of columns = the number of
keywords. The vector or array can be accessed by any command that
uses local values from a compute as input. See the "Howto
number of values. The length of the vector or number of rows in the
array is the number of bonds. If a single value is specified, a local
vector is produced. If two or more values are specified, a local
array is produced where the number of columns = the number of values.
The vector or array can be accessed by any command that uses local
values from a compute as input. See the "Howto
output"_Howto_output.html doc page for an overview of LAMMPS output
options.

@ -14,7 +14,7 @@ compute ID group-ID chunk/atom style args keyword values ... :pre

ID, group-ID are documented in "compute"_compute.html command :ulb,l
chunk/atom = style name of this compute command :l
style = {bin/1d} or {bin/2d} or {bin/3d} or {bin/sphere} or {type} or {molecule} or {compute/fix/variable}
style = {bin/1d} or {bin/2d} or {bin/3d} or {bin/sphere} or {type} or {molecule} or c_ID, c_ID\[I\], f_ID, f_ID\[I\], v_name
{bin/1d} args = dim origin delta
dim = {x} or {y} or {z}
origin = {lower} or {center} or {upper} or coordinate value (distance units)

@ -40,7 +40,7 @@ style = {bin/1d} or {bin/2d} or {bin/3d} or {bin/sphere} or {type} or {molecule}
ncbin = # of concentric circle bins between rmin and rmax
{type} args = none
{molecule} args = none
{compute/fix/variable} = c_ID, c_ID\[I\], f_ID, f_ID\[I\], v_name with no args
c_ID, c_ID\[I\], f_ID, f_ID\[I\], v_name args = none
c_ID = per-atom vector calculated by a compute with ID
c_ID\[I\] = Ith column of per-atom array calculated by a compute with ID
f_ID = per-atom vector calculated by a fix with ID

@ -85,7 +85,8 @@ compute 1 all chunk/atom bin/1d z lower 0.02 units reduced
compute 1 all chunk/atom bin/2d z lower 1.0 y 0.0 2.5
compute 1 all chunk/atom molecule region sphere nchunk once ids once compress yes
compute 1 all chunk/atom bin/sphere 5 5 5 2.0 5.0 5 discard yes
compute 1 all chunk/atom bin/cylinder z lower 2 10 10 2.0 5.0 3 discard yes :pre
compute 1 all chunk/atom bin/cylinder z lower 2 10 10 2.0 5.0 3 discard yes
compute 1 all chunk/atom c_cluster :pre

[Description:]

@ -386,8 +387,8 @@ described below, which resets {Nchunk}. The {limit} keyword is then
applied to the new {Nchunk} value, exactly as described in the
preceding paragraph. Note that in this case, all atoms will end up
with chunk IDs <= {Nc}, but their original values (e.g. molecule ID or
compute/fix/variable value) may have been > {Nc}, because of the
compression operation.
compute/fix/variable) may have been > {Nc}, because of the compression
operation.

If {compress yes} is set, and the {compress} keyword comes after the
{limit} keyword, then the {limit} value of {Nc} is applied first to

@ -0,0 +1,174 @@
"LAMMPS WWW Site"_lws - "LAMMPS Documentation"_ld - "LAMMPS Commands"_lc :c

:link(lws,http://lammps.sandia.gov)
:link(ld,Manual.html)
:link(lc,Commands_all.html)

:line

compute chunk/spread/atom command :h3

[Syntax:]

compute ID group-ID chunk/spread/atom chunkID input1 input2 ... :pre

ID, group-ID are documented in "compute"_compute.html command :ulb,l
chunk/spread/atom = style name of this compute command :l
chunkID = ID of "compute chunk/atom"_compute_chunk_atom.html command :l
one or more inputs can be listed :l
input = c_ID, c_ID\[N\], f_ID, f_ID\[N\] :l
c_ID = global vector calculated by a compute with ID
c_ID\[I\] = Ith column of global array calculated by a compute with ID, I can include wildcard (see below)
f_ID = global vector calculated by a fix with ID
f_ID\[I\] = Ith column of global array calculated by a fix with ID, I can include wildcard (see below) :pre
:ule

[Examples:]

compute 1 all chunk/spread/atom mychunk c_com[*] c_gyration :pre

[Description:]

Define a calculation that "spreads" one or more per-chunk values to
each atom in the chunk. This can be useful for creating a "dump
file"_dump.html where each atom lists info about the chunk it is in,
e.g. for post-processing purposes. It can also be used in "atom-style
variables"_variable.html that need info about the chunk each atom is
in. Examples are given below.

In LAMMPS, chunks are collections of atoms defined by a "compute
chunk/atom"_compute_chunk_atom.html command, which assigns each atom
to a single chunk (or no chunk). The ID for this command is specified
as chunkID. For example, a single chunk could be the atoms in a
molecule or atoms in a spatial bin. See the "compute
chunk/atom"_compute_chunk_atom.html and "Howto chunk"_Howto_chunk.html
doc pages for details of how chunks can be defined and examples of how
they can be used to measure properties of a system.

For inputs that are computes, they must be a compute that calculates
per-chunk values. These are computes whose style names end in
"/chunk".

For inputs that are fixes, they should be a fix that calculates
per-chunk values. For example, "fix ave/chunk"_fix_ave_chunk.html or
"fix ave/time"_fix_ave_time.html (assuming it is time-averaging
per-chunk data).

For each atom, this compute accesses its chunk ID from the specified
{chunkID} compute, then accesses the per-chunk value in each input.
Those values are copied to this compute to become the output for that
atom.

The values generated by this compute will be 0.0 for atoms not in the
specified compute group {group-ID}. They will also be 0.0 if the atom
is not in a chunk, as assigned by the {chunkID} compute. They will
also be 0.0 if the current chunk ID for the atom is out-of-bounds with
respect to the number of chunks stored by a particular input compute
or fix.

NOTE: LAMMPS does not check that a compute or fix which calculates
per-chunk values uses the same definition of chunks as this compute.
It's up to you to be consistent. Likewise, for a fix input, LAMMPS
does not check that it is per-chunk data. It only checks that the fix
produces a global vector or array.

:line

Each listed input is operated on independently.

If a bracketed index I is used, it can be specified using a wildcard
asterisk with the index to effectively specify multiple values. This
takes the form "*" or "*n" or "n*" or "m*n". If N = the number of
columns in the array, then an asterisk with no numeric values means
all indices from 1 to N. A leading asterisk means all indices from 1
to n (inclusive). A trailing asterisk means all indices from n to N
(inclusive). A middle asterisk means all indices from m to n
(inclusive).

Using a wildcard is the same as if the individual columns of the array
had been listed one by one. E.g. these 2 compute chunk/spread/atom
commands are equivalent, since the "compute
com/chunk"_compute_com_chunk.html command creates a global array
with 3 columns:

compute com all com/chunk mychunk
compute 10 all chunk/spread/atom mychunk c_com\[*\]
compute 10 all chunk/spread/atom mychunk c_com\[1\] c_com\[2\] c_com\[3\] :pre

:line

Here is an example of writing a dump file with the center-of-mass
(COM) for the chunk each atom is in. The commands below can be added
to the bench/in.chain script.

compute cmol all chunk/atom molecule
compute com all com/chunk cmol
compute comchunk all chunk/spread/atom cmol c_com[*]
dump 1 all custom 50 tmp.dump id mol type x y z c_comchunk[*]
dump_modify 1 sort id :pre

The same per-chunk data for each atom could be used to define per-atom
forces for the "fix addforce"_fix_addforce.html command. In this
example the forces act to pull atoms of an extended polymer chain
towards its COM in an attractive manner.

compute prop all property/atom xu yu zu
variable k equal 0.1
variable fx atom v_k*(c_comchunk\[1\]-c_prop\[1\])
variable fy atom v_k*(c_comchunk\[2\]-c_prop\[2\])
variable fz atom v_k*(c_comchunk\[3\]-c_prop\[3\])
fix 3 all addforce v_fx v_fy v_fz :pre

Note that "compute property/atom"_compute_property_atom.html is used
to generate unwrapped coordinates for use in the per-atom force
calculation, so that the effect of periodic boundaries is accounted
for properly.

Over time this applied force could shrink each polymer chain's radius
of gyration in a polymer mixture simulation. Here is output from the
bench/in.chain script. Thermo output is shown for 1000 steps, where
the last column is the average radius of gyration over all 320 chains
in the 32000 atom system:

compute gyr all gyration/chunk cmol
variable ave equal ave(c_gyr)
thermo_style custom step etotal press v_ave :pre

0 22.394765 4.6721833 5.128278
100 22.445002 4.8166709 5.0348372
200 22.500128 4.8790392 4.9364875
300 22.534686 4.9183766 4.8590693
400 22.557196 4.9492211 4.7937849
500 22.571017 4.9161853 4.7412008
600 22.573944 5.0229708 4.6931243
700 22.581804 5.0541301 4.6440647
800 22.584683 4.9691734 4.6000016
900 22.59128 5.0247538 4.5611513
1000 22.586832 4.94697 4.5238362 :pre

:line

[Output info:]

This compute calculates a per-atom vector or array, which can be
accessed by any command that uses per-atom values from a compute as
input. See the "Howto output"_Howto_output.html doc page for an
overview of LAMMPS output options.

The output is a per-atom vector if a single input value is specified,
otherwise a per-atom array is output. The number of columns in the
array is the number of inputs provided. The per-atom values for the
vector or each column of the array will be in whatever
"units"_units.html the corresponding input value is in.

The vector or array values are "intensive".

[Restrictions:] none

[Related commands:]

"compute chunk/atom"_compute_chunk_atom.html, "fix
ave/chunk"_fix_ave_chunk.html, "compute
reduce/chunk"_compute_reduce_chunk.html

[Default:] none

@ -10,18 +10,25 @@ compute dihedral/local command :h3

[Syntax:]

compute ID group-ID dihedral/local value1 value2 ... :pre
compute ID group-ID dihedral/local value1 value2 ... keyword args ... :pre

ID, group-ID are documented in "compute"_compute.html command :ulb,l
dihedral/local = style name of this compute command :l
one or more values may be appended :l
value = {phi} :l
{phi} = tabulate dihedral angles :pre
value = {phi} or {v_name} :l
{phi} = tabulate dihedral angles
{v_name} = equal-style variable with name (see below) :pre
zero or more keyword/args pairs may be appended :l
keyword = {set} :l
{set} args = phi name
phi = only currently allowed arg
name = name of variable to set with phi :pre
:ule

[Examples:]

compute 1 all dihedral/local phi :pre
compute 1 all dihedral/local phi v_cos set phi p :pre

[Description:]

@ -33,6 +40,47 @@ by the group parameter as explained below.

The value {phi} is the dihedral angle, as defined in the diagram on
the "dihedral_style"_dihedral_style.html doc page.

The value {v_name} can be used together with the {set} keyword to
compute a user-specified function of the dihedral angle phi. The
{name} specified for the {v_name} value is the name of an "equal-style
variable"_variable.html which should evaluate a formula based on a
variable which will store the angle phi. This other variable must
be an "internal-style variable"_variable.html defined in the input
script; its initial numeric value can be anything. It must be an
internal-style variable, because this command resets its value
directly. The {set} keyword is used to identify the name of this
other variable associated with phi.

Note that the value of phi for each dihedral which is stored in the
internal variable is in radians, not degrees.

As an example, these commands can be added to the bench/in.rhodo
script to compute the cosine and cosine^2 of every dihedral angle in
the system and output the statistics in various ways:

variable p internal 0.0
variable cos equal cos(v_p)
variable cossq equal cos(v_p)*cos(v_p) :pre

compute 1 all property/local datom1 datom2 datom3 datom4 dtype
compute 2 all dihedral/local phi v_cos v_cossq set phi p
dump 1 all local 100 tmp.dump c_1[*] c_2[*] :pre

compute 3 all reduce ave c_2[*]
thermo_style custom step temp press c_3[*] :pre

fix 10 all ave/histo 10 10 100 -1 1 20 c_2[2] mode vector file tmp.histo :pre

The "dump local"_dump.html command will output the angle,
cosine(angle), cosine^2(angle) for every dihedral in the system. The
"thermo_style"_thermo_style.html command will print the average of
those quantities via the "compute reduce"_compute_reduce.html command
with thermo output. And the "fix ave/histo"_fix_ave_histo.html
command will histogram the cosine(angle) values and write them to a
file.

:line

The local data stored by this command is generated by looping over all
the atoms owned on a processor and their dihedrals. A dihedral will
only be included if all 4 atoms in the dihedral are in the specified

@ -57,12 +105,12 @@ dump 1 all local 1000 tmp.dump index c_1\[1\] c_1\[2\] c_1\[3\] c_1\[4\] c_1\[5\
[Output info:]

This compute calculates a local vector or local array depending on the
number of keywords. The length of the vector or number of rows in the
array is the number of dihedrals. If a single keyword is specified, a
local vector is produced. If two or more keywords are specified, a
number of values. The length of the vector or number of rows in the
array is the number of dihedrals. If a single value is specified, a
local vector is produced. If two or more values are specified, a
local array is produced where the number of columns = the number of
keywords. The vector or array can be accessed by any command that
uses local values from a compute as input. See the "Howto
values. The vector or array can be accessed by any command that uses
local values from a compute as input. See the "Howto
output"_Howto_output.html doc page for an overview of LAMMPS output
options.

@ -0,0 +1,121 @@
"LAMMPS WWW Site"_lws - "LAMMPS Documentation"_ld - "LAMMPS Commands"_lc :c

:link(lws,http://lammps.sandia.gov)
:link(ld,Manual.html)
:link(lc,Section_commands.html#comm)

:line

compute ptm/atom command :h3

[Syntax:]

compute ID group-ID ptm/atom structures threshold :pre

ID, group-ID are documented in "compute"_compute.html command
ptm/atom = style name of this compute command
structures = structure types to search for
threshold = lattice distortion threshold (RMSD) :ul

[Examples:]

compute 1 all ptm/atom default 0.1
compute 1 all ptm/atom fcc-hcp-dcub-dhex 0.15
compute 1 all ptm/atom all 0 :pre

[Description:]

Define a computation that determines the local lattice structure
around an atom using the PTM (Polyhedral Template Matching) method.
The PTM method is described in "(Larsen)"_#Larsen.

Currently, there are seven lattice structures PTM recognizes:

fcc = 1
hcp = 2
bcc = 3
ico (icosahedral) = 4
sc (simple cubic) = 5
dcub (diamond cubic) = 6
dhex (diamond hexagonal) = 7
other = 8 :ul

The value of the PTM structure will be 0 for atoms not in the
specified compute group. The choice of structures to search for can
be specified using the "structures" argument, which is a
hyphen-separated list of structure keywords. Two convenient pre-set
options are provided:

default: fcc-hcp-bcc-ico
all: fcc-hcp-bcc-ico-sc-dcub-dhex :ul

The 'default' setting detects the same structures as the Common
Neighbor Analysis method. The 'all' setting searches for all
structure types. A small performance penalty is incurred for the
diamond structures, so it is not recommended to use this option if it
is known that the simulation does not contain diamond structures.

PTM identifies structures using two steps. First, a graph isomorphism
test is used to identify potential structure matches. Next, the
deviation is computed between the local structure (in the simulation)
and a template of the ideal lattice structure. The deviation is
calculated as:

:c,image(Eqs/ptm_rmsd.jpg)

Here, u and v contain the coordinates of the local and ideal
structures respectively, s is a scale factor, and Q is a rotation.
The best match is identified by the lowest RMSD value, using the
optimal scaling, rotation, and correspondence between the points.
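
For readers of the text-only version, a plausible LaTeX rendition of
the pictured equation, inferred from the surrounding description (the
image and the "(Larsen)"_#Larsen paper remain authoritative), is:

\mathrm{RMSD} = \min_{s,Q} \sqrt{ \frac{1}{N} \sum_{i=1}^{N} \| s (u_i - \bar{u}) - Q v_i \|^2 } :pre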

The 'threshold' keyword sets an upper limit on the maximum permitted
deviation before a local structure is identified as disordered.
Typical values are in the range 0.1-0.15, but larger values may be
desirable at higher temperatures. A value of 0 is equivalent to
infinity and can be used if no threshold is desired.

The neighbor list needed to compute this quantity is constructed each
time the calculation is performed (e.g. each time a snapshot of atoms
is dumped). Thus it can be inefficient to compute/dump this quantity
too frequently or to have multiple compute/dump commands, each with a
{ptm/atom} style.

[Output info:]

This compute calculates a per-atom array, which can be accessed by
any command that uses per-atom values from a compute as input. See
"Section 6.15"_Section_howto.html#howto_15 for an overview of
LAMMPS output options.

Results are stored in the per-atom array in the following order:

type
rmsd
interatomic distance
qw
qx
qy
qz :ul

The type is a number from 0 to 8. The rmsd is a positive real number.
The interatomic distance is computed from the scale factor in the RMSD
equation. The (qw,qx,qy,qz) parameters represent the orientation of
the local structure in quaternion form. The reference coordinates for
each template (from which the orientation is determined) can be found
in the {ptm_constants.h} file in the PTM source directory.
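
As a usage sketch (the compute/dump IDs and the output interval are
illustrative), the first two columns of this array, the structure
type and the RMSD, can be written to a dump file for visualization:

compute 1 all ptm/atom default 0.1
dump 1 all custom 100 dump.ptm id type x y z c_1\[1\] c_1\[2\] :pre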

[Restrictions:]

This compute is part of the USER-PTM package. It is only enabled if
LAMMPS was built with that package. See the "Build
package"_Build_package.html doc page for more info.

[Related commands:]

"compute centro/atom"_compute_centro_atom.html
"compute cna/atom"_compute_cna_atom.html

[Default:] none

:line

:link(Larsen)
[(Larsen)] Larsen, Schmidt, Schiøtz, Modelling Simul Mater Sci Eng, 24, 055007 (2016).

@ -97,9 +97,9 @@ equivalent, since the "compute stress/atom"_compute_stress_atom.html
command creates a per-atom array with 6 columns:

compute myPress all stress/atom NULL
compute 2 all reduce min myPress\[*\]
compute 2 all reduce min myPress\[1\] myPress\[2\] myPress\[3\] &
myPress\[4\] myPress\[5\] myPress\[6\] :pre
compute 2 all reduce min c_myPress\[*\]
compute 2 all reduce min c_myPress\[1\] c_myPress\[2\] c_myPress\[3\] &
c_myPress\[4\] c_myPress\[5\] c_myPress\[6\] :pre

:line

@ -0,0 +1,177 @@
"LAMMPS WWW Site"_lws - "LAMMPS Documentation"_ld - "LAMMPS Commands"_lc :c

:link(lws,http://lammps.sandia.gov)
:link(ld,Manual.html)
:link(lc,Commands_all.html)

:line

compute reduce/chunk command :h3

[Syntax:]

compute ID group-ID reduce/chunk chunkID mode input1 input2 ... :pre

ID, group-ID are documented in "compute"_compute.html command :ulb,l
reduce/chunk = style name of this compute command :l
chunkID = ID of "compute chunk/atom"_compute_chunk_atom.html command :l
mode = {sum} or {min} or {max} :l
one or more inputs can be listed :l
input = c_ID, c_ID\[N\], f_ID, f_ID\[N\], v_name :l
c_ID = per-atom vector calculated by a compute with ID
c_ID\[I\] = Ith column of per-atom array calculated by a compute with ID, I can include wildcard (see below)
f_ID = per-atom vector calculated by a fix with ID
f_ID\[I\] = Ith column of per-atom array calculated by a fix with ID, I can include wildcard (see below)
v_name = per-atom vector calculated by an atom-style variable with name :pre
:ule

[Examples:]

compute 1 all reduce/chunk mychunk min c_cluster :pre

[Description:]

Define a calculation that reduces one or more per-atom vectors into
per-chunk values. This can be useful for diagnostic output. Or when
used in conjunction with the "compute
chunk/spread/atom"_compute_chunk_spread_atom.html command it can be
used to create per-atom values that induce a new set of chunks with a
second "compute chunk/atom"_compute_chunk_atom.html command. An
example is given below.

In LAMMPS, chunks are collections of atoms defined by a "compute
chunk/atom"_compute_chunk_atom.html command, which assigns each atom
to a single chunk (or no chunk). The ID for this command is specified
as chunkID. For example, a single chunk could be the atoms in a
molecule or atoms in a spatial bin. See the "compute
chunk/atom"_compute_chunk_atom.html and "Howto chunk"_Howto_chunk.html
doc pages for details of how chunks can be defined and examples of how
they can be used to measure properties of a system.

For each atom, this compute accesses its chunk ID from the specified
{chunkID} compute. The per-atom value from an input contributes
to a per-chunk value corresponding to the chunk ID.

The reduction operation is specified by the {mode} setting and is
performed over all the per-atom values from the atoms in each chunk.
The {sum} option adds the per-atom values to a per-chunk total. The
{min} or {max} options find the minimum or maximum value of the
per-atom values for each chunk.

Note that only atoms in the specified group contribute to the
reduction operation. If the {chunkID} compute returns a 0 for the
chunk ID of an atom (i.e. the atom is not in a chunk defined by the
"compute chunk/atom"_compute_chunk_atom.html command), that atom will
also not contribute to the reduction operation. An input that is a
compute or fix may define its own group which affects the quantities
it returns. For example, a compute will return a zero value for atoms
that are not in the group specified for that compute.

Each listed input is operated on independently. Each input can be the
result of a "compute"_compute.html or "fix"_fix.html or the evaluation
of an atom-style "variable"_variable.html.

Note that for values from a compute or fix, the bracketed index I can
be specified using a wildcard asterisk with the index to effectively
specify multiple values. This takes the form "*" or "*n" or "n*" or
"m*n". If N = the size of the vector (for {mode} = scalar) or the
number of columns in the array (for {mode} = vector), then an asterisk
with no numeric values means all indices from 1 to N. A leading
asterisk means all indices from 1 to n (inclusive). A trailing
asterisk means all indices from n to N (inclusive). A middle asterisk
means all indices from m to n (inclusive).

Using a wildcard is the same as if the individual columns of the array
had been listed one by one. E.g. these 2 compute reduce/chunk
commands are equivalent, since the "compute
property/atom"_compute_property_atom.html command creates a per-atom
array with 3 columns:

compute prop all property/atom vx vy vz
compute 10 all reduce/chunk mychunk max c_prop\[*\]
compute 10 all reduce/chunk mychunk max c_prop\[1\] c_prop\[2\] c_prop\[3\] :pre

:line

Here is an example of using this compute, in conjunction with the
compute chunk/spread/atom command to identify self-assembled micelles.
The commands below can be added to the examples/in.micelle script.

Imagine a collection of polymer chains or small molecules with
hydrophobic end groups. All the hydrophobic (HP) atoms are assigned
to a group called "phobic".

These commands will assign a unique cluster ID to all HP atoms within
a specified distance of each other. A cluster will contain all HP
atoms in a single molecule, but also the HP atoms in nearby molecules,
e.g. molecules that have clumped to form a micelle due to the
attraction induced by the hydrophobicity. The output of the
reduce/chunk command will be a cluster ID per chunk (molecule).
Molecules with the same cluster ID are in the same micelle.

group phobic type 4 # specific to in.micelle model
compute cluster phobic cluster/atom 2.0
compute cmol all chunk/atom molecule
compute reduce phobic reduce/chunk cmol min c_cluster :pre

This per-chunk info could be output in at least two ways:

fix 10 all ave/time 1000 1 1000 c_reduce file tmp.phobic mode vector :pre

compute spread all chunk/spread/atom cmol c_reduce
dump 1 all custom 1000 tmp.dump id type mol x y z c_cluster c_spread
dump_modify 1 sort id :pre

In the first case, each snapshot in the tmp.phobic file will contain
one line per molecule. Molecules with the same value are in the same
micelle. In the second case each dump snapshot contains all atoms,
each with a final field with the cluster ID of the micelle that the HP
atoms of that atom's molecule belong to.

The result from compute chunk/spread/atom can be used to define a new
set of chunks, where all the atoms in all the molecules in the same
micelle are assigned to the same chunk, i.e. one chunk per micelle.

compute micelle all chunk/atom c_spread compress yes :pre

Further analysis on a per-micelle basis can now be performed using any
of the per-chunk computes listed on the "Howto chunk"_Howto_chunk.html
doc page. E.g. count the number of atoms in each micelle, calculate
its center of mass, shape (moments of inertia), radius of gyration,
etc.

compute prop all property/chunk micelle count
fix 20 all ave/time 1000 1 1000 c_prop file tmp.micelle mode vector :pre

Each snapshot in the tmp.micelle file will have one line per micelle
with its count of atoms, plus a first line for a chunk with all the
solvent atoms. By the time 50000 steps have elapsed there are a
handful of large micelles.

:line

[Output info:]

This compute calculates a global vector if a single input value is
specified, otherwise a global array is output. The number of columns
in the array is the number of inputs provided. The number of vector
elements or array rows = the number of chunks {Nchunk} as calculated
by the specified "compute chunk/atom"_compute_chunk_atom.html command.
The vector or array can be accessed by any command that uses global
values from a compute as input. See the "Howto
output"_Howto_output.html doc page for an overview of LAMMPS output
options.

The per-chunk values in the vector or each column of the array will be
in whatever "units"_units.html the corresponding input value is in.
The vector or array values are "intensive".

[Restrictions:] none

[Related commands:]

"compute chunk/atom"_compute_chunk_atom.html, "compute
reduce"_compute_reduce.html, "compute
chunk/spread/atom"_compute_chunk_spread_atom.html

[Default:] none
@ -0,0 +1,111 @@
"LAMMPS WWW Site"_lws - "LAMMPS Documentation"_ld - "LAMMPS Commands"_lc :c

:link(lws,http://lammps.sandia.gov)
:link(ld,Manual.html)
:link(lc,Section_commands.html#comm)

:line

compute stress/mop command :h3
compute stress/mop/profile command :h3

[Syntax:]

compute ID group-ID style dir args keywords ... :pre

ID, group-ID are documented in "compute"_compute.html command
style = {stress/mop} or {stress/mop/profile}
dir = {x} or {y} or {z} is the direction normal to the plane
args = arguments specific to the compute style
keywords = {kin} or {conf} or {total} (one or more can be specified) :ul
  {stress/mop} args = pos
    pos = {lower} or {center} or {upper} or coordinate value (distance units) is the position of the plane
  {stress/mop/profile} args = origin delta
    origin = {lower} or {center} or {upper} or coordinate value (distance units) is the position of the first plane
    delta = value (distance units) is the distance between planes :pre

[Examples:]

compute 1 all stress/mop x lower total
compute 1 liquid stress/mop z 0.0 kin conf
fix 1 all ave/time 10 1000 10000 c_1\[*\] file mop.time
fix 1 all ave/time 10 1000 10000 c_1\[2\] file mop.time :pre

compute 1 all stress/mop/profile x lower 0.1 total
compute 1 liquid stress/mop/profile z 0.0 0.25 kin conf
fix 1 all ave/time 500 20 10000 c_1\[*\] ave running overwrite file mopp.time mode vector :pre

[Description:]

Compute {stress/mop} and compute {stress/mop/profile} define
computations that calculate components of the local stress tensor
using the method of planes "(Todd)"_#mop-todd. Specifically, compute
{stress/mop} calculates 3 components, in directions {dir},{x};
{dir},{y}; and {dir},{z}, where {dir} is the direction normal to the
plane, while compute {stress/mop/profile} computes the profile of the
stress along that direction.

Contrary to methods based on histograms of atomic stress (i.e. using
"compute stress/atom"_compute_stress_atom.html), the method of planes
is compatible with mechanical balance in heterogeneous systems and at
interfaces "(Todd)"_#mop-todd.

The stress tensor is the sum of a kinetic term and a configurational
term, which are given respectively by Eq. (21) and Eq. (16) in
"(Todd)"_#mop-todd. For the kinetic part, the algorithm considers that
atoms have crossed the plane if their positions at times t-dt and t
are on opposite sides of the plane, and uses the velocity at time
t-dt/2 given by the velocity-Verlet algorithm.

Between one and three keywords can be used to indicate which
contributions to the stress must be computed: kinetic stress ({kin}),
configurational stress ({conf}), and/or total stress ({total}).

NOTE 1: The configurational stress is computed considering all pairs
of atoms where at least one atom belongs to group group-ID.

NOTE 2: The local stress does not include any Lennard-Jones tail
corrections to the pressure added by the "pair_modify tail
yes"_pair_modify.html command, since those are contributions to the
global system pressure.

[Output info:]

Compute {stress/mop} calculates a global vector (indices starting at
1), with 3 values for each declared keyword (in the order the keywords
have been declared). For each keyword, the stress tensor components
are ordered as follows: stress_dir,x; stress_dir,y; and stress_dir,z.
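For example, with the declaration below (a sketch; the compute ID
{mymop} is arbitrary), the {kin} and {conf} keywords each contribute 3
values, so elements 1-3 of the vector are the kinetic components and
elements 4-6 the configurational ones:

compute mymop all stress/mop z center kin conf
variable pzx_kin equal c_mymop\[1\]    # kinetic stress_z,x
variable pzx_conf equal c_mymop\[4\]   # configurational stress_z,x :pre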
Compute {stress/mop/profile} instead calculates a global array, with 1
column giving the position of the planes where the stress tensor was
computed, and with 3 columns of values for each declared keyword (in
the order the keywords have been declared). For each keyword, the
profiles of stress tensor components are ordered as follows:
stress_dir,x; stress_dir,y; and stress_dir,z.

The values are in pressure "units"_units.html.

The values produced by this compute can be accessed by various "output
commands"_Howto_output.html. For instance, the results can be written
to a file using the "fix ave/time"_fix_ave_time.html command. Please
see the example in the examples/USER/mop folder.

[Restrictions:]

These styles are part of the USER-MISC package. They are only enabled
if LAMMPS is built with that package. See the "Build
package"_Build_package.html doc page for more info.

The method is only implemented for 3d orthogonal simulation boxes
whose size does not change in time, and for axis-aligned planes.

The method only works with two-body pair interactions, because it
requires the class method pair->single() to be implemented. In
particular, it does not work with many-body pair interactions,
intra-molecular interactions, or long-range (kspace) interactions.

[Related commands:]

"compute stress/atom"_compute_stress_atom.html

[Default:] none

:line

:link(mop-todd)
[(Todd)] B. D. Todd, Denis J. Evans, and Peter J. Daivis: "Pressure
tensor for inhomogeneous fluids", Phys. Rev. E 52, 1627 (1995).
@ -15,6 +15,7 @@ Computes :h1
compute_bond_local
compute_centro_atom
compute_chunk_atom
compute_chunk_spread_atom
compute_cluster_atom
compute_cna_atom
compute_cnp_atom

@ -70,8 +71,10 @@ Computes :h1
compute_property_atom
compute_property_chunk
compute_property_local
compute_ptm_atom
compute_rdf
compute_reduce
compute_reduce_chunk
compute_rigid_local
compute_saed
compute_slice

@ -98,6 +101,7 @@ Computes :h1
compute_sna_atom
compute_spin
compute_stress_atom
compute_stress_mop
compute_tally
compute_tdpd_cc_atom
compute_temp
@ -221,8 +221,8 @@ This equation only applies when the box dimensions are equal to those
of the reference dimensions. If this is not the case, then the
converged stress tensor will not equal that specified by the user. We
can resolve this problem by periodically resetting the reference
dimensions. The keyword {nreset} controls how often this is done. If
this keyword is not used, or is given a value of zero, then the
reference dimensions are set to those of the initial simulation domain
and are never changed. A value of {nstep} means that every {nstep}
minimization steps, the reference dimensions are set to those of the
@ -0,0 +1,106 @@
"LAMMPS WWW Site"_lws - "LAMMPS Documentation"_ld - "LAMMPS Commands"_lc :c

:link(lws,http://lammps.sandia.gov)
:link(ld,Manual.html)
:link(lc,Commands_all.html)

:line

fix client/md command :h3

[Syntax:]

fix ID group-ID client/md :pre

ID, group-ID are documented in "fix"_fix.html command
client/md = style name of this fix command :ul

[Examples:]

fix 1 all client/md :pre

[Description:]

This fix style enables LAMMPS to run as a "client" code and
communicate each timestep with a separate "server" code to perform an
MD simulation together.

The "Howto client/server"_Howto_client_server.html doc page gives an
overview of client/server coupling of LAMMPS with another code where
one code is the "client" and sends request messages to a "server"
code. The server responds to each request with a reply message. This
enables the two codes to work in tandem to perform a simulation.

When using this fix, LAMMPS (as the client code) passes the current
coordinates of all particles to the server code each timestep, which
computes their interaction, and returns the energy, forces, and virial
for the interacting particles to LAMMPS, so it can complete the
timestep.

The server code could be a quantum code, or another classical MD code
which encodes a force field (pair_style in LAMMPS lingo) which LAMMPS
does not have. In the quantum case, this fix is a mechanism for
running {ab initio} MD with quantum forces.

The group associated with this fix is ignored.

The protocol and "units"_units.html for message format and content
that LAMMPS exchanges with the server code are defined on the "server
md"_server_md.html doc page.

Note that when using LAMMPS as an MD client, your LAMMPS input script
should not normally contain force field commands, like a
"pair_style"_pair_style.html, "bond_style"_bond_style.html, or
"kspace_style"_kspace_style.html command. However it is possible for
a server code to only compute a portion of the full force-field, while
LAMMPS computes the remaining part. Your LAMMPS script can also
specify boundary conditions or force constraints in the usual way,
which will be added to the per-atom forces returned by the server
code.

See the examples/message dir for example scripts where LAMMPS is both
the "client" and/or "server" code for this kind of client/server MD
simulation. The examples/message/README file explains how to launch
LAMMPS and another code in tandem to perform a coupled simulation.
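As an illustration, a minimal client-side input script might look like
the following sketch (the socket address and the data file name are
placeholders; a matching server code must be launched separately):

# hypothetical client script; no pair_style, forces come from the server
message client md zmq localhost:5555   # connect to the server code
units real
atom_style atomic
read_data data.client                  # placeholder data file
fix 1 all client/md                    # send coords, receive energy/forces/virial
fix_modify 1 energy yes                # add server energy to thermo output
thermo 10
run 100 :pre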
:line

[Restart, fix_modify, output, run start/stop, minimize info:]

No information about this fix is written to "binary restart
files"_restart.html.

The "fix_modify"_fix_modify.html {energy} option is supported by this
fix to add the potential energy computed by the server application to
the system's potential energy as part of "thermodynamic
output"_thermo_style.html.

The "fix_modify"_fix_modify.html {virial} option is supported by this
fix to add the server application's contribution to the system's
virial as part of "thermodynamic output"_thermo_style.html. The
default is {virial yes}.

This fix computes a global scalar which can be accessed by various
"output commands"_Howto_output.html. The scalar is the potential
energy discussed above. The scalar value calculated by this fix is
"extensive".

No parameter of this fix can be used with the {start/stop} keywords of
the "run"_run.html command. This fix is not invoked during "energy
minimization"_minimize.html.

[Restrictions:]

This fix is part of the MESSAGE package. It is only enabled if LAMMPS
was built with that package. See the "Build
package"_Build_package.html doc page for more info.

A script that uses this command must also use the
"message"_message.html command to set up the messaging protocol with
the other server code.

[Related commands:]

"message"_message.html, "server"_server.html

[Default:] none
@ -63,6 +63,11 @@ implemented in LAMMPS, they are coupled to a Nose/Hoover chain
thermostat in a velocity Verlet formulation, closely following the
implementation used for the "fix nvt"_fix_nh.html command.

NOTE: A recent (2017) book by "(Daivis and Todd)"_#Daivis-sllod
discusses use of the SLLOD method and non-equilibrium MD (NEMD)
thermostatting generally, for both simple and complex fluids,
e.g. molecular systems. The latter can be tricky to do correctly.

Additional parameters affecting the thermostat are specified by
keywords and values documented with the "fix nvt"_fix_nh.html
command. See, for example, discussion of the {temp} and {drag}

@ -177,3 +182,7 @@ Same as "fix nvt"_fix_nh.html, except tchain = 1.

:link(Daivis)
[(Daivis and Todd)] Daivis and Todd, J Chem Phys, 124, 194103 (2006).

:link(Daivis-sllod)
[(Daivis and Todd)] Daivis and Todd, Nonequilibrium Molecular Dynamics (book),
Cambridge University Press, https://doi.org/10.1017/9781139017848, (2017).
@ -214,8 +214,10 @@ which can lead to poor energy conservation. You can test for this in
your system by running a constant NVE simulation with a particular set
of SHAKE parameters and monitoring the energy versus time.

SHAKE or RATTLE should not be used to constrain an angle at 180
degrees (e.g. linear CO2 molecule). This causes numeric difficulties.
You can use "fix rigid or fix rigid/small"_fix_rigid.html instead to
make a linear molecule rigid.

[Related commands:] none
@ -26,6 +26,7 @@ Fixes :h1
fix_bond_swap
fix_bond_react
fix_box_relax
fix_client_md
fix_cmap
fix_colvars
fix_controller
@ -13,47 +13,53 @@ kspace_modify command :h3
kspace_modify keyword value ... :pre

one or more keyword/value pairs may be listed :ulb,l
keyword = {collective} or {compute} or {cutoff/adjust} or {diff} or {disp/auto} or {fftbench} or {force/disp/kspace} or {force/disp/real} or {force} or {gewald/disp} or {gewald} or {kmax/ewald} or {mesh} or {minorder} or {mix/disp} or {order/disp} or {order} or {overlap} or {scafacos} or {slab} or {splittol} :l
  {collective} value = {yes} or {no}
  {compute} value = {yes} or {no}
  {cutoff/adjust} value = {yes} or {no}
  {diff} value = {ad} or {ik} = 2 or 4 FFTs for PPPM in smoothed or non-smoothed mode
  {disp/auto} value = yes or no
  {fftbench} value = {yes} or {no}
  {force/disp/real} value = accuracy (force units)
  {force/disp/kspace} value = accuracy (force units)
  {force} value = accuracy (force units)
  {gewald} value = rinv (1/distance units)
    rinv = G-ewald parameter for Coulombics
  {gewald/disp} value = rinv (1/distance units)
    rinv = G-ewald parameter for dispersion
  {kmax/ewald} value = kx ky kz
    kx,ky,kz = number of Ewald sum kspace vectors in each dimension
  {mesh} value = x y z
    x,y,z = grid size in each dimension for long-range Coulombics
  {mesh/disp} value = x y z
    x,y,z = grid size in each dimension for 1/r^6 dispersion
  {minorder} value = M
    M = min allowed extent of Gaussian when auto-adjusting to minimize grid communication
  {mix/disp} value = {pair} or {geom} or {none}
  {order} value = N
    N = extent of Gaussian for PPPM or MSM mapping of charge to grid
  {order/disp} value = N
    N = extent of Gaussian for PPPM mapping of dispersion term to grid
  {overlap} = {yes} or {no} = whether the grid stencil for PPPM is allowed to overlap into more than the nearest-neighbor processor
  {pressure/scalar} value = {yes} or {no}
  {scafacos} values = option value1 value2 ...
    option = {tolerance}
      value = {energy} or {energy_rel} or {field} or {field_rel} or {potential} or {potential_rel}
    option = {fmm_tuning}
      value = {0} or {1}
  {slab} value = volfactor or {nozforce}
    volfactor = ratio of the total extended volume used in the
      2d approximation compared with the volume of the simulation domain
    {nozforce} turns off kspace forces in the z direction
  {splittol} value = tol
    tol = relative size of two eigenvalues (see discussion below) :pre
:ule

[Examples:]

kspace_modify mesh 24 24 30 order 6
kspace_modify slab 3.0
kspace_modify scafacos tolerance energy :pre

[Description:]

@ -61,6 +67,132 @@ Set parameters used by the kspace solvers defined by the
"kspace_style"_kspace_style.html command. Not all parameters are
relevant to all kspace styles.

:line

The {collective} keyword applies only to PPPM. It is set to {no} by
default, except on IBM BlueGene machines. If this option is set to
{yes}, LAMMPS will use MPI collective operations to remap data for
3d-FFT operations instead of the default point-to-point communication.
This is faster on IBM BlueGene machines, and may also be faster on
other machines if they have an efficient implementation of MPI
collective operations and adequate hardware.

:line

The {compute} keyword allows Kspace computations to be turned off,
even though a "kspace_style"_kspace_style.html is defined. This is
not useful for running a real simulation, but can be useful for
debugging purposes or for computing only partial forces that do not
include the Kspace contribution. You can also do this by simply not
defining a "kspace_style"_kspace_style.html, but a Kspace-compatible
"pair_style"_pair_style.html requires a kspace style to be defined.
This keyword gives you that option.
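For example, to recompute only the pairwise (non-Kspace) part of the
forces for snapshots of an existing run, a sketch (the dump file name
is a placeholder):

kspace_style pppm 1.0e-4
kspace_modify compute no    # kspace style stays defined, but is not computed
rerun tmp.dump dump x y z :pre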
:line

The {cutoff/adjust} keyword applies only to MSM. If this option is
turned on, the Coulombic cutoff will be automatically adjusted at the
beginning of the run to give the desired estimated error. Other
cutoffs such as LJ will not be affected. If the grid is not set using
the {mesh} command, this command will also attempt to use the optimal
grid that minimizes cost using an estimate given by
"(Hardy)"_#Hardy1. Note that this cost estimate is not exact, somewhat
experimental, and still may not yield the optimal parameters.

:line

The {diff} keyword specifies the differentiation scheme used by the
PPPM method to compute forces on particles given electrostatic
potentials on the PPPM mesh. The {ik} approach is the default for
PPPM and is the original formulation used in "(Hockney)"_#Hockney1. It
performs differentiation in Kspace, and uses 3 FFTs to transfer each
component of the computed fields back to real space for a total of 4
FFTs per timestep.

The analytic differentiation {ad} approach uses only 1 FFT to transfer
information back to real space for a total of 2 FFTs per timestep. It
then performs analytic differentiation on the single quantity to
generate the 3 components of the electric field at each grid point.
This is sometimes referred to as "smoothed" PPPM. This approach
requires a somewhat larger PPPM mesh to achieve the same accuracy as
the {ik} method. Currently, only the {ik} method (default) can be
used for a triclinic simulation cell with PPPM. The {ad} method is
always used for MSM.

NOTE: Currently, not all PPPM styles support the {ad} option. Support
for those PPPM variants will be added later.

:line

The {disp/auto} option controls whether the pppm/disp is allowed to
generate PPPM parameters automatically. If set to {no}, parameters
have to be specified using the {gewald/disp}, {mesh/disp},
{force/disp/real} or {force/disp/kspace} keywords, or the code will
stop with an error message. When this option is set to {yes}, the
error message will not appear and the simulation will start. For a
typical application, using the automatic parameter generation will
provide simulations that are either inaccurate or slow. Using this
option is thus not recommended. For guidelines on how to obtain good
parameters, see the "Howto dispersion"_Howto_dispersion.html doc page.

:line

The {fftbench} keyword applies only to PPPM. It is off by default. If
this option is turned on, LAMMPS will perform a short FFT benchmark
computation and report its timings, and will thus finish some seconds
later than it would if this option were off.

:line

The {force/disp/real} and {force/disp/kspace} keywords set the force
accuracy for the real-space and kspace computations for the dispersion
part of pppm/disp. As shown in "(Isele-Holder)"_#Isele-Holder1,
optimal performance and accuracy in the results is obtained when these
values are different.

:line

The {force} keyword overrides the relative accuracy parameter set by
the "kspace_style"_kspace_style.html command with an absolute
accuracy. The accuracy determines the RMS error in per-atom forces
calculated by the long-range solver and is thus specified in force
units. A negative value for the accuracy setting means to use the
relative accuracy parameter. The accuracy setting is used in
conjunction with the pairwise cutoff to determine the number of
K-space vectors for style {ewald}, the FFT grid size for style
{pppm}, or the real space grid size for style {msm}.
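For example (a sketch; the value is an absolute RMS force error in
force "units"_units.html):

kspace_style pppm 1.0e-4      # relative accuracy
kspace_modify force 2.0e-4    # override with an absolute accuracy
kspace_modify force -1.0      # revert to the relative accuracy :pre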
:line

The {gewald} keyword sets the value of the Ewald or PPPM G-ewald
parameter for charge as {rinv} in reciprocal distance units. Without
this setting, LAMMPS chooses the parameter automatically as a function
of cutoff, precision, grid spacing, etc. This means it can vary from
one simulation to the next, which may not be desirable for matching a
KSpace solver to a pre-tabulated pairwise potential. This setting can
also be useful if Ewald or PPPM fails to choose a good grid spacing
and G-ewald parameter automatically. If the value is set to 0.0,
LAMMPS will choose the G-ewald parameter automatically. MSM does not
use the {gewald} parameter.

:line

The {gewald/disp} keyword sets the value of the Ewald or PPPM G-ewald
parameter for dispersion as {rinv} in reciprocal distance units. It
has the same meaning as the {gewald} setting for Coulombics.

:line

The {kmax/ewald} keyword sets the number of kspace vectors in each
dimension for kspace style {ewald}. The three values must be positive
integers, or else (0,0,0), which unsets the option. When this option
is not set, the Ewald sum scheme chooses its own kspace vectors,
consistent with the user-specified accuracy and pairwise cutoff. In
any case, if kspace style {ewald} is invoked, the values used are
printed to the screen and the log file at the start of the run.
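For example (a sketch):

kspace_style ewald 1.0e-4
kspace_modify kmax/ewald 12 12 16   # fix the number of k-vectors per dimension
kspace_modify kmax/ewald 0 0 0      # unset; let the Ewald solver choose again :pre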
:line

The {mesh} keyword sets the grid size for kspace style {pppm} or
{msm}. In the case of PPPM, this is the FFT mesh, and each dimension
must be factorizable into powers of 2, 3, and 5. In the case of MSM,

@ -70,6 +202,8 @@ or MSM solver chooses its own grid size, consistent with the
user-specified accuracy and pairwise cutoff. Values for x,y,z of
0,0,0 unset the option.

:line

The {mesh/disp} keyword sets the grid size for kspace style
{pppm/disp}. This is the FFT mesh for long-range dispersion and each
dimension must be factorizable into powers of 2, 3, and 5. When this

@ -77,39 +211,7 @@ option is not set, the PPPM solver chooses its own grid size,
consistent with the user-specified accuracy and pairwise cutoff.
Values for x,y,z of 0,0,0 unset the option.

:line

The {minorder} keyword allows LAMMPS to reduce the {order} setting if
necessary to keep the communication of ghost grid points limited to

@ -126,6 +228,42 @@ error if the grid communication is non-nearest-neighbor and {overlap}
is set to {no}. The {minorder} keyword is not currently supported in
MSM.

:line

The {mix/disp} keyword selects the mixing rule for the dispersion
coefficients. With {pair}, the dispersion coefficients of unlike
types are computed as indicated with "pair_modify"_pair_modify.html.
With {geom}, geometric mixing is enforced on the dispersion
coefficients in the kspace coefficients. When using the arithmetic
mixing rule, this will speed up the simulations but introduces some
error in the force computations, as shown in "(Wennberg)"_#Wennberg.
With {none}, it is assumed that no mixing rule is
applicable. Splitting of the dispersion coefficients will be performed
as described in "(Isele-Holder)"_#Isele-Holder1.

This splitting can be influenced with the {splittol} keyword. Only
the eigenvalues that are larger than tol compared to the largest
eigenvalue are included. Using this keyword, the original matrix of
dispersion coefficients is approximated. This leads to faster
computations, but the accuracy in the reciprocal space computations of
the dispersion part is decreased.

:line

The {order} keyword determines how many grid spacings an atom's charge
extends when it is mapped to the grid in kspace style {pppm} or {msm}.
The default for this parameter is 5 for PPPM and 8 for MSM, which
means each charge spans 5 or 8 grid cells in each dimension,
respectively. For the LAMMPS implementation of MSM, the order can
range from 4 to 10 and must be even. For PPPM, the minimum allowed
setting is 2 and the maximum allowed setting is 7. The larger the
value of this parameter, the smaller the grid size LAMMPS will choose,
to achieve the requested accuracy. Conversely, the smaller the order
value, the larger the grid size will be. Note that there is an
inherent trade-off involved: a small grid will lower the cost of FFTs
or MSM direct sum, but a larger order parameter will increase the cost
of interpolating charge/fields to/from the grid.

The PPPM order parameter may be reset by LAMMPS when it sets up the
FFT grid if the implied grid stencil extends beyond the grid cells
owned by neighboring processors. Typically this will only occur when

@ -134,30 +272,102 @@ be generated indicating the order parameter is being reduced to allow
LAMMPS to run the problem. Automatic adjustment of the order parameter
is not supported in MSM.

The {order/disp} keyword determines how many grid spacings an atom's
dispersion term extends when it is mapped to the grid in kspace style
{pppm/disp}. It has the same meaning as the {order} setting for
Coulombics.

:line

The {overlap} keyword can be used in conjunction with the {minorder}
keyword with the PPPM styles to adjust the amount of communication
that occurs when values on the FFT grid are exchanged between
processors. This communication is distinct from the communication
inherent in the parallel FFTs themselves, and is required because
processors interpolate charge and field values using grid point values
owned by neighboring processors (i.e. ghost point communication). If
the {overlap} keyword is set to {yes} then this communication is
allowed to extend beyond nearest-neighbor processors, e.g. when using
lots of processors on a small problem. If it is set to {no} then the
communication will be limited to nearest-neighbor processors and the
{order} setting will be reduced if necessary, as explained by the
{minorder} keyword discussion. The {overlap} keyword is always set to
{yes} in MSM.

:line

The {pressure/scalar} keyword applies only to MSM. If this option is
turned on, only the scalar pressure (i.e. (Pxx + Pyy + Pzz)/3.0) will
be computed, which can be used, for example, to run an isotropic
barostat. Computing the full pressure tensor with MSM is expensive,
and this option provides a faster alternative. The scalar pressure is
computed using a relationship between the Coulombic energy and
pressure "(Hummer)"_#Hummer instead of using the virial equation.
This option cannot be used to access individual components of the
pressure tensor, to compute per-atom virial, or with suffix
kspace/pair styles of MSM, like OMP or GPU.
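For example, this option pairs naturally with an isotropic barostat (a
sketch):

kspace_style msm 1.0e-4
kspace_modify pressure/scalar yes   # compute only the scalar Coulombic pressure
fix 1 all npt temp 300.0 300.0 100.0 iso 1.0 1.0 1000.0 :pre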
:line

The {scafacos} keyword is used for settings that are passed to the
ScaFaCoS library when using "kspace_style scafacos"_kspace_style.html.

The {tolerance} option affects how the {accuracy} specified with the
"kspace_style"_kspace_style.html command is interpreted by ScaFaCoS.
The following values may be used:

energy = absolute accuracy in total Coulombic energy
energy_rel = relative accuracy in total Coulombic energy
potential = absolute accuracy in total Coulombic potential
potential_rel = relative accuracy in total Coulombic potential
field = absolute accuracy in electric field
field_rel = relative accuracy in electric field :ul

The values with suffix _rel indicate the tolerance is a relative
tolerance; the other values impose an absolute tolerance on the given
quantity. Absolute tolerance in this case means that for a given
quantity q and a given absolute tolerance of t_a, the result should
be between q-t_a and q+t_a. For a relative tolerance t_r the relative
error should not be greater than t_r, i.e. abs(1 - (result/q)) < t_r.
As a consequence, the tolerance type should be checked when
performing computations with a high absolute field / energy. E.g.
if the total energy in the system is 1000000.0, an absolute tolerance
of 1e-3 would mean that the result has to be between 999999.999 and
1000000.001, which would be equivalent to a relative tolerance of
1e-9.

The {energy} and {energy_rel} values set a tolerance based on the
total Coulombic energy of the system. The {potential} and
{potential_rel} values set a tolerance based on the per-atom Coulombic
energy. The {field} and {field_rel} tolerance types set a tolerance
based on the electric field values computed by ScaFaCoS. Since
per-atom forces are derived from the per-atom electric field, this
effectively sets a tolerance on the forces, similar to other LAMMPS
KSpace styles, as explained on the "kspace_style"_kspace_style.html
doc page.

Note that not all ScaFaCoS solvers support all tolerance types.
These are the allowed values for each method:

fmm = energy and energy_rel
p2nfft = field (1d-,2d-,3d-periodic systems) or potential (0d-periodic)
p3m = field
ewald = field
direct = has no tolerance tuning :ul

If the tolerance type is not changed, the default values for the
tolerance type are the first values in the above list, e.g. energy
is the default tolerance type for the fmm solver.
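For example (a sketch): the fmm solver accepts {energy} and
{energy_rel}, so the accuracy set by "kspace_style"_kspace_style.html
can be reinterpreted as a relative tolerance:

kspace_style scafacos fmm 1.0e-4
kspace_modify scafacos tolerance energy_rel   # treat 1.0e-4 as a relative energy tolerance :pre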
The {fmm_tuning} option is only relevant when using the FMM method.
It activates (value=1) or deactivates (value=0) an internal tuning
mechanism for the FMM solver. The tuning operation runs sequentially
and can be very time-consuming. Usually it is not needed for systems
with a homogeneous charge distribution. The default for this option is
therefore {0}. The FMM internal tuning is performed once, when the
solver is set up.

:line

The {slab} keyword allows an Ewald or PPPM solver to be used for
systems that are periodic in x,y but non-periodic in z - a

@ -191,92 +401,7 @@ the "fix efield"_fix_efield.html command, it will not give the correct
dielectric constant due to the Yeh/Berkowitz "(Yeh)"_#Yeh correction
not being compatible with how "fix efield"_fix_efield.html works.

@ -295,6 +420,8 @@ provide simulations that are either inaccurate or slow. Using this
option is thus not recommended. For guidelines on how to obtain good
parameters, see the "Howto dispersion"_Howto_dispersion.html doc page.

:line

[Restrictions:] none

[Related commands:]

@ -306,10 +433,12 @@ parameters, see the "Howto dispersion"_Howto_dispersion.html doc page.
The option defaults are mesh = mesh/disp = 0 0 0, order = order/disp =
5 (PPPM), order = 10 (MSM), minorder = 2, overlap = yes, force = -1.0,
gewald = gewald/disp = 0.0, slab = 1.0, compute = yes, cutoff/adjust =
yes (MSM), pressure/scalar = yes (MSM), fftbench = no (PPPM), diff =
ik (PPPM), mix/disp = pair, force/disp/real = -1.0, force/disp/kspace
= -1.0, split = 0, tol = 1.0e-6, and disp/auto = no. For pppm/intel,
order = order/disp = 7. For scafacos settings, the scafacos tolerance
option depends on the method chosen, as documented above. The
scafacos fmm_tuning default = 0.

:line
@ -12,7 +12,7 @@ kspace_style command :h3

kspace_style style value :pre

style = {none} or {ewald} or {ewald/disp} or {ewald/omp} or {pppm} or {pppm/cg} or {pppm/disp} or {pppm/tip4p} or {pppm/stagger} or {pppm/disp/tip4p} or {pppm/gpu} or {pppm/kk} or {pppm/omp} or {pppm/cg/omp} or {pppm/tip4p/omp} or {msm} or {msm/cg} or {msm/omp} or {msm/cg/omp} or {scafacos} :ulb,l
  {none} value = none
  {ewald} value = accuracy
    accuracy = desired relative error in forces

@ -22,7 +22,7 @@ style = {none} or {ewald} or {ewald/disp} or {ewald/omp} or {pppm} or {pppm/cg}
    accuracy = desired relative error in forces
  {pppm} value = accuracy
    accuracy = desired relative error in forces
  {pppm/cg} values = accuracy (smallq)
    accuracy = desired relative error in forces
    smallq = cutoff for charges to be considered (optional) (charge units)
  {pppm/disp} value = accuracy

@ -56,7 +56,10 @@ style = {none} or {ewald} or {ewald/disp} or {ewald/omp} or {pppm} or {pppm/cg}
    accuracy = desired relative error in forces
  {msm/cg/omp} value = accuracy (smallq)
    accuracy = desired relative error in forces
    smallq = cutoff for charges to be considered (optional) (charge units)
  {scafacos} values = method accuracy
    method = fmm or p2nfft or ewald or direct
    accuracy = desired relative error in forces :pre
:ule

[Examples:]

@ -64,6 +67,7 @@ style = {none} or {ewald} or {ewald/disp} or {ewald/omp} or {pppm} or {pppm/cg}
kspace_style pppm 1.0e-4
kspace_style pppm/cg 1.0e-5 1.0e-6
kspace_style msm 1.0e-4
kspace_style scafacos fmm 1.0e-4
kspace_style none :pre

[Description:]

@ -211,6 +215,63 @@ pressure simulation with MSM will cause the code to run slower.

:line

The {scafacos} style is a wrapper on the "ScaFaCoS Coulomb solver
library"_http://www.scafacos.de which provides a variety of solver
methods which can be used with LAMMPS. The paper by "(Who)"_#Who2012
gives an overview of ScaFaCoS.

ScaFaCoS was developed by a consortium of German research facilities
with a BMBF (German Ministry of Science and Education) funded project
in 2009-2012. Participants of the consortium were the Universities of
Bonn, Chemnitz, Stuttgart, and Wuppertal as well as the
Forschungszentrum Juelich.

The library is available for download at "http://scafacos.de" or can
be cloned from the git-repository
"git://github.com/scafacos/scafacos.git".

In order to use this KSpace style, you must download and build the
ScaFaCoS library, then build LAMMPS with the USER-SCAFACOS package
installed, which links LAMMPS to the ScaFaCoS library. See details
on "this page"_Section_packages.html#USER-SCAFACOS.

NOTE: Unlike other KSpace solvers in LAMMPS, ScaFaCoS computes all
Coulombic interactions, both short- and long-range. Thus you should
NOT use a Coulombic pair style when using kspace_style scafacos. This
also means the total Coulombic energy (short- and long-range) will be
tallied as part of the {elong} keyword of "thermodynamic
output"_thermo_style.html; the {ecoul} keyword will be zero.

NOTE: See the current restriction below about use of ScaFaCoS in
LAMMPS with molecular charged systems or the TIP4P water model.

The specified {method} determines which ScaFaCoS algorithm is used.
These are the ScaFaCoS methods currently available from LAMMPS:

{fmm} = Fast Multi-Pole method
{p2nfft} = FFT-based Coulomb solver
{ewald} = Ewald summation
{direct} = direct O(N^2) summation
{p3m} = PPPM :ul

We plan to support additional ScaFaCoS solvers from LAMMPS in the
future. For an overview of the included solvers, refer to
"(Sutmann)"_#Sutmann2013.

The specified {accuracy} is similar to the accuracy setting for other
LAMMPS KSpace styles, but is passed to ScaFaCoS, which can interpret
it in different ways for different methods it supports. Within the
ScaFaCoS library the {accuracy} is treated as a tolerance level
(either absolute or relative) for the chosen quantity, where the
quantity can be either the Coulombic field values, the per-atom
Coulombic energy, or the total Coulombic energy. To select from these
options, see the "kspace_modify scafacos accuracy"_kspace_modify.html
doc page.

The "kspace_modify scafacos"_kspace_modify.html command also explains
other ScaFaCoS options currently exposed to LAMMPS.
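Putting this together, a minimal setup might look like the following
sketch; note the pair style has no Coulombic component, since ScaFaCoS
computes all Coulombic interactions itself, per the NOTE above:

atom_style charge
pair_style lj/cut 2.5                     # LJ only, no coul/* component
kspace_style scafacos fmm 1.0e-4
kspace_modify scafacos tolerance energy :pre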
:line

The specified {accuracy} determines the relative RMS error in per-atom
forces calculated by the long-range solver. It is set as a
dimensionless number, relative to the force that two unit point

@ -321,12 +382,24 @@ dimensions. The only exception is if the slab option is set with
"kspace_modify"_kspace_modify.html, in which case the xy dimensions
must be periodic and the z dimension must be non-periodic.

The scafacos KSpace style will only be enabled if LAMMPS is built with
the USER-SCAFACOS package. See the "Build package"_Build_package.html
doc page for more info.

The use of ScaFaCoS in LAMMPS does not yet support molecular charged
systems where the short-range Coulombic interactions between atoms in
the same bond/angle/dihedral are weighted by the
"special_bonds"_special_bonds.html command. Likewise it does not
support the "TIP4P water style" where a fictitious charge site is
introduced in each water molecule.

[Related commands:]

"kspace_modify"_kspace_modify.html, "pair_style
lj/cut/coul/long"_pair_lj.html, "pair_style
lj/charmm/coul/long"_pair_charmm.html, "pair_style
lj/long/coul/long"_pair_lj_long.html, "pair_style
buck/coul/long"_pair_buck.html

[Default:]

@ -384,5 +457,12 @@ Evaluation of Forces for the Simulation of Biomolecules, University of
Illinois at Urbana-Champaign, (2006).

:link(Hardy2009)
[(Hardy2)] Hardy, Stone, Schulten, Parallel Computing, 35, 164-177
(2009).

:link(Sutmann2013)
[(Sutmann)] Sutmann, Arnold, Fahrenberger, et al., Physical Review E, 88(6), 063308 (2013).

:link(Who2012)
[(Who)] Who, Author2, Author3, J of Long Range Solvers, 35, 164-177
(2012).
@ -67,6 +67,7 @@ Howto_multiple.html
Howto_replica.html
Howto_library.html
Howto_couple.html
Howto_client_server.html
Howto_output.html
Howto_chunk.html
Howto_2d.html

@ -167,6 +168,7 @@ label.html
lattice.html
log.html
mass.html
message.html
min_modify.html
min_style.html
minimize.html

@ -194,6 +196,9 @@ reset_timestep.html
restart.html
run.html
run_style.html
server.html
server_mc.html
server_md.html
set.html
shell.html
special_bonds.html

@ -241,6 +246,7 @@ fix_bond_create.html
fix_bond_react.html
fix_bond_swap.html
fix_box_relax.html
fix_client_md.html
fix_cmap.html
fix_colvars.html
fix_controller.html

@ -406,6 +412,7 @@ compute_bond.html
compute_bond_local.html
compute_centro_atom.html
compute_chunk_atom.html
compute_chunk_spread_atom.html
compute_cluster_atom.html
compute_cna_atom.html
compute_cnp_atom.html

@ -461,8 +468,10 @@ compute_pressure_uef.html
compute_property_atom.html
compute_property_chunk.html
compute_property_local.html
compute_ptm_atom.html
compute_rdf.html
compute_reduce.html
compute_reduce_chunk.html
compute_rigid_local.html
compute_saed.html
compute_slice.html

@ -489,6 +498,7 @@ compute_smd_vol.html
compute_sna_atom.html
compute_spin.html
compute_stress_atom.html
compute_stress_mop.html
compute_tally.html
compute_tdpd_cc_atom.html
compute_temp.html

@ -524,6 +534,7 @@ pair_write.html
pair_adp.html
pair_agni.html
pair_airebo.html
pair_atm.html
pair_awpmd.html
pair_beck.html
pair_body_nparticle.html
@ -0,0 +1,162 @@
"LAMMPS WWW Site"_lws - "LAMMPS Documentation"_ld - "LAMMPS Commands"_lc :c

:link(lws,http://lammps.sandia.gov)
:link(ld,Manual.html)
:link(lc,Commands_all.html)

:line

message command :h3

[Syntax:]

message which protocol mode arg :pre

which = {client} or {server} :ulb,l
protocol = {md} or {mc} :l
mode = {file} or {zmq} or {mpi/one} or {mpi/two} :l
  {file} arg = filename
    filename = file used for message exchanges
  {zmq} arg = socket-ID
    socket-ID for client = localhost:5555, see description below
    socket-ID for server = *:5555, see description below
  {mpi/one} arg = none
  {mpi/two} arg = filename
    filename = file used to establish communication between 2 MPI jobs :pre
:ule

[Examples:]

message client md file tmp.couple
message server md file tmp.couple :pre

message client md zmq localhost:5555
message server md zmq *:5555 :pre

message client md mpi/one
message server md mpi/one :pre

message client md mpi/two tmp.couple
message server md mpi/two tmp.couple :pre

[Description:]

Establish a messaging protocol between LAMMPS and another code for the
purpose of client/server coupling.

The "Howto client/server"_Howto_client_server.html doc page gives an
overview of client/server coupling of LAMMPS with another code where
one code is the "client" and sends request messages to a "server"
code.  The server responds to each request with a reply message.  This
enables the two codes to work in tandem to perform a simulation.

:line

The {which} argument defines LAMMPS to be the client or the server.

:line

The {protocol} argument defines the format and content of messages
that will be exchanged between the two codes.  The current options
are:

md = run dynamics with another code
mc = perform Monte Carlo moves with another code :ul

For protocol {md}, LAMMPS can be either a client or server.  See the
"server md"_server_md.html doc page for details on the protocol.

For protocol {mc}, LAMMPS can be the server.  See the "server
mc"_server_mc.html doc page for details on the protocol.

:line

The {mode} argument specifies how messages are exchanged between the
client and server codes.  Both codes must use the same mode and use
consistent parameters.

For mode {file}, the 2 codes communicate via binary files.  They must
use the same filename, which is actually a file prefix.  Several files
with that prefix will be created and deleted as a simulation runs.
The filename can include a path.  Both codes must be able to access
the path/file in a common filesystem.

For mode {zmq}, the 2 codes communicate via a socket on the server
code's machine.  Support for socket messaging is provided by the
open-source "ZeroMQ library"_http://zeromq.org, which must be
installed on your system.  The client specifies an IP address (IPv4
format) or the DNS name of the machine the server code is running on,
followed by a 4-digit port ID for the socket, separated by a colon.
E.g.

localhost:5555        # client and server running on same machine
192.168.1.1:5555      # server is 192.168.1.1
deptbox.uni.edu:5555  # server is deptbox.uni.edu :pre

The server specifies "*:5555" where "*" represents all available
interfaces on the server's machine, and the port ID must match
what the client specifies.

NOTE: What are allowed port IDs?

NOTE: Additional explanation is needed here about how to use the {zmq}
mode on a parallel machine, e.g. a cluster with many nodes.

For mode {mpi/one}, the 2 codes communicate via MPI and are launched
by the same mpirun command, e.g. with this syntax for OpenMPI:

mpirun -np 2 lmp_mpi -mpicolor 0 -in in.client -log log.client : -np 4 othercode args  # LAMMPS is client
mpirun -np 2 othercode args : -np 4 lmp_mpi -mpicolor 1 -in in.server  # LAMMPS is server :pre

Note the use of the "-mpicolor color" command-line argument with
LAMMPS.  See the "command-line args"_Run_options.html doc page for
further explanation.

For mode {mpi/two}, the 2 codes communicate via MPI, but are launched
by 2 separate mpirun commands.  The specified {filename} argument is a
file the 2 MPI processes will use to exchange info so that an MPI
inter-communicator can be established to enable the 2 codes to send
MPI messages to each other.  Both codes must be able to access the
path/file in a common filesystem, as in the launch sketch below.
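
For example (an illustration only, with hypothetical script and
executable names, not part of the protocol itself), the two jobs could
be started from two different shells on machines that share the
filesystem holding {filename}:

mpirun -np 2 lmp_mpi -in in.client -log log.client   # shell 1: LAMMPS as client
mpirun -np 4 othercode args                          # shell 2: the other code as server :pre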

:line

Normally, the message command should be used at the top of a LAMMPS
input script.  It performs an initial handshake with the other code to
set up messaging and to verify that both codes are using the same
message protocol and mode.  Assuming both codes are launched at
(nearly) the same time, the other code should perform the same kind of
initialization.

If LAMMPS is the client code, it will begin sending messages when a
LAMMPS client command begins its operation.  E.g. for the "fix
client/md"_fix_client_md.html command, it is when a "run"_run.html
command is executed.

If LAMMPS is the server code, it will begin receiving messages when
the "server"_server.html command is invoked.

A fix client command will terminate its messaging with the server when
LAMMPS ends, or when the fix is deleted via the "unfix"_unfix.html
command.  The server command will terminate its messaging with the
client when the client signals it.  Then the remainder of the LAMMPS
input script will be processed.

If both codes do something similar, a new round of client/server
messaging can be initiated after termination by issuing a 2nd message
command in your LAMMPS input script, followed by a new fix client or
server command.

:line

[Restrictions:]

This command is part of the MESSAGE package.  It is only enabled if
LAMMPS was built with that package.  See the "Build
package"_Build_package.html doc page for more info.

[Related commands:]

"server"_server.html, "fix client/md"_fix_client_md.html

[Default:] none
@ -216,10 +216,10 @@ The "fix box/relax"_fix_box_relax.html command can be used to apply an
external pressure to the simulation box and allow it to shrink/expand
during the minimization.

Only a few other fixes (typically those that apply force constraints)
are invoked during minimization.  See the doc pages for individual
"fix"_fix.html commands to see which ones are relevant.  Current
examples of fixes that can be used include:
Only a few other fixes (typically those that add forces) are invoked
during minimization.  See the doc pages for individual "fix"_fix.html
commands to see which ones are relevant.  Current examples of fixes
that can be used include:

"fix addforce"_fix_addforce.html
"fix addtorque"_fix_addtorque.html

@ -242,6 +242,11 @@ you MUST enable the "fix_modify"_fix_modify.html {energy} option for
that fix.  The doc pages for individual "fix"_fix.html commands
specify if this should be done.

NOTE: The minimizers in LAMMPS do not allow for bonds (or angles, etc)
to be held fixed while atom coordinates are being relaxed, e.g. via
"fix shake"_fix_shake.html or "fix rigid"_fix_rigid.html.  See more
info in the Restrictions section below.

:line

[Restrictions:]
@ -0,0 +1,164 @@
"LAMMPS WWW Site"_lws - "LAMMPS Documentation"_ld - "LAMMPS Commands"_lc :c

:link(lws,http://lammps.sandia.gov)
:link(ld,Manual.html)
:link(lc,Commands_all.html)

:line

pair_style atm command :h3

[Syntax:]

pair_style atm cutoff cutoff_triple :pre

cutoff = cutoff for each pair in 3-body interaction (distance units)
cutoff_triple = additional cutoff applied to product of 3 pairwise distances (distance units) :ul

[Examples:]

pair_style atm 4.5 2.5
pair_coeff * * * 0.072 :pre

pair_style hybrid/overlay lj/cut 6.5 atm 4.5 2.5
pair_coeff * * lj/cut 1.0 1.0
pair_coeff 1 1 atm 1 0.064
pair_coeff 1 1 atm 2 0.080
pair_coeff 1 2 atm 2 0.100
pair_coeff 2 2 atm 2 0.125 :pre

[Description:]

The {atm} style computes a 3-body "Axilrod-Teller-Muto"_#Axilrod
potential for the energy E of a system of atoms as

:c,image(Eqs/pair_atm.jpg)

where nu is the three-body interaction strength.  The distances
between pairs of atoms r12, r23, r31 and the angles gamma1, gamma2,
gamma3 are as shown in this diagram:

:c,image(JPG/pair_atm_dia.jpg)
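
In conventional notation (a plain-text transcription of the equation
image above, which remains the authoritative form), the ATM
triple-dipole energy of a single triplet is

E = nu * (1 + 3 cos(gamma1) cos(gamma2) cos(gamma3)) / (r12 r23 r31)^3 :pre

which makes clear why nu carries units of energy times distance^9,
matching the pair_coeff units listed below.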

Note that for the interaction between a triplet of atoms I,J,K, there
is no "central" atom.  The interaction is symmetric with respect to
permutation of the three atoms.  Thus the nu value is the same for all
permutations of the atom types of I,J,K and needs to be specified only
once, as discussed below.

The {atm} potential is typically used in combination with a two-body
potential via the "pair_style hybrid/overlay"_pair_hybrid.html
command, as in the example above.

The potential for a triplet of atoms is calculated only if all 3
distances r12, r23, r31 between the 3 atoms satisfy rIJ < cutoff.
In addition, the product of the 3 distances must satisfy r12*r23*r31 <
cutoff_triple^3, which excludes from the calculation those triplets
with a small contribution to the interaction.
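
Stated compactly, a triplet I,J,K contributes to the energy only if

r12 < cutoff, r23 < cutoff, r31 < cutoff, and r12*r23*r31 < cutoff_triple^3 :pre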

The following coefficients must be defined for each pair of atom
types via the "pair_coeff"_pair_coeff.html command as in the examples
above, or in the restart files read by the
"read_restart"_read_restart.html command:

K = atom type of the third atom (1 to Ntypes)
nu = prefactor (energy/distance^9 units) :ul

K can be specified in one of two ways.  An explicit numeric value can
be used, as in the 2nd example above.  J <= K is required.  LAMMPS
sets the coefficients for the other 5 symmetric interactions to the
same values.  E.g. if I = 1, J = 2, K = 3, then these 6 values are set
to the specified nu: nu123, nu132, nu213, nu231, nu312, nu321.  This
enforces the symmetry discussed above.

A wildcard asterisk can be used for K to set the coefficients for
multiple triplets of atom types.  This takes the form "*" or "*n" or
"n*" or "m*n".  If N = the number of atom types, then an asterisk with
no numeric values means all types from 1 to N.  A leading asterisk
means all types from 1 to n (inclusive).  A trailing asterisk means
all types from n to N (inclusive).  A middle asterisk means all types
from m to n (inclusive).  Note that only type triplets with J <= K are
considered; if asterisks imply type triplets where K < J, they are
ignored.

Note that a pair_coeff command can override a previous setting for the
same I,J,K triplet.  For example, these commands set nu for all I,J,K
triplets, then overwrite nu for just the I,J,K = 2,3,4 triplet:

pair_coeff * * * 0.25
pair_coeff 2 3 4 0.1 :pre

Note that for a simulation with a single atom type, only a single
entry is required, e.g.

pair_coeff 1 1 1 0.25 :pre

For a simulation with two atom types, four pair_coeff commands will
specify all possible nu values:

pair_coeff 1 1 1 nu1
pair_coeff 1 1 2 nu2
pair_coeff 1 2 2 nu3
pair_coeff 2 2 2 nu4 :pre

For a simulation with three atom types, ten pair_coeff commands will
specify all possible nu values:

pair_coeff 1 1 1 nu1
pair_coeff 1 1 2 nu2
pair_coeff 1 1 3 nu3
pair_coeff 1 2 2 nu4
pair_coeff 1 2 3 nu5
pair_coeff 1 3 3 nu6
pair_coeff 2 2 2 nu7
pair_coeff 2 2 3 nu8
pair_coeff 2 3 3 nu9
pair_coeff 3 3 3 nu10 :pre
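
In general, for N atom types there are N(N+1)(N+2)/6 independent
triplets with I <= J <= K (multisets of size 3), which reproduces the
counts above: 1 for one type, 4 for two types, and 10 for three types.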

By default the nu value for all triplets is set to 0.0.  Thus it is
not required to provide pair_coeff commands that enumerate triplet
interactions for all K types.  If some I,J,K combination is not
specified, then there will be no 3-body ATM interactions for that
combination and all its permutations.  However, as with all pair
styles, it is required to specify a pair_coeff command for all I,J
combinations, else an error will result.

:line

[Mixing, shift, table, tail correction, restart, rRESPA info]:

This pair style does not support the "pair_modify"_pair_modify.html
mix, shift, table, and tail options.

This pair style writes its information to "binary restart
files"_restart.html, so pair_style and pair_coeff commands do not need
to be specified in an input script that reads a restart file.
However, if the {atm} potential is used in combination with other
potentials via the "pair_style hybrid/overlay"_pair_hybrid.html
command, then pair_coeff commands need to be re-specified
in the restart input script.

This pair style can only be used via the {pair} keyword of the
"run_style respa"_run_style.html command.  It does not support the
{inner}, {middle}, {outer} keywords.

:line

[Restrictions:]

This pair style is part of the MANYBODY package.  It is only enabled
if LAMMPS was built with that package.  See the "Build
package"_Build_package.html doc page for more info.

[Related commands:]

"pair_coeff"_pair_coeff.html

[Default:] none

:line

:link(Axilrod)
[(Axilrod)]
Axilrod and Teller, J Chem Phys, 11, 299 (1943);
Muto, Nippon Sugaku-Buturigakkwaishi 17, 629 (1943).
@ -38,7 +38,8 @@ charge and molecule ID information is included.

Where Tap(r_ij) is the taper function which provides a continuous cutoff
(up to third derivative) for inter-atomic separations larger than r_c
"(Maaravi)"_#Maaravi1.  Here {lambda} is the shielding parameter that
"(Leven1)"_#Leven3, "(Leven2)"_#Leven4 and "(Maaravi)"_#Maaravi1.
Here {lambda} is the shielding parameter that
eliminates the short-range singularity of the classical mono-polar
electrostatic interaction expression "(Maaravi)"_#Maaravi1.

@ -82,5 +83,11 @@ package"_Build_package.html doc page for more info.

:line

:link(Leven3)
[(Leven1)] I. Leven, I. Azuri, L. Kronik and O. Hod, J. Chem. Phys. 140, 104106 (2014).

:link(Leven4)
[(Leven2)] I. Leven et al, J. Chem. Theory Comput. 12, 2896-905 (2016).

:link(Maaravi1)
[(Maaravi)] T. Maaravi et al, J. Phys. Chem. C 121, 22826-22835 (2017).
@ -25,22 +25,24 @@ pair_coeff * * rebo CH.airebo NULL NULL C
pair_coeff * * tersoff BNC.tersoff B N NULL
pair_coeff * * ilp/graphene/hbn BNCH.ILP B N C
pair_coeff 1 1 coul/shield 0.70
pair_coeff 1 2 coul/shield 0.69498201415576216335
pair_coeff 1 2 coul/shield 0.695
pair_coeff 2 2 coul/shield 0.69 :pre

[Description:]

The {ilp/graphene/hbn} style computes the registry-dependent interlayer
potential (ILP) as described in "(Leven)"_#Leven and
"(Maaravi)"_#Maaravi2.  The normals are calculated as described
potential (ILP) as described in "(Leven1)"_#Leven1,
"(Leven2)"_#Leven2 and "(Maaravi)"_#Maaravi2.
The normals are calculated as described
in "(Kolmogorov)"_#Kolmogorov2.

:c,image(Eqs/pair_ilp_graphene_hbn.jpg)

Where Tap(r_ij) is the taper function which provides a continuous
cutoff (up to third derivative) for interatomic separations larger than
r_c "(Maaravi)"_#Maaravi2.  The definitions of each parameter in the above
equation can be found in "(Leven)"_#Leven and "(Maaravi)"_#Maaravi2.
r_c "(Maaravi)"_#Maaravi2.  The definitions of each parameter in the above
equation can be found in "(Leven1)"_#Leven1 and "(Maaravi)"_#Maaravi2.
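
For reference (our transcription; the cited papers give the exact
definition), the taper commonly used in this family of interlayer
potentials is the seventh-order polynomial

Tap(x) = 20x^7 - 70x^6 + 84x^5 - 35x^4 + 1, with x = r_ij/r_c :pre

which decreases smoothly from 1 at x = 0 to 0 at x = 1 with vanishing
first, second, and third derivatives at the cutoff.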

It is important to include all the pairs to build the neighbor list for
calculating the normals.

@ -61,13 +63,15 @@ NOTE: The parameters presented in the parameter file (e.g. BNCH.ILP),
are fitted with the taper function by setting the cutoff equal to 16.0
Angstroms.  Care should be taken when using a different cutoff or taper
function.

NOTE: Two new sets of parameters of ILP for two-dimensional hexagonal Materials
are presented in "(Ouyang)"_#Ouyang1.  These parameters provide a good description
in both short- and long-range interaction regimes, while the old ILP parameters
published in "(Leven)"_#Leven and "(Maaravi)"_#Maaravi2 are only suitable for
the long-range interaction regime.  This feature is essential for simulations in
the high-pressure regime (i.e., the interlayer distance smaller than the equilibrium distance).
The benchmark tests and comparison of these parameters can be found in "(Ouyang)"_#Ouyang1.
NOTE: Two new sets of parameters of ILP for two-dimensional hexagonal
materials are presented in "(Ouyang)"_#Ouyang.  These parameters provide
a good description in both short- and long-range interaction regimes,
while the old ILP parameters published in "(Leven2)"_#Leven2 and
"(Maaravi)"_#Maaravi2 are only suitable for the long-range interaction
regime.  This feature is essential for simulations in the high-pressure
regime (i.e., when the interlayer distance is smaller than the
equilibrium distance).  The benchmark tests and comparison of these
parameters can be found in "(Ouyang)"_#Ouyang.

This potential must be used in combination with hybrid/overlay.
Other interactions can be set to zero using pair_style {none}.

@ -112,14 +116,17 @@ units, if your simulation does not use {metal} units.

:line

:link(Leven)
[(Leven)] I. Leven et al, J. Chem. Theory Comput. 12, 2896-905 (2016)
:link(Leven1)
[(Leven1)] I. Leven, I. Azuri, L. Kronik and O. Hod, J. Chem. Phys. 140, 104106 (2014).

:link(Leven2)
[(Leven2)] I. Leven et al, J. Chem. Theory Comput. 12, 2896-905 (2016).

:link(Maaravi2)
[(Maaravi)] T. Maaravi et al, J. Phys. Chem. C 121, 22826-22835 (2017).

:link(Kolmogorov2)
[(Kolmogorov)] A. N. Kolmogorov, V. H. Crespi, Phys. Rev. B 71, 235415 (2005)
[(Kolmogorov)] A. N. Kolmogorov, V. H. Crespi, Phys. Rev. B 71, 235415 (2005).

:link(Ouyang1)
[(Ouyang)] W. Ouyang, D. Mandelli, M. Urbakh, O. Hod, arXiv:1806.09555 (2018).
:link(Ouyang)
[(Ouyang)] W. Ouyang, D. Mandelli, M. Urbakh and O. Hod, Nano Lett. 18, 6009-6016 (2018).
@ -19,11 +19,11 @@ tap_flag = 0/1 to turn off/on the taper function

pair_style hybrid/overlay kolmogorov/crespi/full 20.0 0
pair_coeff * * none
pair_coeff * * kolmogorov/crespi/full CC.KC C C :pre
pair_coeff * * kolmogorov/crespi/full CH.KC C C :pre

pair_style hybrid/overlay rebo kolmogorov/crespi/full 16.0
pair_coeff * * rebo CH.airebo C C
pair_coeff * * kolmogorov/crespi/full CC.KC C C :pre
pair_style hybrid/overlay rebo kolmogorov/crespi/full 16.0 1
pair_coeff * * rebo CH.airebo C H
pair_coeff * * kolmogorov/crespi/full CH_taper.KC C H :pre

[Description:]

@ -38,27 +38,32 @@ forces and to include all the pairs to build the neighbor list for
calculating the normals.  Energies are shifted so that they go
continuously to zero at the cutoff, assuming that the exponential part of
{Vij} (first term) decays sufficiently fast.  This shift is achieved by
the last term in the equation for {Vij} above.
the last term in the equation for {Vij} above.  This is essential only
when the taper function is turned off.  The formula of the taper function
can be found on the pair style "ilp/graphene/hbn"_pair_ilp_graphene_hbn.html
doc page.

NOTE: This potential is intended for interactions between two different
graphene layers.  Therefore, to avoid interactions within the same layer,
each layer should have a separate molecule id, and it is recommended to
use the "full" atom style in the data file.

The parameter file (e.g. CC.KC) is intended for use with {metal}
The parameter file (e.g. CH.KC) is intended for use with {metal}
"units"_units.html, with energies in meV.  Two additional parameters, {S}
and {rcut}, are included in the parameter file.  {S} is designed to
facilitate scaling of energies.  {rcut} is designed to build the neighbor
list for calculating the normals for each atom pair.

NOTE: A new set of parameters of the KC potential for hydrocarbons (CH.KC)
is presented in "(Ouyang)"_#Ouyang2.  The parameters in CH.KC provide
a good description in both short- and long-range interaction regimes,
while the original parameters (CC.KC) published in "(Kolmogorov)"_#Kolmogorov1
are only suitable for the long-range interaction regime.
This feature is essential for simulations in the high-pressure regime
(i.e., the interlayer distance smaller than the equilibrium distance).
The benchmark tests and comparison of these parameters can be found in "(Ouyang)"_#Ouyang2.
NOTE: Two new sets of parameters of the KC potential for hydrocarbons, CH.KC
(without the taper function) and CH_taper.KC (with the taper function),
are presented in "(Ouyang)"_#Ouyang1.  The energy of the KC potential
with the taper function goes continuously to zero at the cutoff.  The
parameters in both CH.KC and CH_taper.KC provide a good description in
both short- and long-range interaction regimes, while the original
parameters (CC.KC) published in "(Kolmogorov)"_#Kolmogorov1 are only
suitable for the long-range interaction regime.  This feature is
essential for simulations in the high-pressure regime (i.e., when the
interlayer distance is smaller than the equilibrium distance).  The
benchmark tests and comparison of these parameters can be found in
"(Ouyang)"_#Ouyang1.

This potential must be used in combination with hybrid/overlay.
Other interactions can be set to zero using pair_style {none}.

@ -84,7 +89,7 @@ package"_Build_package.html doc page for more info.
This pair potential requires the newton setting to be {on} for pair
interactions.

The CC.KC potential file provided with LAMMPS (see the potentials
The CH.KC potential file provided with LAMMPS (see the potentials
folder) is parameterized for metal units.  You can use this potential
with any LAMMPS units, but you would need to create your own custom
CC.KC potential file with all coefficients converted to the appropriate

@ -105,5 +110,5 @@ units.

:link(Kolmogorov1)
[(Kolmogorov)] A. N. Kolmogorov, V. H. Crespi, Phys. Rev. B 71, 235415 (2005)

:link(Ouyang2)
[(Ouyang)] W. Ouyang, D. Mandelli, M. Urbakh, O. Hod, arXiv:1806.09555 (2018).
:link(Ouyang1)
[(Ouyang)] W. Ouyang, D. Mandelli, M. Urbakh and O. Hod, Nano Lett. 18, 6009-6016 (2018).
@ -103,6 +103,7 @@ pair"_Commands_pair.html doc page are followed by one or more of

"pair_style adp"_pair_adp.html - angular dependent potential (ADP) of Mishin
"pair_style airebo"_pair_airebo.html - AIREBO potential of Stuart
"pair_style airebo/morse"_pair_airebo.html - AIREBO with Morse instead of LJ
"pair_style atm"_pair_atm.html - Axilrod-Teller-Muto potential
"pair_style beck"_pair_beck.html - Beck potential
"pair_style body/nparticle"_pair_body_nparticle.html - interactions between body particles
"pair_style bop"_pair_bop.html - BOP potential of Pettifor
@ -8,6 +8,7 @@ Pair Styles :h1

pair_adp
pair_agni
pair_airebo
pair_atm
pair_awpmd
pair_beck
pair_body_nparticle
@ -0,0 +1,71 @@
"LAMMPS WWW Site"_lws - "LAMMPS Documentation"_ld - "LAMMPS Commands"_lc :c

:link(lws,http://lammps.sandia.gov)
:link(ld,Manual.html)
:link(lc,Commands_all.html)

:line

server command :h3

[Syntax:]

server protocol :pre

protocol = {md} or {mc} :ul

[Examples:]

server md :pre

[Description:]

This command starts LAMMPS running in "server" mode, where it receives
messages from a separate "client" code and responds by sending a reply
message back to the client.  The specified {protocol} determines the
format and content of messages LAMMPS expects to receive and how it
responds.

The "Howto client/server"_Howto_client_server.html doc page gives an
overview of client/server coupling of LAMMPS with another code where
one code is the "client" and sends request messages to a "server"
code.  The server responds to each request with a reply message.  This
enables the two codes to work in tandem to perform a simulation.

When this command is invoked, LAMMPS will run in server mode in an
endless loop, waiting for messages from the client code.  The client
signals when it is done sending messages to LAMMPS, at which point the
loop will exit, and the remainder of the LAMMPS script will be
processed.

The {protocol} argument defines the format and content of messages
that will be exchanged between the two codes.  The current options
are:

"md"_server_md.html = run dynamics with another code
"mc"_server_mc.html = perform Monte Carlo moves with another code :ul

For protocol {md}, LAMMPS can be either a client (via the "fix
client/md"_fix_client_md.html command) or server.  See the "server
md"_server_md.html doc page for details on the protocol.

For protocol {mc}, LAMMPS can be the server.  See the "server
mc"_server_mc.html doc page for details on the protocol.

:line

[Restrictions:]

This command is part of the MESSAGE package.  It is only enabled if
LAMMPS was built with that package.  See the "Build
package"_Build_package.html doc page for more info.

A script that uses this command must also use the
"message"_message.html command to set up the messaging protocol with
the other client code.

[Related commands:]

"message"_message.html, "fix client/md"_fix_client_md.html

[Default:] none
@ -0,0 +1,116 @@
"LAMMPS WWW Site"_lws - "LAMMPS Documentation"_ld - "LAMMPS Commands"_lc :c

:link(lws,http://lammps.sandia.gov)
:link(ld,Manual.html)
:link(lc,Commands_all.html)

:line

server mc command :h3

[Syntax:]

server mc :pre

mc = the protocol argument to the "server"_server.html command

[Examples:]

server mc :pre

[Description:]

This command starts LAMMPS running in "server" mode, where it will
expect messages from a separate "client" code that match the {mc}
protocol for format and content explained below.  For each message
LAMMPS receives, it will send a message back to the client.

The "Howto client/server"_Howto_client_server.html doc page gives an
overview of client/server coupling of LAMMPS with another code where
one code is the "client" and sends request messages to a "server"
code.  The server responds to each request with a reply message.  This
enables the two codes to work in tandem to perform a simulation.

When this command is invoked, LAMMPS will run in server mode in an
endless loop, waiting for messages from the client code.  The client
signals when it is done sending messages to LAMMPS, at which point the
loop will exit, and the remainder of the LAMMPS script will be
processed.

The "server"_server.html doc page gives other options for using LAMMPS
in server mode.  See an example of how this command is used in
examples/COUPLE/lammps_mc/in.server.

:line

When using this command, LAMMPS (as the server code) receives
instructions from a Monte Carlo (MC) driver to displace random atoms,
compute the energy before and after displacement, and run dynamics to
equilibrate the system.

The MC driver performs the random displacements on random atoms,
accepts or rejects the move in an MC sense, and orchestrates the MD
runs.

The format and content of the exchanged messages are explained here in
a conceptual sense.  Python-style pseudo code for the library calls to
the CSlib is shown, which performs the actual message exchange between
the two codes.  See the "CSlib website"_http://cslib.sandia.gov doc
pages for more details on the actual library syntax.  The "cs" object
in this pseudo code is a pointer to an instance of the CSlib.

See the src/MESSAGE/server_mc.cpp file for details on how LAMMPS uses
these messages.  See the examples/COUPLE/lammps_mc/mc.cpp file for an
example of how an MC driver code can use these messages.

Define NATOMS=1, EINIT=2, DISPLACE=3, ACCEPT=4, RUN=5.

[Client sends one of these kinds of message]:

cs->send(NATOMS,0)      # msgID = 1 with no fields :pre

cs->send(EINIT,0)       # msgID = 2 with no fields :pre

cs->send(DISPLACE,2)    # msgID = 3 with 2 fields
cs->pack_int(1,ID)      # 1st field = ID of atom to displace
cs->pack(2,3,xnew)      # 2nd field = new xyz coords of displaced atom :pre

cs->send(ACCEPT,1)      # msgID = 4 with 1 field
cs->pack_int(1,flag)    # 1st field = accept/reject flag :pre

cs->send(RUN,1)         # msgID = 5 with 1 field
cs->pack_int(1,nsteps)  # 1st field = # of timesteps to run MD :pre

[Server replies]:

cs->send(NATOMS,1)         # msgID = 1 with 1 field
cs->pack_int(1,natoms)     # 1st field = number of atoms :pre

cs->send(EINIT,2)          # msgID = 2 with 2 fields
cs->pack_double(1,poteng)  # 1st field = potential energy of system
cs->pack(2,3*natoms,x)     # 2nd field = 3N coords of Natoms :pre

cs->send(DISPLACE,1)       # msgID = 3 with 1 field
cs->pack_double(1,poteng)  # 1st field = new potential energy of system :pre

cs->send(ACCEPT,0)         # msgID = 4 with no fields :pre

cs->send(RUN,0)            # msgID = 5 with no fields :pre
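
To make the round-trip pattern concrete, here is pseudo code (in the
same style as above) for one Metropolis trial move on the driver side.
The recv/unpack calls mirror the CSlib usage in
examples/COUPLE/lammps_mc/mc.cpp; uniform() and T are placeholders for
the driver's random number generator and MC temperature, so treat the
exact signatures as a sketch and check the CSlib docs.

cs->send(EINIT,0)                  # ask server for current energy
cs->recv(nfield,fieldID,fieldtype,fieldlen)
eold = cs->unpack_double(1)        # 1st reply field = potential energy

cs->send(DISPLACE,2)               # propose a move
cs->pack_int(1,ID)                 # atom to displace
cs->pack(2,3,xnew)                 # its trial xyz coords
cs->recv(nfield,fieldID,fieldtype,fieldlen)
enew = cs->unpack_double(1)        # energy after the displacement

flag = 1 if uniform() < exp(-(enew-eold)/T) else 0   # Metropolis test
cs->send(ACCEPT,1)                 # tell server to keep or undo the move
cs->pack_int(1,flag)
cs->recv(nfield,fieldID,fieldtype,fieldlen)          # empty acknowledgment :pre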

:line

[Restrictions:]

This command is part of the MESSAGE package.  It is only enabled if
LAMMPS was built with that package.  See the "Build
package"_Build_package.html doc page for more info.

A script that uses this command must also use the
"message"_message.html command to set up the messaging protocol with
the other client code.

[Related commands:]

"message"_message.html

[Default:] none
@ -0,0 +1,149 @@
"LAMMPS WWW Site"_lws - "LAMMPS Documentation"_ld - "LAMMPS Commands"_lc :c

:link(lws,http://lammps.sandia.gov)
:link(ld,Manual.html)
:link(lc,Commands_all.html)

:line

server md command :h3

[Syntax:]

server md :pre

md = the protocol argument to the "server"_server.html command

[Examples:]

server md :pre

[Description:]

This command starts LAMMPS running in "server" mode, where it will
expect messages from a separate "client" code that match the {md}
protocol for format and content explained below.  For each message
LAMMPS receives, it will send a message back to the client.

The "Howto client/server"_Howto_client_server.html doc page gives an
overview of client/server coupling of LAMMPS with another code where
one code is the "client" and sends request messages to a "server"
code.  The server responds to each request with a reply message.  This
enables the two codes to work in tandem to perform a simulation.

When this command is invoked, LAMMPS will run in server mode in an
endless loop, waiting for messages from the client code.  The client
signals when it is done sending messages to LAMMPS, at which point the
loop will exit, and the remainder of the LAMMPS script will be
processed.

The "server"_server.html doc page gives other options for using LAMMPS
in server mode.  See an example of how this command is used in
examples/message/in.message.server.

:line

When using this command, LAMMPS (as the server code) receives the
current coordinates of all particles from the client code each
timestep, computes their interaction, and returns the energy, forces,
and pressure for the interacting particles to the client code, so it
can complete the timestep.  This command could also be used with a
client code that performs energy minimization, using the server to
compute forces and energy each iteration of its minimizer.

When using the "fix client/md"_fix_client_md.html command, LAMMPS (as
the client code) does the timestepping and receives needed energy,
forces, and pressure values from the server code.

The format and content of the exchanged messages are explained here in
a conceptual sense.  Python-style pseudo code for the library calls to
the CSlib is shown, which performs the actual message exchange between
the two codes.  See the "CSlib website"_http://cslib.sandia.gov doc
pages for more details on the actual library syntax.  The "cs" object
in this pseudo code is a pointer to an instance of the CSlib.

See the src/MESSAGE/server_md.cpp and src/MESSAGE/fix_client_md.cpp
files for details on how LAMMPS uses these messages.  See the
examples/COUPLE/lammps_vasp/vasp_wrapper.py file for an example of how
a quantum code (VASP) can use these messages.

The following pseudo-code uses these values, defined as enums.

Define:

SETUP=1, STEP=2
DIM=1, PERIODICITY=2, ORIGIN=3, BOX=4, NATOMS=5, NTYPES=6, TYPES=7, COORDS=8, UNITS=9, CHARGE=10
FORCES=1, ENERGY=2, PRESSURE=3, ERROR=4 :pre

[Client sends 2 kinds of messages]:

# required fields: DIM, PERIODICITY, ORIGIN, BOX, NATOMS, NTYPES, TYPES, COORDS
# optional fields: UNITS, CHARGE :pre

cs->send(SETUP,nfields)      # msgID with nfields :pre

cs->pack_int(DIM,dim)        # dimension (2,3) of simulation
cs->pack(PERIODICITY,3,xyz)  # periodicity flags in 3 dims
cs->pack(ORIGIN,3,origin)    # lower-left corner of simulation box
cs->pack(BOX,9,box)          # 3 edge vectors of simulation box
cs->pack_int(NATOMS,natoms)  # total number of atoms
cs->pack_int(NTYPES,ntypes)  # number of atom types
cs->pack(TYPES,natoms,type)  # vector of per-atom types
cs->pack(COORDS,3*natoms,x)  # vector of 3N atom coords
cs->pack_string(UNITS,units) # units = "lj", "real", "metal", etc
cs->pack(CHARGE,natoms,q)    # vector of per-atom charge :pre

# required fields: COORDS
# optional fields: ORIGIN, BOX :pre

cs->send(STEP,nfields)       # msgID with nfields :pre

cs->pack(COORDS,3*natoms,x)  # vector of 3N atom coords
cs->pack(ORIGIN,3,origin)    # lower-left corner of simulation box
cs->pack(BOX,9,box)          # 3 edge vectors of simulation box :pre

[Server replies to either kind of message]:

# required fields: FORCES, ENERGY, PRESSURE
# optional fields: ERROR :pre

cs->send(msgID,nfields)      # msgID with nfields
cs->pack(FORCES,3*natoms,f)  # vector of 3N forces on atoms
cs->pack(ENERGY,1,poteng)    # total potential energy of system
cs->pack(PRESSURE,6,press)   # global pressure tensor (6-vector)
cs->pack_int(ERROR,flag)     # server had an error (e.g. DFT non-convergence) :pre
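
As a concrete illustration, one timestep on the client side might look
like the following pseudo code (same style as above).  The recv/unpack
calls follow the general CSlib pattern; their exact signatures are a
sketch, so consult the CSlib docs.

cs->send(STEP,1)                # one required field
cs->pack(COORDS,3*natoms,x)     # current coords of all atoms
cs->recv(nfield,fieldID,fieldtype,fieldlen)
f = cs->unpack(FORCES)          # 3N forces on atoms
poteng = cs->unpack(ENERGY)     # 1-element field = potential energy
press = cs->unpack(PRESSURE)    # 6-component pressure tensor :pre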

:line

The units for various quantities that are sent and received via
messages are defined for atomic-scale simulations in the table below.
The client and server codes (including LAMMPS) can use internal units
different than these (e.g. "real units"_units.html in LAMMPS), so long
as they convert to these units for messaging.

COORDS, ORIGIN, BOX = Angstroms
CHARGE = multiple of electron charge (1.0 is a proton)
ENERGY = eV
FORCES = eV/Angstrom
PRESSURE = bars :ul

Note that these are "metal units"_units.html in LAMMPS.
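
For example, a client that works internally in LAMMPS "real
units"_units.html would convert its energies using 1 kcal/mol =
0.043364 eV (and its forces correspondingly, per Angstrom) before
packing them into a message.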

If you wish to run LAMMPS in one of its non-atomic unit systems,
e.g. "lj units"_units.html, then the client and server should exchange
a UNITS message as indicated above, and both the client and server
should agree on the units for the data they exchange.

:line

[Restrictions:]

This command is part of the MESSAGE package.  It is only enabled if
LAMMPS was built with that package.  See the "Build
package"_Build_package.html doc page for more info.

[Related commands:]

"message"_message.html, "fix client/md"_fix_client_md.html

[Default:] none
@ -10,6 +10,7 @@ See these sections of the LAMMPS manual for details:

2.5 Building LAMMPS as a library (doc/Section_start.html#start_5)
6.10 Coupling LAMMPS to other codes (doc/Section_howto.html#howto_10)
6.29 Using LAMMPS in client/server mode (doc/Section_howto.html#howto_29)

In all of the examples included here, LAMMPS must first be built as a
library.  Basically, in the src dir you type one of

@ -33,9 +34,13 @@ These are the sub-directories included in this directory:

simple          simple example of driver code calling LAMMPS as a lib
multiple        example of driver code calling multiple instances of LAMMPS
lammps_mc       client/server coupling of a Monte Carlo client
                with LAMMPS server for energy evaluation
lammps_quest    MD with quantum forces, coupling to Quest DFT code
lammps_spparks  grain-growth Monte Carlo with strain via MD,
                coupling to SPPARKS kinetic MC code
lammps_vasp     client/server coupling of LAMMPS client with
                VASP quantum DFT as server for quantum forces
library         collection of useful inter-code communication routines
fortran         a simple wrapper on the LAMMPS library API that
                can be called from Fortran
@ -0,0 +1,33 @@
# Makefile for MC

SHELL = /bin/sh

SRC = mc.cpp random_park.cpp
OBJ = $(SRC:.cpp=.o)

# change this line for your machine to path for CSlib src dir

CSLIB = /home/sjplimp/lammps/lib/message/cslib/src

# compiler/linker settings

CC = g++
CCFLAGS = -g -O3 -I$(CSLIB)
LINK = g++
LINKFLAGS = -g -O -L$(CSLIB)

# targets

mc: $(OBJ)
# first line if built the CSlib within lib/message with ZMQ support
# second line if built the CSlib without ZMQ support
	$(LINK) $(LINKFLAGS) $(OBJ) -lcsnompi -lzmq -o mc
#	$(LINK) $(LINKFLAGS) $(OBJ) -lcsnompi -o mc

clean:
	@rm -f *.o mc

# rules

%.o:%.cpp
	$(CC) $(CCFLAGS) -c $<
@ -0,0 +1,128 @@
Sample Monte Carlo (MC) wrapper on LAMMPS via client/server coupling

See the MESSAGE package (doc/Section_messages.html#MESSAGE)
and Section_howto.html#howto10 for more details on how
client/server coupling works in LAMMPS.

In this dir, the mc.cpp/h files are a standalone "client" MC code.  It
should be run on a single processor, though it could become a parallel
program at some point.  LAMMPS is also run as a standalone executable
as a "server" on as many processors as desired, using its "server mc"
command; see its doc page for details.

Messages are exchanged between MC and LAMMPS via a client/server
library (CSlib), which is included in the LAMMPS distribution in
lib/message.  As explained below, you can choose to exchange data
between the two programs either via files or sockets (ZMQ).  If the MC
program became parallel, data could also be exchanged via MPI.

The MC code makes simple MC moves, displacing a single random atom
by a small random amount.  It uses LAMMPS to calculate the energy
change and to run dynamics between MC moves.
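
A trial displacement in such a scheme is typically accepted with the
Metropolis probability

  P_accept = min(1, exp(-(E_new - E_old)/T))

where T is the temperature parameter from the in.mc input file; see
mc.cpp for the exact rule used by this driver.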

----------------

Build LAMMPS with its MESSAGE package installed:

See the Build extras doc page and its MESSAGE package
section for details.

CMake:

-D PKG_MESSAGE=yes      # include the MESSAGE package
-D MESSAGE_ZMQ=value    # build with ZeroMQ support, value = no (default) or yes

Traditional make:

% cd lammps/lib/message
% python Install.py -m -z   # build CSlib with MPI and ZMQ support
% cd lammps/src
% make yes-message
% make mpi

You can leave off the -z if you do not have ZMQ on your system.

----------------

Build the MC client code

The source files for the MC code are in this dir.  It links with the
CSlib library in lib/message/cslib.

You must first build the CSlib in serial mode, e.g.

% cd lammps/lib/message/cslib/src
% make lib            # build serial and parallel lib with ZMQ support
% make lib zmq=no     # build serial and parallel lib without ZMQ support

Then edit the Makefile in this dir.  The CSLIB variable should be the
path to where the LAMMPS lib/message/cslib/src dir is on your system.
If you built the CSlib without ZMQ support you will also need to
comment/uncomment one line.  Then you can just type

% make

and you should get an "mc" executable.

----------------

To run in client/server mode:

Both the client (MC) and server (LAMMPS) must use the same messaging
mode, namely file or zmq.  This is an argument to the MC code; it can
be selected by setting the "mode" variable when you run LAMMPS.  The
default mode = file.

Here we assume LAMMPS was built to run in parallel, and the MESSAGE
package was installed with socket (ZMQ) support.  This means either of
the messaging modes can be used and LAMMPS can be run in serial or
parallel.  The MC code is always run in serial.

When you run, the server should print out thermodynamic info
for every MD run it performs (between MC moves).  The client
will print nothing until the simulation ends, then it will
print stats about the accepted MC moves.

The examples below are commands you should use in two different
terminal windows.  The order of the two commands (client or server
launch) does not matter.  You can run them both in the same window if
you append a "&" character to the first one to run it in the
background.

--------------

File mode of messaging:

% mpirun -np 1 mc in.mc file tmp.couple
% mpirun -np 1 lmp_mpi -v mode file < in.mc.server

% mpirun -np 1 mc in.mc file tmp.couple
% mpirun -np 4 lmp_mpi -v mode file < in.mc.server

ZMQ mode of messaging:

% mpirun -np 1 mc in.mc zmq localhost:5555
% mpirun -np 1 lmp_mpi -v mode zmq < in.mc.server

% mpirun -np 1 mc in.mc zmq localhost:5555
% mpirun -np 4 lmp_mpi -v mode zmq < in.mc.server

--------------

The input script for the MC program is in.mc.  You can edit it to run
longer simulations.

500     nsteps = total # of steps of MD
100     ndynamics = # of MD steps between MC moves
0.1     delta = displacement size of MC move
1.0     temperature = used in MC Boltzmann factor
12345   seed = random number seed

--------------

The problem size that LAMMPS is computing the MC energy for and
running dynamics on is set by the x,y,z variables in the LAMMPS
in.mc.server script.  The default size is 500 particles.  You can
adjust the size as follows:

lmp_mpi -v x 10 -v y 10 -v z 20   # 8000 particles
@ -0,0 +1,7 @@
# MC params

500     nsteps
100     ndynamics
0.1     delta
1.0     temperature
12345   seed
@ -0,0 +1,36 @@
# 3d Lennard-Jones Monte Carlo server script

variable        mode index file

if "${mode} == file" then &
  "message server mc file tmp.couple" &
elif "${mode} == zmq" &
  "message server mc zmq *:5555"

variable        x index 5
variable        y index 5
variable        z index 5

units           lj
atom_style      atomic
atom_modify     map yes

lattice         fcc 0.8442
region          box block 0 $x 0 $y 0 $z
create_box      1 box
create_atoms    1 box
mass            1 1.0

pair_style      lj/cut 2.5
pair_coeff      1 1 1.0 1.0 2.5

neighbor        0.3 bin
neigh_modify    delay 0 every 20 check no

velocity        all create 1.44 87287 loop geom

fix             1 all nve

thermo          50

server          mc
@ -0,0 +1,254 @@
LAMMPS (22 Aug 2018)
# 3d Lennard-Jones Monte Carlo server script

variable        mode index file

if "${mode} == file" then "message server mc file tmp.couple" elif "${mode} == zmq" "message server mc zmq *:5555"
message server mc file tmp.couple
variable        x index 5
variable        y index 5
variable        z index 5

units           lj
atom_style      atomic
atom_modify     map yes

lattice         fcc 0.8442
Lattice spacing in x,y,z = 1.6796 1.6796 1.6796
region          box block 0 $x 0 $y 0 $z
region          box block 0 5 0 $y 0 $z
region          box block 0 5 0 5 0 $z
region          box block 0 5 0 5 0 5
create_box      1 box
Created orthogonal box = (0 0 0) to (8.39798 8.39798 8.39798)
  1 by 1 by 1 MPI processor grid
create_atoms    1 box
Created 500 atoms
  Time spent = 0.000649929 secs
mass            1 1.0

pair_style      lj/cut 2.5
pair_coeff      1 1 1.0 1.0 2.5

neighbor        0.3 bin
neigh_modify    delay 0 every 20 check no

velocity        all create 1.44 87287 loop geom

fix             1 all nve

thermo          50

server          mc
run 0
Neighbor list info ...
  update every 20 steps, delay 0 steps, check no
  max neighbors/atom: 2000, page size: 100000
  master list distance cutoff = 2.8
  ghost atom cutoff = 2.8
  binsize = 1.4, bins = 6 6 6
  1 neighbor lists, perpetual/occasional/extra = 1 0 0
  (1) pair lj/cut, perpetual
      attributes: half, newton on
      pair build: half/bin/atomonly/newton
      stencil: half/bin/3d/newton
      bin: standard
Per MPI rank memory allocation (min/avg/max) = 2.658 | 2.658 | 2.658 Mbytes
Step Temp E_pair E_mol TotEng Press
0 1.44 -6.7733681 0 -4.6176881 -5.0221006
Loop time of 2.14577e-06 on 1 procs for 0 steps with 500 atoms

93.2% CPU use with 1 MPI tasks x no OpenMP threads

MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0 | 0 | 0 | 0.0 | 0.00
Output | 0 | 0 | 0 | 0.0 | 0.00
Modify | 0 | 0 | 0 | 0.0 | 0.00
Other | | 2.146e-06 | | |100.00

Nlocal: 500 ave 500 max 500 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 1956 ave 1956 max 1956 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 19500 ave 19500 max 19500 min
Histogram: 1 0 0 0 0 0 0 0 0 0

Total # of neighbors = 19500
Ave neighs/atom = 39
Neighbor list builds = 0
Dangerous builds not checked
run 0
Per MPI rank memory allocation (min/avg/max) = 2.658 | 2.658 | 2.658 Mbytes
Step Temp E_pair E_mol TotEng Press
0 1.44 -6.7723127 0 -4.6166327 -5.015531
Loop time of 2.14577e-06 on 1 procs for 0 steps with 500 atoms

93.2% CPU use with 1 MPI tasks x no OpenMP threads

MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0 | 0 | 0 | 0.0 | 0.00
Output | 0 | 0 | 0 | 0.0 | 0.00
Modify | 0 | 0 | 0 | 0.0 | 0.00
Other | | 2.146e-06 | | |100.00

Nlocal: 500 ave 500 max 500 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 1956 ave 1956 max 1956 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 19501 ave 19501 max 19501 min
Histogram: 1 0 0 0 0 0 0 0 0 0

Total # of neighbors = 19501
Ave neighs/atom = 39.002
Neighbor list builds = 0
Dangerous builds not checked
Per MPI rank memory allocation (min/avg/max) = 2.658 | 2.658 | 2.658 Mbytes
Step Temp E_pair E_mol TotEng Press
0 1.44 -6.7723127 0 -4.6166327 -5.015531
50 0.70239211 -5.6763152 0 -4.6248342 0.59544428
100 0.7565013 -5.757431 0 -4.6249485 0.21982657
run 0
Per MPI rank memory allocation (min/avg/max) = 2.658 | 2.658 | 2.658 Mbytes
Step Temp E_pair E_mol TotEng Press
100 0.7565013 -5.7565768 0 -4.6240944 0.22436405
Loop time of 1.90735e-06 on 1 procs for 0 steps with 500 atoms

157.3% CPU use with 1 MPI tasks x no OpenMP threads

MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0 | 0 | 0 | 0.0 | 0.00
Output | 0 | 0 | 0 | 0.0 | 0.00
Modify | 0 | 0 | 0 | 0.0 | 0.00
Other | | 1.907e-06 | | |100.00

Nlocal: 500 ave 500 max 500 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 1939 ave 1939 max 1939 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 18757 ave 18757 max 18757 min
Histogram: 1 0 0 0 0 0 0 0 0 0

Total # of neighbors = 18757
Ave neighs/atom = 37.514
Neighbor list builds = 0
Dangerous builds not checked
Per MPI rank memory allocation (min/avg/max) = 2.658 | 2.658 | 2.658 Mbytes
Step Temp E_pair E_mol TotEng Press
100 0.7565013 -5.757431 0 -4.6249485 0.21982657
150 0.76110797 -5.7664315 0 -4.6270529 0.16005254
200 0.73505651 -5.7266069 0 -4.6262273 0.34189744
run 0
Per MPI rank memory allocation (min/avg/max) = 2.658 | 2.658 | 2.658 Mbytes
Step Temp E_pair E_mol TotEng Press
200 0.73505651 -5.7181381 0 -4.6177585 0.37629943
Loop time of 2.14577e-06 on 1 procs for 0 steps with 500 atoms

139.8% CPU use with 1 MPI tasks x no OpenMP threads

MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0 | 0 | 0 | 0.0 | 0.00
Output | 0 | 0 | 0 | 0.0 | 0.00
Modify | 0 | 0 | 0 | 0.0 | 0.00
Other | | 2.146e-06 | | |100.00

Nlocal: 500 ave 500 max 500 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 1899 ave 1899 max 1899 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 18699 ave 18699 max 18699 min
Histogram: 1 0 0 0 0 0 0 0 0 0

Total # of neighbors = 18699
Ave neighs/atom = 37.398
Neighbor list builds = 0
Dangerous builds not checked
Per MPI rank memory allocation (min/avg/max) = 2.658 | 2.658 | 2.658 Mbytes
Step Temp E_pair E_mol TotEng Press
200 0.73505651 -5.7266069 0 -4.6262273 0.34189744
250 0.73052476 -5.7206316 0 -4.627036 0.39287516
300 0.76300831 -5.7675007 0 -4.6252773 0.16312925
run 0
Per MPI rank memory allocation (min/avg/max) = 2.658 | 2.658 | 2.658 Mbytes
Step Temp E_pair E_mol TotEng Press
300 0.76300831 -5.768304 0 -4.6260806 0.15954325
Loop time of 2.14577e-06 on 1 procs for 0 steps with 500 atoms

139.8% CPU use with 1 MPI tasks x no OpenMP threads

MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0 | 0 | 0 | 0.0 | 0.00
Output | 0 | 0 | 0 | 0.0 | 0.00
Modify | 0 | 0 | 0 | 0.0 | 0.00
Other | | 2.146e-06 | | |100.00

Nlocal: 500 ave 500 max 500 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 1903 ave 1903 max 1903 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 18715 ave 18715 max 18715 min
Histogram: 1 0 0 0 0 0 0 0 0 0

Total # of neighbors = 18715
Ave neighs/atom = 37.43
Neighbor list builds = 0
Dangerous builds not checked
Per MPI rank memory allocation (min/avg/max) = 2.658 | 2.658 | 2.658 Mbytes
Step Temp E_pair E_mol TotEng Press
300 0.76300831 -5.768304 0 -4.6260806 0.15954325
350 0.72993309 -5.7193261 0 -4.6266162 0.3358374
400 0.72469448 -5.713463 0 -4.6285954 0.44859547
run 0
Per MPI rank memory allocation (min/avg/max) = 2.658 | 2.658 | 2.658 Mbytes
Step Temp E_pair E_mol TotEng Press
400 0.72469448 -5.7077332 0 -4.6228655 0.47669832
Loop time of 1.90735e-06 on 1 procs for 0 steps with 500 atoms

157.3% CPU use with 1 MPI tasks x no OpenMP threads

MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0 | 0 | 0 | 0.0 | 0.00
Output | 0 | 0 | 0 | 0.0 | 0.00
Modify | 0 | 0 | 0 | 0.0 | 0.00
Other | | 1.907e-06 | | |100.00

Nlocal: 500 ave 500 max 500 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 1899 ave 1899 max 1899 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 18683 ave 18683 max 18683 min
Histogram: 1 0 0 0 0 0 0 0 0 0

Total # of neighbors = 18683
Ave neighs/atom = 37.366
Neighbor list builds = 0
Dangerous builds not checked
Per MPI rank memory allocation (min/avg/max) = 2.658 | 2.658 | 2.658 Mbytes
Step Temp E_pair E_mol TotEng Press
400 0.72469448 -5.713463 0 -4.6285954 0.44859547
450 0.75305735 -5.7518283 0 -4.6245015 0.34658587
500 0.73092571 -5.7206337 0 -4.6264379 0.43715809
Total wall time: 0:00:02
@ -0,0 +1,254 @@
|
|||
LAMMPS (22 Aug 2018)
# 3d Lennard-Jones Monte Carlo server script

variable mode index file

if "${mode} == file" then "message server mc file tmp.couple" elif "${mode} == zmq" "message server mc zmq *:5555"
message server mc file tmp.couple
variable x index 5
variable y index 5
variable z index 5

units lj
atom_style atomic
atom_modify map yes

lattice fcc 0.8442
Lattice spacing in x,y,z = 1.6796 1.6796 1.6796
region box block 0 $x 0 $y 0 $z
region box block 0 5 0 $y 0 $z
region box block 0 5 0 5 0 $z
region box block 0 5 0 5 0 5
create_box 1 box
Created orthogonal box = (0 0 0) to (8.39798 8.39798 8.39798)
1 by 2 by 2 MPI processor grid
create_atoms 1 box
Created 500 atoms
Time spent = 0.000592947 secs
mass 1 1.0

pair_style lj/cut 2.5
pair_coeff 1 1 1.0 1.0 2.5

neighbor 0.3 bin
neigh_modify delay 0 every 20 check no

velocity all create 1.44 87287 loop geom

fix 1 all nve

thermo 50

server mc
run 0
Neighbor list info ...
update every 20 steps, delay 0 steps, check no
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 2.8
ghost atom cutoff = 2.8
binsize = 1.4, bins = 6 6 6
1 neighbor lists, perpetual/occasional/extra = 1 0 0
(1) pair lj/cut, perpetual
attributes: half, newton on
pair build: half/bin/atomonly/newton
stencil: half/bin/3d/newton
bin: standard
Per MPI rank memory allocation (min/avg/max) = 2.619 | 2.619 | 2.619 Mbytes
Step Temp E_pair E_mol TotEng Press
0 1.44 -6.7733681 0 -4.6176881 -5.0221006
Loop time of 3.8147e-06 on 4 procs for 0 steps with 500 atoms

59.0% CPU use with 4 MPI tasks x no OpenMP threads

MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0 | 0 | 0 | 0.0 | 0.00
Output | 0 | 0 | 0 | 0.0 | 0.00
Modify | 0 | 0 | 0 | 0.0 | 0.00
Other | | 3.815e-06 | | |100.00

Nlocal: 125 ave 125 max 125 min
Histogram: 4 0 0 0 0 0 0 0 0 0
Nghost: 1099 ave 1099 max 1099 min
Histogram: 4 0 0 0 0 0 0 0 0 0
Neighs: 4875 ave 4875 max 4875 min
Histogram: 4 0 0 0 0 0 0 0 0 0

Total # of neighbors = 19500
Ave neighs/atom = 39
Neighbor list builds = 0
Dangerous builds not checked
run 0
Per MPI rank memory allocation (min/avg/max) = 2.619 | 2.619 | 2.619 Mbytes
Step Temp E_pair E_mol TotEng Press
0 1.44 -6.7723127 0 -4.6166327 -5.015531
Loop time of 3.03984e-06 on 4 procs for 0 steps with 500 atoms

106.9% CPU use with 4 MPI tasks x no OpenMP threads

MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0 | 0 | 0 | 0.0 | 0.00
Output | 0 | 0 | 0 | 0.0 | 0.00
Modify | 0 | 0 | 0 | 0.0 | 0.00
Other | | 3.04e-06 | | |100.00

Nlocal: 125 ave 125 max 125 min
Histogram: 4 0 0 0 0 0 0 0 0 0
Nghost: 1099 ave 1099 max 1099 min
Histogram: 4 0 0 0 0 0 0 0 0 0
Neighs: 4875.25 ave 4885 max 4866 min
Histogram: 1 0 0 0 2 0 0 0 0 1

Total # of neighbors = 19501
Ave neighs/atom = 39.002
Neighbor list builds = 0
Dangerous builds not checked
Per MPI rank memory allocation (min/avg/max) = 2.619 | 2.619 | 2.619 Mbytes
Step Temp E_pair E_mol TotEng Press
0 1.44 -6.7723127 0 -4.6166327 -5.015531
50 0.70210225 -5.6759068 0 -4.6248598 0.59609192
100 0.75891559 -5.7611234 0 -4.6250267 0.20841608
run 0
Per MPI rank memory allocation (min/avg/max) = 2.619 | 2.619 | 2.619 Mbytes
Step Temp E_pair E_mol TotEng Press
100 0.75891559 -5.7609392 0 -4.6248426 0.20981291
Loop time of 3.75509e-06 on 4 procs for 0 steps with 500 atoms

113.2% CPU use with 4 MPI tasks x no OpenMP threads

MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0 | 0 | 0 | 0.0 | 0.00
Output | 0 | 0 | 0 | 0.0 | 0.00
Modify | 0 | 0 | 0 | 0.0 | 0.00
Other | | 3.755e-06 | | |100.00

Nlocal: 125 ave 126 max 124 min
Histogram: 2 0 0 0 0 0 0 0 0 2
Nghost: 1085.25 ave 1089 max 1079 min
Histogram: 1 0 0 0 0 1 0 0 0 2
Neighs: 4690.25 ave 4996 max 4401 min
Histogram: 1 0 0 1 0 1 0 0 0 1

Total # of neighbors = 18761
Ave neighs/atom = 37.522
Neighbor list builds = 0
Dangerous builds not checked
Per MPI rank memory allocation (min/avg/max) = 2.619 | 2.619 | 2.619 Mbytes
Step Temp E_pair E_mol TotEng Press
100 0.75891559 -5.7609392 0 -4.6248426 0.20981291
150 0.75437991 -5.7558622 0 -4.6265555 0.20681722
200 0.73111257 -5.7193748 0 -4.6248993 0.35230715
run 0
Per MPI rank memory allocation (min/avg/max) = 2.619 | 2.619 | 2.619 Mbytes
Step Temp E_pair E_mol TotEng Press
200 0.73111257 -5.7143906 0 -4.6199151 0.37126023
Loop time of 2.563e-06 on 4 procs for 0 steps with 500 atoms

117.1% CPU use with 4 MPI tasks x no OpenMP threads

MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0 | 0 | 0 | 0.0 | 0.00
Output | 0 | 0 | 0 | 0.0 | 0.00
Modify | 0 | 0 | 0 | 0.0 | 0.00
Other | | 2.563e-06 | | |100.00

Nlocal: 125 ave 126 max 123 min
Histogram: 1 0 0 0 0 0 1 0 0 2
Nghost: 1068.5 ave 1076 max 1063 min
Histogram: 2 0 0 0 0 0 1 0 0 1
Neighs: 4674.75 ave 4938 max 4419 min
Histogram: 1 0 0 0 1 1 0 0 0 1

Total # of neighbors = 18699
Ave neighs/atom = 37.398
Neighbor list builds = 0
Dangerous builds not checked
Per MPI rank memory allocation (min/avg/max) = 2.619 | 2.619 | 2.619 Mbytes
Step Temp E_pair E_mol TotEng Press
200 0.73111257 -5.7193748 0 -4.6248993 0.35230715
250 0.73873144 -5.7312505 0 -4.6253696 0.33061033
300 0.76392796 -5.7719207 0 -4.6283206 0.18197874
run 0
Per MPI rank memory allocation (min/avg/max) = 2.619 | 2.619 | 2.619 Mbytes
Step Temp E_pair E_mol TotEng Press
300 0.76392796 -5.7725589 0 -4.6289588 0.17994628
Loop time of 3.99351e-06 on 4 procs for 0 steps with 500 atoms

93.9% CPU use with 4 MPI tasks x no OpenMP threads

MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0 | 0 | 0 | 0.0 | 0.00
Output | 0 | 0 | 0 | 0.0 | 0.00
Modify | 0 | 0 | 0 | 0.0 | 0.00
Other | | 3.994e-06 | | |100.00

Nlocal: 125 ave 128 max 121 min
Histogram: 1 0 0 0 0 1 0 1 0 1
Nghost: 1069 ave 1080 max 1055 min
Histogram: 1 0 0 0 0 0 2 0 0 1
Neighs: 4672 ave 4803 max 4600 min
Histogram: 2 0 0 1 0 0 0 0 0 1

Total # of neighbors = 18688
Ave neighs/atom = 37.376
Neighbor list builds = 0
Dangerous builds not checked
Per MPI rank memory allocation (min/avg/max) = 2.619 | 2.619 | 2.619 Mbytes
Step Temp E_pair E_mol TotEng Press
300 0.76392796 -5.7725589 0 -4.6289588 0.17994628
350 0.71953041 -5.7041632 0 -4.6270261 0.44866153
400 0.7319047 -5.7216051 0 -4.6259438 0.46321355
run 0
Per MPI rank memory allocation (min/avg/max) = 2.619 | 2.619 | 2.619 Mbytes
Step Temp E_pair E_mol TotEng Press
400 0.7319047 -5.7158168 0 -4.6201554 0.49192039
Loop time of 3.57628e-06 on 4 procs for 0 steps with 500 atoms

111.8% CPU use with 4 MPI tasks x no OpenMP threads

MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0 | 0 | 0 | 0.0 | 0.00
Output | 0 | 0 | 0 | 0.0 | 0.00
Modify | 0 | 0 | 0 | 0.0 | 0.00
Other | | 3.576e-06 | | |100.00

Nlocal: 125 ave 132 max 118 min
Histogram: 1 0 0 0 0 2 0 0 0 1
Nghost: 1057.5 ave 1068 max 1049 min
Histogram: 1 0 0 1 1 0 0 0 0 1
Neighs: 4685.75 ave 5045 max 4229 min
Histogram: 1 0 0 1 0 0 0 0 0 2

Total # of neighbors = 18743
Ave neighs/atom = 37.486
Neighbor list builds = 0
Dangerous builds not checked
Per MPI rank memory allocation (min/avg/max) = 2.619 | 2.619 | 2.619 Mbytes
Step Temp E_pair E_mol TotEng Press
400 0.7319047 -5.7216051 0 -4.6259438 0.46321355
450 0.74503154 -5.7405318 0 -4.6252196 0.33211879
500 0.70570501 -5.6824439 0 -4.6260035 0.62020788
Total wall time: 0:00:02
@@ -0,0 +1,254 @@
LAMMPS (22 Aug 2018)
# 3d Lennard-Jones Monte Carlo server script

variable mode index file

if "${mode} == file" then "message server mc file tmp.couple" elif "${mode} == zmq" "message server mc zmq *:5555"
message server mc zmq *:5555
variable x index 5
variable y index 5
variable z index 5

units lj
atom_style atomic
atom_modify map yes

lattice fcc 0.8442
Lattice spacing in x,y,z = 1.6796 1.6796 1.6796
region box block 0 $x 0 $y 0 $z
region box block 0 5 0 $y 0 $z
region box block 0 5 0 5 0 $z
region box block 0 5 0 5 0 5
create_box 1 box
Created orthogonal box = (0 0 0) to (8.39798 8.39798 8.39798)
1 by 1 by 1 MPI processor grid
create_atoms 1 box
Created 500 atoms
Time spent = 0.000741005 secs
mass 1 1.0

pair_style lj/cut 2.5
pair_coeff 1 1 1.0 1.0 2.5

neighbor 0.3 bin
neigh_modify delay 0 every 20 check no

velocity all create 1.44 87287 loop geom

fix 1 all nve

thermo 50

server mc
run 0
Neighbor list info ...
update every 20 steps, delay 0 steps, check no
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 2.8
ghost atom cutoff = 2.8
binsize = 1.4, bins = 6 6 6
1 neighbor lists, perpetual/occasional/extra = 1 0 0
(1) pair lj/cut, perpetual
attributes: half, newton on
pair build: half/bin/atomonly/newton
stencil: half/bin/3d/newton
bin: standard
Per MPI rank memory allocation (min/avg/max) = 2.658 | 2.658 | 2.658 Mbytes
Step Temp E_pair E_mol TotEng Press
0 1.44 -6.7733681 0 -4.6176881 -5.0221006
Loop time of 1.90735e-06 on 1 procs for 0 steps with 500 atoms

52.4% CPU use with 1 MPI tasks x no OpenMP threads

MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0 | 0 | 0 | 0.0 | 0.00
Output | 0 | 0 | 0 | 0.0 | 0.00
Modify | 0 | 0 | 0 | 0.0 | 0.00
Other | | 1.907e-06 | | |100.00

Nlocal: 500 ave 500 max 500 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 1956 ave 1956 max 1956 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 19500 ave 19500 max 19500 min
Histogram: 1 0 0 0 0 0 0 0 0 0

Total # of neighbors = 19500
Ave neighs/atom = 39
Neighbor list builds = 0
Dangerous builds not checked
run 0
Per MPI rank memory allocation (min/avg/max) = 2.658 | 2.658 | 2.658 Mbytes
Step Temp E_pair E_mol TotEng Press
0 1.44 -6.7723127 0 -4.6166327 -5.015531
Loop time of 1.90735e-06 on 1 procs for 0 steps with 500 atoms

52.4% CPU use with 1 MPI tasks x no OpenMP threads

MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0 | 0 | 0 | 0.0 | 0.00
Output | 0 | 0 | 0 | 0.0 | 0.00
Modify | 0 | 0 | 0 | 0.0 | 0.00
Other | | 1.907e-06 | | |100.00

Nlocal: 500 ave 500 max 500 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 1956 ave 1956 max 1956 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 19501 ave 19501 max 19501 min
Histogram: 1 0 0 0 0 0 0 0 0 0

Total # of neighbors = 19501
Ave neighs/atom = 39.002
Neighbor list builds = 0
Dangerous builds not checked
Per MPI rank memory allocation (min/avg/max) = 2.658 | 2.658 | 2.658 Mbytes
Step Temp E_pair E_mol TotEng Press
0 1.44 -6.7723127 0 -4.6166327 -5.015531
50 0.70239211 -5.6763152 0 -4.6248342 0.59544428
100 0.7565013 -5.757431 0 -4.6249485 0.21982657
run 0
Per MPI rank memory allocation (min/avg/max) = 2.658 | 2.658 | 2.658 Mbytes
Step Temp E_pair E_mol TotEng Press
100 0.7565013 -5.7565768 0 -4.6240944 0.22436405
Loop time of 1.19209e-06 on 1 procs for 0 steps with 500 atoms

83.9% CPU use with 1 MPI tasks x no OpenMP threads

MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0 | 0 | 0 | 0.0 | 0.00
Output | 0 | 0 | 0 | 0.0 | 0.00
Modify | 0 | 0 | 0 | 0.0 | 0.00
Other | | 1.192e-06 | | |100.00

Nlocal: 500 ave 500 max 500 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 1939 ave 1939 max 1939 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 18757 ave 18757 max 18757 min
Histogram: 1 0 0 0 0 0 0 0 0 0

Total # of neighbors = 18757
Ave neighs/atom = 37.514
Neighbor list builds = 0
Dangerous builds not checked
Per MPI rank memory allocation (min/avg/max) = 2.658 | 2.658 | 2.658 Mbytes
Step Temp E_pair E_mol TotEng Press
100 0.7565013 -5.757431 0 -4.6249485 0.21982657
150 0.76110797 -5.7664315 0 -4.6270529 0.16005254
200 0.73505651 -5.7266069 0 -4.6262273 0.34189744
run 0
Per MPI rank memory allocation (min/avg/max) = 2.658 | 2.658 | 2.658 Mbytes
Step Temp E_pair E_mol TotEng Press
200 0.73505651 -5.7181381 0 -4.6177585 0.37629943
Loop time of 9.53674e-07 on 1 procs for 0 steps with 500 atoms

209.7% CPU use with 1 MPI tasks x no OpenMP threads

MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0 | 0 | 0 | 0.0 | 0.00
Output | 0 | 0 | 0 | 0.0 | 0.00
Modify | 0 | 0 | 0 | 0.0 | 0.00
Other | | 9.537e-07 | | |100.00

Nlocal: 500 ave 500 max 500 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 1899 ave 1899 max 1899 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 18699 ave 18699 max 18699 min
Histogram: 1 0 0 0 0 0 0 0 0 0

Total # of neighbors = 18699
Ave neighs/atom = 37.398
Neighbor list builds = 0
Dangerous builds not checked
Per MPI rank memory allocation (min/avg/max) = 2.658 | 2.658 | 2.658 Mbytes
Step Temp E_pair E_mol TotEng Press
200 0.73505651 -5.7266069 0 -4.6262273 0.34189744
250 0.73052476 -5.7206316 0 -4.627036 0.39287516
300 0.76300831 -5.7675007 0 -4.6252773 0.16312925
run 0
Per MPI rank memory allocation (min/avg/max) = 2.658 | 2.658 | 2.658 Mbytes
Step Temp E_pair E_mol TotEng Press
300 0.76300831 -5.768304 0 -4.6260806 0.15954325
Loop time of 9.53674e-07 on 1 procs for 0 steps with 500 atoms

104.9% CPU use with 1 MPI tasks x no OpenMP threads

MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0 | 0 | 0 | 0.0 | 0.00
Output | 0 | 0 | 0 | 0.0 | 0.00
Modify | 0 | 0 | 0 | 0.0 | 0.00
Other | | 9.537e-07 | | |100.00

Nlocal: 500 ave 500 max 500 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 1903 ave 1903 max 1903 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 18715 ave 18715 max 18715 min
Histogram: 1 0 0 0 0 0 0 0 0 0

Total # of neighbors = 18715
Ave neighs/atom = 37.43
Neighbor list builds = 0
Dangerous builds not checked
Per MPI rank memory allocation (min/avg/max) = 2.658 | 2.658 | 2.658 Mbytes
Step Temp E_pair E_mol TotEng Press
300 0.76300831 -5.768304 0 -4.6260806 0.15954325
350 0.72993309 -5.7193261 0 -4.6266162 0.3358374
400 0.72469448 -5.713463 0 -4.6285954 0.44859547
run 0
Per MPI rank memory allocation (min/avg/max) = 2.658 | 2.658 | 2.658 Mbytes
Step Temp E_pair E_mol TotEng Press
400 0.72469448 -5.7077332 0 -4.6228655 0.47669832
Loop time of 9.53674e-07 on 1 procs for 0 steps with 500 atoms

209.7% CPU use with 1 MPI tasks x no OpenMP threads

MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0 | 0 | 0 | 0.0 | 0.00
Output | 0 | 0 | 0 | 0.0 | 0.00
Modify | 0 | 0 | 0 | 0.0 | 0.00
Other | | 9.537e-07 | | |100.00

Nlocal: 500 ave 500 max 500 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 1899 ave 1899 max 1899 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 18683 ave 18683 max 18683 min
Histogram: 1 0 0 0 0 0 0 0 0 0

Total # of neighbors = 18683
Ave neighs/atom = 37.366
Neighbor list builds = 0
Dangerous builds not checked
Per MPI rank memory allocation (min/avg/max) = 2.658 | 2.658 | 2.658 Mbytes
Step Temp E_pair E_mol TotEng Press
400 0.72469448 -5.713463 0 -4.6285954 0.44859547
450 0.75305735 -5.7518283 0 -4.6245015 0.34658587
500 0.73092571 -5.7206337 0 -4.6264379 0.43715809
Total wall time: 0:00:00
@@ -0,0 +1,254 @@
LAMMPS (22 Aug 2018)
# 3d Lennard-Jones Monte Carlo server script

variable mode index file

if "${mode} == file" then "message server mc file tmp.couple" elif "${mode} == zmq" "message server mc zmq *:5555"
message server mc zmq *:5555
variable x index 5
variable y index 5
variable z index 5

units lj
atom_style atomic
atom_modify map yes

lattice fcc 0.8442
Lattice spacing in x,y,z = 1.6796 1.6796 1.6796
region box block 0 $x 0 $y 0 $z
region box block 0 5 0 $y 0 $z
region box block 0 5 0 5 0 $z
region box block 0 5 0 5 0 5
create_box 1 box
Created orthogonal box = (0 0 0) to (8.39798 8.39798 8.39798)
1 by 2 by 2 MPI processor grid
create_atoms 1 box
Created 500 atoms
Time spent = 0.000576019 secs
mass 1 1.0

pair_style lj/cut 2.5
pair_coeff 1 1 1.0 1.0 2.5

neighbor 0.3 bin
neigh_modify delay 0 every 20 check no

velocity all create 1.44 87287 loop geom

fix 1 all nve

thermo 50

server mc
run 0
Neighbor list info ...
update every 20 steps, delay 0 steps, check no
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 2.8
ghost atom cutoff = 2.8
binsize = 1.4, bins = 6 6 6
1 neighbor lists, perpetual/occasional/extra = 1 0 0
(1) pair lj/cut, perpetual
attributes: half, newton on
pair build: half/bin/atomonly/newton
stencil: half/bin/3d/newton
bin: standard
Per MPI rank memory allocation (min/avg/max) = 2.619 | 2.619 | 2.619 Mbytes
Step Temp E_pair E_mol TotEng Press
0 1.44 -6.7733681 0 -4.6176881 -5.0221006
Loop time of 4.76837e-06 on 4 procs for 0 steps with 500 atoms

89.1% CPU use with 4 MPI tasks x no OpenMP threads

MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0 | 0 | 0 | 0.0 | 0.00
Output | 0 | 0 | 0 | 0.0 | 0.00
Modify | 0 | 0 | 0 | 0.0 | 0.00
Other | | 4.768e-06 | | |100.00

Nlocal: 125 ave 125 max 125 min
Histogram: 4 0 0 0 0 0 0 0 0 0
Nghost: 1099 ave 1099 max 1099 min
Histogram: 4 0 0 0 0 0 0 0 0 0
Neighs: 4875 ave 4875 max 4875 min
Histogram: 4 0 0 0 0 0 0 0 0 0

Total # of neighbors = 19500
Ave neighs/atom = 39
Neighbor list builds = 0
Dangerous builds not checked
run 0
Per MPI rank memory allocation (min/avg/max) = 2.619 | 2.619 | 2.619 Mbytes
Step Temp E_pair E_mol TotEng Press
0 1.44 -6.7723127 0 -4.6166327 -5.015531
Loop time of 3.45707e-06 on 4 procs for 0 steps with 500 atoms

94.0% CPU use with 4 MPI tasks x no OpenMP threads

MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0 | 0 | 0 | 0.0 | 0.00
Output | 0 | 0 | 0 | 0.0 | 0.00
Modify | 0 | 0 | 0 | 0.0 | 0.00
Other | | 3.457e-06 | | |100.00

Nlocal: 125 ave 125 max 125 min
Histogram: 4 0 0 0 0 0 0 0 0 0
Nghost: 1099 ave 1099 max 1099 min
Histogram: 4 0 0 0 0 0 0 0 0 0
Neighs: 4875.25 ave 4885 max 4866 min
Histogram: 1 0 0 0 2 0 0 0 0 1

Total # of neighbors = 19501
Ave neighs/atom = 39.002
Neighbor list builds = 0
Dangerous builds not checked
Per MPI rank memory allocation (min/avg/max) = 2.619 | 2.619 | 2.619 Mbytes
Step Temp E_pair E_mol TotEng Press
0 1.44 -6.7723127 0 -4.6166327 -5.015531
50 0.70210225 -5.6759068 0 -4.6248598 0.59609192
100 0.75891559 -5.7611234 0 -4.6250267 0.20841608
run 0
Per MPI rank memory allocation (min/avg/max) = 2.619 | 2.619 | 2.619 Mbytes
Step Temp E_pair E_mol TotEng Press
100 0.75891559 -5.7609392 0 -4.6248426 0.20981291
Loop time of 3.03984e-06 on 4 procs for 0 steps with 500 atoms

115.1% CPU use with 4 MPI tasks x no OpenMP threads

MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0 | 0 | 0 | 0.0 | 0.00
Output | 0 | 0 | 0 | 0.0 | 0.00
Modify | 0 | 0 | 0 | 0.0 | 0.00
Other | | 3.04e-06 | | |100.00

Nlocal: 125 ave 126 max 124 min
Histogram: 2 0 0 0 0 0 0 0 0 2
Nghost: 1085.25 ave 1089 max 1079 min
Histogram: 1 0 0 0 0 1 0 0 0 2
Neighs: 4690.25 ave 4996 max 4401 min
Histogram: 1 0 0 1 0 1 0 0 0 1

Total # of neighbors = 18761
Ave neighs/atom = 37.522
Neighbor list builds = 0
Dangerous builds not checked
Per MPI rank memory allocation (min/avg/max) = 2.619 | 2.619 | 2.619 Mbytes
Step Temp E_pair E_mol TotEng Press
100 0.75891559 -5.7609392 0 -4.6248426 0.20981291
150 0.75437991 -5.7558622 0 -4.6265555 0.20681722
200 0.73111257 -5.7193748 0 -4.6248993 0.35230715
run 0
Per MPI rank memory allocation (min/avg/max) = 2.619 | 2.619 | 2.619 Mbytes
Step Temp E_pair E_mol TotEng Press
200 0.73111257 -5.7143906 0 -4.6199151 0.37126023
Loop time of 2.38419e-06 on 4 procs for 0 steps with 500 atoms

125.8% CPU use with 4 MPI tasks x no OpenMP threads

MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0 | 0 | 0 | 0.0 | 0.00
Output | 0 | 0 | 0 | 0.0 | 0.00
Modify | 0 | 0 | 0 | 0.0 | 0.00
Other | | 2.384e-06 | | |100.00

Nlocal: 125 ave 126 max 123 min
Histogram: 1 0 0 0 0 0 1 0 0 2
Nghost: 1068.5 ave 1076 max 1063 min
Histogram: 2 0 0 0 0 0 1 0 0 1
Neighs: 4674.75 ave 4938 max 4419 min
Histogram: 1 0 0 0 1 1 0 0 0 1

Total # of neighbors = 18699
Ave neighs/atom = 37.398
Neighbor list builds = 0
Dangerous builds not checked
Per MPI rank memory allocation (min/avg/max) = 2.619 | 2.619 | 2.619 Mbytes
Step Temp E_pair E_mol TotEng Press
200 0.73111257 -5.7193748 0 -4.6248993 0.35230715
250 0.73873144 -5.7312505 0 -4.6253696 0.33061033
300 0.76392796 -5.7719207 0 -4.6283206 0.18197874
run 0
Per MPI rank memory allocation (min/avg/max) = 2.619 | 2.619 | 2.619 Mbytes
Step Temp E_pair E_mol TotEng Press
300 0.76392796 -5.7725589 0 -4.6289588 0.17994628
Loop time of 2.44379e-06 on 4 procs for 0 steps with 500 atoms

112.5% CPU use with 4 MPI tasks x no OpenMP threads

MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0 | 0 | 0 | 0.0 | 0.00
Output | 0 | 0 | 0 | 0.0 | 0.00
Modify | 0 | 0 | 0 | 0.0 | 0.00
Other | | 2.444e-06 | | |100.00

Nlocal: 125 ave 128 max 121 min
Histogram: 1 0 0 0 0 1 0 1 0 1
Nghost: 1069 ave 1080 max 1055 min
Histogram: 1 0 0 0 0 0 2 0 0 1
Neighs: 4672 ave 4803 max 4600 min
Histogram: 2 0 0 1 0 0 0 0 0 1

Total # of neighbors = 18688
Ave neighs/atom = 37.376
Neighbor list builds = 0
Dangerous builds not checked
Per MPI rank memory allocation (min/avg/max) = 2.619 | 2.619 | 2.619 Mbytes
Step Temp E_pair E_mol TotEng Press
300 0.76392796 -5.7725589 0 -4.6289588 0.17994628
350 0.71953041 -5.7041632 0 -4.6270261 0.44866153
400 0.7319047 -5.7216051 0 -4.6259438 0.46321355
run 0
Per MPI rank memory allocation (min/avg/max) = 2.619 | 2.619 | 2.619 Mbytes
Step Temp E_pair E_mol TotEng Press
400 0.7319047 -5.7158168 0 -4.6201554 0.49192039
Loop time of 2.14577e-06 on 4 procs for 0 steps with 500 atoms

139.8% CPU use with 4 MPI tasks x no OpenMP threads

MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0 | 0 | 0 | 0.0 | 0.00
Output | 0 | 0 | 0 | 0.0 | 0.00
Modify | 0 | 0 | 0 | 0.0 | 0.00
Other | | 2.146e-06 | | |100.00

Nlocal: 125 ave 132 max 118 min
Histogram: 1 0 0 0 0 2 0 0 0 1
Nghost: 1057.5 ave 1068 max 1049 min
Histogram: 1 0 0 1 1 0 0 0 0 1
Neighs: 4685.75 ave 5045 max 4229 min
Histogram: 1 0 0 1 0 0 0 0 0 2

Total # of neighbors = 18743
Ave neighs/atom = 37.486
Neighbor list builds = 0
Dangerous builds not checked
Per MPI rank memory allocation (min/avg/max) = 2.619 | 2.619 | 2.619 Mbytes
Step Temp E_pair E_mol TotEng Press
400 0.7319047 -5.7216051 0 -4.6259438 0.46321355
450 0.74503154 -5.7405318 0 -4.6252196 0.33211879
500 0.70570501 -5.6824439 0 -4.6260035 0.62020788
Total wall time: 0:00:00
@@ -0,0 +1,263 @@
/* ----------------------------------------------------------------------
   LAMMPS - Large-scale Atomic/Molecular Massively Parallel Simulator
   http://lammps.sandia.gov, Sandia National Laboratories
   Steve Plimpton, sjplimp@sandia.gov
------------------------------------------------------------------------- */

// MC code used with LAMMPS in client/server mode
// MC is the client, LAMMPS is the server

// Syntax: mc infile mode modearg
//   mode = file, zmq
//   modearg = filename for file, localhost:5555 for zmq
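// illustrative invocations (the input-script name "in.mc" is a
// placeholder, not something this file mandates):
//   % mc in.mc file tmp.couple
//   % mc in.mc zmq localhost:5555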

#include <cmath>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include "mc.h"
#include "random_park.h"

#include "cslib.h"
using namespace CSLIB_NS;

void error(const char *);
CSlib *cs_create(char *, char *);

#define MAXLINE 256

/* ---------------------------------------------------------------------- */

// main program

int main(int narg, char **arg)
{
  if (narg != 4) {
    error("Syntax: mc infile mode modearg");
    exit(1);
  }

  // initialize CSlib

  CSlib *cs = cs_create(arg[2],arg[3]);

  // create MC class and perform run

  MC *mc = new MC(arg[1],cs);
  mc->run();

  // final MC stats

  int naccept = mc->naccept;
  int nattempt = mc->nattempt;

  printf("------ MC stats ------\n");
  printf("MC attempts = %d\n",nattempt);
  printf("MC accepts = %d\n",naccept);
  printf("Acceptance ratio = %g\n",1.0*naccept/nattempt);

  // clean up

  delete cs;
  delete mc;
}

/* ---------------------------------------------------------------------- */

void error(const char *str)
{
  printf("ERROR: %s\n",str);
  exit(1);
}

/* ---------------------------------------------------------------------- */

CSlib *cs_create(char *mode, char *arg)
{
  CSlib *cs = new CSlib(0,mode,arg,NULL);

  // initial handshake to agree on protocol

  cs->send(0,1);
  cs->pack_string(1,(char *) "mc");

  int msgID,nfield;
  int *fieldID,*fieldtype,*fieldlen;
  msgID = cs->recv(nfield,fieldID,fieldtype,fieldlen);

  return cs;
}

// ----------------------------------------------------------------------
// MC class
// ----------------------------------------------------------------------

MC::MC(char *mcfile, void *cs_caller)
//MC::MC(char *mcfile, CSlib *cs_caller)
{
  cs_void = cs_caller;

  // setup MC params

  options(mcfile);

  // random # generator

  random = new RanPark(seed);
}

/* ---------------------------------------------------------------------- */

MC::~MC()
{
  free(x);
  delete random;
}

/* ---------------------------------------------------------------------- */

void MC::run()
{
  int iatom,accept,msgID,nfield;
  double pe_initial,pe_final,edelta;
  double dx,dy,dz;
  double xold[3],xnew[3];
  int *fieldID,*fieldtype,*fieldlen;

  enum{NATOMS=1,EINIT,DISPLACE,ACCEPT,RUN};

  CSlib *cs = (CSlib *) cs_void;

  // one-time request for atom count from MD
  // allocate 1d coord buffer

  cs->send(NATOMS,0);

  msgID = cs->recv(nfield,fieldID,fieldtype,fieldlen);
  natoms = cs->unpack_int(1);

  x = (double *) malloc(3*natoms*sizeof(double));

  // loop over MC moves

  naccept = nattempt = 0;

  for (int iloop = 0; iloop < nloop; iloop++) {

    // request current energy from MD
    // recv energy, coords from MD

    cs->send(EINIT,0);

    msgID = cs->recv(nfield,fieldID,fieldtype,fieldlen);
    pe_initial = cs->unpack_double(1);
    double *x = (double *) cs->unpack(2);

    // perform simple MC event
    // displace a single atom by random amount

    iatom = (int) natoms*random->uniform();
    xold[0] = x[3*iatom+0];
    xold[1] = x[3*iatom+1];
    xold[2] = x[3*iatom+2];

    dx = 2.0*delta*random->uniform() - delta;
    dy = 2.0*delta*random->uniform() - delta;
    dz = 2.0*delta*random->uniform() - delta;

    xnew[0] = xold[0] + dx;
    xnew[1] = xold[1] + dy;   // bug fix: was "+ dx"; dy was computed but unused
    xnew[2] = xold[2] + dz;   // bug fix: was "+ dx"; dz was computed but unused

    // send atom ID and its new coords to MD
    // recv new energy

    cs->send(DISPLACE,2);
    cs->pack_int(1,iatom+1);
    cs->pack(2,4,3,xnew);

    msgID = cs->recv(nfield,fieldID,fieldtype,fieldlen);
    pe_final = cs->unpack_double(1);

    // decide whether to accept/reject MC event
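    // Metropolis criterion: downhill moves are always accepted; uphill
    // moves are accepted with probability exp(-dE/T); the pe values sent
    // by the server are normalized per atom (LAMMPS lj units), hence the
    // natoms factor in the exponent below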

    if (pe_final <= pe_initial) accept = 1;
    else if (temperature == 0.0) accept = 0;
    else if (random->uniform() >
             exp(natoms*(pe_initial-pe_final)/temperature)) accept = 0;
    else accept = 1;

    nattempt++;
    if (accept) naccept++;

    // send accept (1) or reject (0) flag to MD

    cs->send(ACCEPT,1);
    cs->pack_int(1,accept);

    msgID = cs->recv(nfield,fieldID,fieldtype,fieldlen);

    // send dynamics timesteps

    cs->send(RUN,1);
    cs->pack_int(1,ndynamics);

    msgID = cs->recv(nfield,fieldID,fieldtype,fieldlen);
  }

  // send exit message to MD

  cs->send(-1,0);
  msgID = cs->recv(nfield,fieldID,fieldtype,fieldlen);
}

/* ---------------------------------------------------------------------- */

void MC::options(char *filename)
{
  // default params

  nsteps = 0;
  ndynamics = 100;
  delta = 0.1;
  temperature = 1.0;
  seed = 12345;

  // read and parse file

  FILE *fp = fopen(filename,"r");
  if (fp == NULL) error("Could not open MC file");

  char line[MAXLINE];
  char *keyword,*value;
  char *eof = fgets(line,MAXLINE,fp);

  while (eof) {
    if (line[0] == '#') {          // comment line
      eof = fgets(line,MAXLINE,fp);
      continue;
    }

    value = strtok(line," \t\n\r\f");
    if (value == NULL) {           // blank line
      eof = fgets(line,MAXLINE,fp);
      continue;
    }

    keyword = strtok(NULL," \t\n\r\f");
    if (keyword == NULL) error("Missing keyword in MC file");

    if (strcmp(keyword,"nsteps") == 0) nsteps = atoi(value);
    else if (strcmp(keyword,"ndynamics") == 0) ndynamics = atoi(value);
    else if (strcmp(keyword,"delta") == 0) delta = atof(value);
    else if (strcmp(keyword,"temperature") == 0) temperature = atof(value);
    else if (strcmp(keyword,"seed") == 0) seed = atoi(value);
    else error("Unknown param in MC file");

    eof = fgets(line,MAXLINE,fp);
  }

  // derived params

  nloop = nsteps/ndynamics;
}
@@ -0,0 +1,40 @@
/* ----------------------------------------------------------------------
   LAMMPS - Large-scale Atomic/Molecular Massively Parallel Simulator
   http://lammps.sandia.gov, Sandia National Laboratories
   Steve Plimpton, sjplimp@sandia.gov
------------------------------------------------------------------------- */

#ifndef MC_H
#define MC_H

/* ---------------------------------------------------------------------- */

class MC {
 public:
  int naccept;          // # of accepted MC events
  int nattempt;         // # of attempted MC events

  MC(char *, void *);
  ~MC();
  void run();

 private:
  int nsteps;           // total # of MD steps
  int ndynamics;        // steps in one short dynamics run
  int nloop;            // nsteps/ndynamics
  int natoms;           // # of MD atoms

  double delta;         // MC displacement distance
  double temperature;   // MC temperature for Boltzmann criterion
  double *x;            // atom coords as 3N 1d vector
  double energy;        // global potential energy

  int seed;             // RNG seed
  class RanPark *random;

  void *cs_void;        // messaging library

  void options(char *);
};

#endif
@@ -0,0 +1,72 @@
/* ----------------------------------------------------------------------
   LAMMPS - Large-scale Atomic/Molecular Massively Parallel Simulator
   http://lammps.sandia.gov, Sandia National Laboratories
   Steve Plimpton, sjplimp@sandia.gov

   Copyright (2003) Sandia Corporation.  Under the terms of Contract
   DE-AC04-94AL85000 with Sandia Corporation, the U.S. Government retains
   certain rights in this software.  This software is distributed under
   the GNU General Public License.

   See the README file in the top-level LAMMPS directory.
------------------------------------------------------------------------- */

// Park/Miller RNG

#include <math.h>
#include "random_park.h"
//#include "error.h"

#define IA 16807
#define IM 2147483647
#define AM (1.0/IM)
#define IQ 127773
#define IR 2836

/* ---------------------------------------------------------------------- */

RanPark::RanPark(int seed_init)
{
  //if (seed_init <= 0)
  //  error->one(FLERR,"Invalid seed for Park random # generator");
  seed = seed_init;
  save = 0;
}

/* ----------------------------------------------------------------------
   uniform RN
------------------------------------------------------------------------- */

double RanPark::uniform()
{
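  // Schrage's method: evaluates (IA*seed) mod IM without overflowing
  // 32-bit signed arithmetic (note IM = IA*IQ + IR)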
  int k = seed/IQ;
  seed = IA*(seed-k*IQ) - IR*k;
  if (seed < 0) seed += IM;
  double ans = AM*seed;
  return ans;
}

/* ----------------------------------------------------------------------
   gaussian RN
------------------------------------------------------------------------- */

double RanPark::gaussian()
{
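  // Marsaglia polar variant of Box-Muller: each pass through the do-while
  // yields two deviates; the spare is cached in "second" for the next call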
  double first,v1,v2,rsq,fac;

  if (!save) {
    do {
      v1 = 2.0*uniform()-1.0;
      v2 = 2.0*uniform()-1.0;
      rsq = v1*v1 + v2*v2;
    } while ((rsq >= 1.0) || (rsq == 0.0));
    fac = sqrt(-2.0*log(rsq)/rsq);
    second = v1*fac;
    first = v2*fac;
    save = 1;
  } else {
    first = second;
    save = 0;
  }
  return first;
}
@@ -0,0 +1,28 @@
/* ----------------------------------------------------------------------
   LAMMPS - Large-scale Atomic/Molecular Massively Parallel Simulator
   http://lammps.sandia.gov, Sandia National Laboratories
   Steve Plimpton, sjplimp@sandia.gov

   Copyright (2003) Sandia Corporation.  Under the terms of Contract
   DE-AC04-94AL85000 with Sandia Corporation, the U.S. Government retains
   certain rights in this software.  This software is distributed under
   the GNU General Public License.

   See the README file in the top-level LAMMPS directory.
------------------------------------------------------------------------- */

#ifndef RANPARK_H
#define RANPARK_H

class RanPark {
 public:
  RanPark(int);
  double uniform();
  double gaussian();

 private:
  int seed,save;
  double second;
};

#endif
@@ -0,0 +1,53 @@
# Startparameter for this run:
NWRITE = 2 write-flag & timer
PREC = normal normal or accurate (medium, high low for compatibility)
ISTART = 0 job : 0-new 1-cont 2-samecut
ICHARG = 2 charge: 1-file 2-atom 10-const
ISPIN = 1 spin polarized calculation?
LSORBIT = F spin-orbit coupling
INIWAV = 1 electr: 0-lowe 1-rand 2-diag

# Electronic Relaxation 1
ENCUT = 600.0 eV #Plane wave energy cutoff
ENINI = 600.0 initial cutoff
NELM = 100; NELMIN= 2; NELMDL= -5 # of ELM steps
EDIFF = 0.1E-05 stopping-criterion for ELM
# Ionic relaxation
EDIFFG = 0.1E-02 stopping-criterion for IOM
NSW = 0 number of steps for IOM
NBLOCK = 1; KBLOCK = 1 inner block; outer block
IBRION = -1 ionic relax: 0-MD 1-quasi-New 2-CG #No ion relaxation with -1
NFREE = 0 steps in history (QN), initial steepest desc. (CG)
ISIF = 2 stress and relaxation # 2: F-yes Sts-yes RlxIon-yes cellshape-no cellvol-no
IWAVPR = 10 prediction: 0-non 1-charg 2-wave 3-comb # 10: TMPCAR stored in memory rather than file

POTIM = 0.5000 time-step for ionic-motion
TEBEG = 3500.0; TEEND = 3500.0 temperature during run # Finite Temperature variables if AI-MD is on
SMASS = -3.00 Nose mass-parameter (am)
estimated Nose-frequenzy (Omega) = 0.10E-29 period in steps =****** mass= -0.366E-27a.u.
PSTRESS= 0.0 pullay stress

# DOS related values:
EMIN = 10.00; EMAX =-10.00 energy-range for DOS
EFERMI = 0.00
ISMEAR = 0; SIGMA = 0.10 broadening in eV -4-tet -1-fermi 0-gaus

# Electronic relaxation 2 (details)
IALGO = 48 algorithm

# Write flags
LWAVE = T write WAVECAR
LCHARG = T write CHGCAR
LVTOT = F write LOCPOT, total local potential
LVHAR = F write LOCPOT, Hartree potential only
LELF = F write electronic localiz. function (ELF)

# Dipole corrections
LMONO = F monopole corrections only (constant potential shift)
LDIPOL = F correct potential (dipole corrections)
IDIPOL = 0 1-x, 2-y, 3-z, 4-all directions
EPSILON= 1.0000000 bulk dielectric constant

# Exchange correlation treatment:
GGA = -- GGA type
@@ -0,0 +1,6 @@
K-Points
0
Monkhorst Pack
15 15 15
0 0 0
@@ -0,0 +1,11 @@
W unit cell
1.0
3.16 0.00000000 0.00000000
0.00000000 3.16 0.00000000
0.00000000 0.00000000 3.16
W
2
Direct
0.00000000 0.00000000 0.00000000
0.50000000 0.50000000 0.50000000
@@ -0,0 +1,149 @@
Sample LAMMPS MD wrapper on VASP quantum DFT via client/server
coupling

See the MESSAGE package (doc/Section_messages.html#MESSAGE) and
Section_howto.html#howto10 for more details on how client/server
coupling works in LAMMPS.

In this dir, vasp_wrap.py is a wrapper on the VASP quantum DFT code so
it can act as a "server" which LAMMPS drives as a "client" to perform
ab initio MD. LAMMPS performs the MD timestepping and sends VASP the
current set of coordinates each timestep; VASP computes the forces,
energy, and virial for that configuration and returns them to LAMMPS.

Messages are exchanged between LAMMPS and VASP via a client/server
library (CSlib), which is included in the LAMMPS distribution in
lib/message. As explained below you can choose to exchange data
between the two programs either via files or sockets (ZMQ). If the
vasp_wrap.py program became parallel, or the CSlib library calls were
integrated into VASP directly, then data could also be exchanged via
MPI.
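
For orientation, here is a minimal sketch of the server side of that
exchange, using the CSlib Python API the same way vasp_wrap.py (included
below) does; the "md" protocol string and handshake message ID 0 come
from the LAMMPS client side:

  from cslib import CSlib

  # role 1 = server; "file" mode rendezvous via the tmp.couple file
  cs = CSlib(1,"file","tmp.couple",None)

  # handshake: client sends msgID 0 with one string field naming the protocol
  msgID,nfield,fieldID,fieldtype,fieldlen = cs.recv()
  if msgID != 0 or cs.unpack_string(1) != "md":
    raise SystemExit("bad client/server handshake")
  cs.send(0,0)   # empty reply completes the handshake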

----------------

Build LAMMPS with its MESSAGE package installed:

See the Build extras doc page and its MESSAGE package
section for details.

CMake:

-D PKG_MESSAGE=yes # include the MESSAGE package
-D MESSAGE_ZMQ=value # build with ZeroMQ support, value = no (default) or yes
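
For example, from a build directory inside the LAMMPS tree (the paths
are illustrative):

% cmake -D PKG_MESSAGE=yes -D MESSAGE_ZMQ=yes ../cmake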

Traditional make:

cd lammps/lib/message
python Install.py -m -z # build CSlib with MPI and ZMQ support
cd lammps/src
make yes-message
make mpi

You can leave off the -z if you do not have ZMQ on your system.

----------------

Build the CSlib in a form usable by the vasp_wrap.py script:

% cd lammps/lib/message/cslib/src
% make shlib # build serial and parallel shared lib with ZMQ support
% make shlib zmq=no # build serial and parallel shared lib w/out ZMQ support

This will make shared-library versions of the CSlib, which Python
requires. Python must be able to find both the cslib.py script and
the libcsnompi.so library in your lammps/lib/message/cslib/src
directory. If it is not able to do this, you will get an error when
you run vasp_wrap.py.

You can do this by augmenting two environment variables, either
from the command line, or in your shell start-up script.
Here is the sample syntax for the csh or tcsh shells (for bash-family
shells, use export instead of setenv):

setenv PYTHONPATH ${PYTHONPATH}:/home/sjplimp/lammps/lib/message/cslib/src
setenv LD_LIBRARY_PATH ${LD_LIBRARY_PATH}:/home/sjplimp/lammps/lib/message/cslib/src

----------------

Prepare to use VASP and the vasp_wrap.py script

You can run the vasp_wrap.py script as-is to test that the coupling
between it and LAMMPS is functional. This will use the included
vasprun.xml file output by a previous VASP run.

But note that the as-is version of vasp_wrap.py will not attempt to
run VASP.

To have it actually launch VASP, you must edit the 1st vaspcmd line at
the top of vasp_wrap.py to be the launch command needed to run VASP on
your system. It can be a command to run VASP in serial or in parallel,
e.g. an mpirun command. Then comment out the 2nd vaspcmd line
immediately following it.

Ensure you have the necessary VASP input files in this
directory, suitable for the VASP calculation you want to perform:

INCAR
KPOINTS
POSCAR_template
POTCAR

Examples of all but the POTCAR file are provided. As explained below,
POSCAR_W is an input file for a 2-atom unit cell of tungsten and can
be used to test the LAMMPS/VASP coupling. The POTCAR file is a
proprietary VASP file, so use one from your VASP installation.

Note that the POSCAR_template file should be matched to the LAMMPS
input script (# of atoms and atom types, box size, etc). The provided
POSCAR_W matches in.client.W.

Once you run VASP yourself, the vasprun.xml file will be overwritten.

----------------

To run in client/server mode:

NOTE: The vasp_wrap.py script must be run with Python version 2, not
3. This is because it uses the CSlib python wrapper, which only
supports version 2. We plan to upgrade CSlib to support Python 3.

Both the client (LAMMPS) and server (vasp_wrap.py) must use the same
messaging mode, namely file or zmq. This is an argument to the
vasp_wrap.py code; it can be selected by setting the "mode" variable
when you run LAMMPS. The default mode = file.

Here we assume LAMMPS was built to run in parallel, and the MESSAGE
package was installed with socket (ZMQ) support. This means either of
the messaging modes can be used and LAMMPS can be run in serial or
parallel. The vasp_wrap.py code is always run in serial, but it
launches VASP from Python via an mpirun command which can run VASP
itself in parallel.

When you run, the client (LAMMPS) should print out thermodynamic info
every timestep, reflecting the forces and virial computed by VASP.
VASP will also generate output files each timestep. The vasp_wrap.py
script could be generalized to archive these.

The examples below are commands you should use in two different
terminal windows. The order of the two commands (client or server
launch) does not matter. You can run them both in the same window if
you append a "&" character to the first one to run it in the
background.

--------------

File mode of messaging:

% mpirun -np 1 lmp_mpi -v mode file < in.client.W
% python vasp_wrap.py file POSCAR_W

% mpirun -np 2 lmp_mpi -v mode file < in.client.W
% python vasp_wrap.py file POSCAR_W

ZMQ mode of messaging:

% mpirun -np 1 lmp_mpi -v mode zmq < in.client.W
% python vasp_wrap.py zmq POSCAR_W

% mpirun -np 2 lmp_mpi -v mode zmq < in.client.W
% python vasp_wrap.py zmq POSCAR_W
@@ -0,0 +1,15 @@
LAMMPS W data file

2 atoms

1 atom types

0.0 3.16 xlo xhi
0.0 3.16 ylo yhi
0.0 3.16 zlo zhi

Atoms

1 1 0.000 0.000 0.000
2 1 1.58 1.58 1.58
@@ -0,0 +1,34 @@
# small W unit cell for use with VASP

variable mode index file

if "${mode} == file" then &
  "message client md file tmp.couple" &
elif "${mode} == zmq" &
  "message client md zmq localhost:5555"

variable x index 1
variable y index 1
variable z index 1

units metal
atom_style atomic
atom_modify sort 0 0.0 map yes

read_data data.W
mass 1 183.85

replicate $x $y $z

velocity all create 300.0 87287 loop geom

neighbor 0.3 bin
neigh_modify delay 0 every 10 check no

fix 1 all nve
fix 2 all client/md
fix_modify 2 energy yes

thermo 1
run 3
@@ -0,0 +1,76 @@
LAMMPS (22 Aug 2018)
# small W unit cell for use with VASP

variable mode index file

if "${mode} == file" then "message client md file tmp.couple" elif "${mode} == zmq" "message client md zmq localhost:5555"
message client md zmq localhost:5555
variable x index 1
variable y index 1
variable z index 1

units metal
atom_style atomic
atom_modify sort 0 0.0 map yes

read_data data.W
orthogonal box = (0 0 0) to (3.16 3.16 3.16)
1 by 1 by 2 MPI processor grid
reading atoms ...
2 atoms
mass 1 183.85

replicate $x $y $z
replicate 1 $y $z
replicate 1 1 $z
replicate 1 1 1
orthogonal box = (0 0 0) to (3.16 3.16 3.16)
1 by 1 by 2 MPI processor grid
2 atoms
Time spent = 0.000148058 secs

velocity all create 300.0 87287 loop geom

neighbor 0.3 bin
neigh_modify delay 0 every 10 check no

fix 1 all nve
fix 2 all client/md
fix_modify 2 energy yes

thermo 1
run 3
Per MPI rank memory allocation (min/avg/max) = 1.8 | 1.8 | 1.8 Mbytes
Step Temp E_pair E_mol TotEng Press
0 300 0 0 -48.030793 -78159.503
1 298.24318 0 0 -48.03102 -78167.19
2 296.85584 0 0 -48.031199 -78173.26
3 295.83795 0 0 -48.031331 -78177.714
Loop time of 0.457491 on 2 procs for 3 steps with 2 atoms

Performance: 0.567 ns/day, 42.360 hours/ns, 6.558 timesteps/s
50.1% CPU use with 2 MPI tasks x no OpenMP threads

MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 1.3828e-05 | 2.9922e-05 | 4.6015e-05 | 0.0 | 0.01
Output | 7.5817e-05 | 9.3937e-05 | 0.00011206 | 0.0 | 0.02
Modify | 0.45735 | 0.45736 | 0.45736 | 0.0 | 99.97
Other | | 1.204e-05 | | | 0.00

Nlocal: 1 ave 1 max 1 min
Histogram: 2 0 0 0 0 0 0 0 0 0
Nghost: 4 ave 4 max 4 min
Histogram: 2 0 0 0 0 0 0 0 0 0
Neighs: 0 ave 0 max 0 min
Histogram: 2 0 0 0 0 0 0 0 0 0

Total # of neighbors = 0
Ave neighs/atom = 0
Neighbor list builds = 0
Dangerous builds not checked

Total wall time: 0:01:21
@@ -0,0 +1,300 @@
#!/usr/bin/env python

# ----------------------------------------------------------------------
# LAMMPS - Large-scale Atomic/Molecular Massively Parallel Simulator
# http://lammps.sandia.gov, Sandia National Laboratories
# Steve Plimpton, sjplimp@sandia.gov
# ----------------------------------------------------------------------

# Syntax: vasp_wrap.py file/zmq POSCARfile

# wrapper on VASP to act as server program using CSlib
#   receives message with list of coords from client
#   creates VASP inputs
#   invokes VASP to calculate self-consistent energy of that config
#   reads VASP outputs
#   sends message with energy, forces, pressure to client

# NOTES:
#   check to ensure basic VASP input files are in place?
#   could archive VASP input/output in special filenames or dirs?
#   need to check that POTCAR file is consistent with atom ordering?
#   could make syntax for launching VASP more flexible
#     e.g. command-line arg for # of procs
#   detect if VASP had an error and return ERROR field, e.g. non-convergence ??

from __future__ import print_function
import sys

version = sys.version_info[0]
if version == 3:
  sys.exit("The CSlib python wrapper does not yet support python 3")

import subprocess
import xml.etree.ElementTree as ET
from cslib import CSlib

# comment out 2nd line once 1st line is correct for your system

vaspcmd = "srun -N 1 --ntasks-per-node=4 " + \
          "-n 4 /projects/vasp/2017-build/cts1/vasp5.4.4/vasp_tfermi/bin/vasp_std"
vaspcmd = "touch tmp"

# enums matching FixClientMD class in LAMMPS

SETUP,STEP = range(1,2+1)
DIM,PERIODICITY,ORIGIN,BOX,NATOMS,NTYPES,TYPES,COORDS,UNITS,CHARGE = range(1,10+1)
FORCES,ENERGY,VIRIAL,ERROR = range(1,4+1)
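# message flow: the client sends SETUP once at the start of a run and
# STEP each timestep; server replies pack FORCES, ENERGY, and VIRIAL
# fields (ERROR is reserved for reporting failures)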

# -------------------------------------
# functions

# error message and exit

def error(txt):
  print("ERROR:",txt)
  sys.exit(1)

# -------------------------------------
# read initial VASP POSCAR file to setup problem
# return natoms,ntypes,box
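# note: assumes an orthogonal cell; only the diagonal entries of the
# 3-line lattice-vector block are read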

def vasp_setup(poscar):

  ps = open(poscar,'r').readlines()

  # box size

  words = ps[2].split()
  xbox = float(words[0])
  words = ps[3].split()
  ybox = float(words[1])
  words = ps[4].split()
  zbox = float(words[2])
  box = [xbox,ybox,zbox]

  ntypes = 0
  natoms = 0
  words = ps[6].split()
  for word in words:
    if word == '#': break
    ntypes += 1
    natoms += int(word)

  return natoms,ntypes,box

# -------------------------------------
# write a new POSCAR file for VASP

def poscar_write(poscar,natoms,ntypes,types,coords,box):
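  # "box" here is the 9-value cell (3 lattice vectors) unpacked from the
  # client message, not the 3-vector returned by vasp_setup()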

  psold = open(poscar,'r').readlines()
  psnew = open("POSCAR",'w')

  # header, including box size

  psnew.write(psold[0])
  psnew.write(psold[1])
  psnew.write("%g %g %g\n" % (box[0],box[1],box[2]))
  psnew.write("%g %g %g\n" % (box[3],box[4],box[5]))
  psnew.write("%g %g %g\n" % (box[6],box[7],box[8]))
  psnew.write(psold[5])
  psnew.write(psold[6])

  # per-atom coords
  # grouped by types

  psnew.write("Cartesian\n")

  for itype in range(1,ntypes+1):
    for i in range(natoms):
      if types[i] != itype: continue
      x = coords[3*i+0]
      y = coords[3*i+1]
      z = coords[3*i+2]
      aline = " %g %g %g\n" % (x,y,z)
      psnew.write(aline)

  psnew.close()

# -------------------------------------
# read a VASP output vasprun.xml file
# uses ElementTree module
# see https://docs.python.org/2/library/xml.etree.elementtree.html

def vasprun_read():
  tree = ET.parse('vasprun.xml')
  root = tree.getroot()

  #fp = open("vasprun.xml","r")
  #root = ET.parse(fp)

  scsteps = root.findall('calculation/scstep')
  energy = scsteps[-1].find('energy')
  for child in energy:
    if child.attrib["name"] == "e_0_energy":
      eout = float(child.text)

  fout = []
  sout = []

  varrays = root.findall('calculation/varray')
  for varray in varrays:
    if varray.attrib["name"] == "forces":
      forces = varray.findall("v")
      for line in forces:
        fxyz = line.text.split()
        fxyz = [float(value) for value in fxyz]
        fout += fxyz
    if varray.attrib["name"] == "stress":
      tensor = varray.findall("v")
      stensor = []
      for line in tensor:
        sxyz = line.text.split()
        sxyz = [float(value) for value in sxyz]
        stensor.append(sxyz)
      sxx = stensor[0][0]
      syy = stensor[1][1]
      szz = stensor[2][2]
      # symmetrize off-diagonal components
      sxy = 0.5 * (stensor[0][1] + stensor[1][0])
      sxz = 0.5 * (stensor[0][2] + stensor[2][0])
      syz = 0.5 * (stensor[1][2] + stensor[2][1])
      sout = [sxx,syy,szz,sxy,sxz,syz]

  #fp.close()

  return eout,fout,sout
|
||||
|
||||
# -------------------------------------
|
||||
# main program
|
||||
|
||||
# command-line args
|
||||
|
||||
if len(sys.argv) != 3:
|
||||
print("Syntax: python vasp_wrap.py file/zmq POSCARfile")
|
||||
sys.exit(1)
|
||||
|
||||
mode = sys.argv[1]
|
||||
poscar_template = sys.argv[2]
|
||||
|
||||
if mode == "file": cs = CSlib(1,mode,"tmp.couple",None)
|
||||
elif mode == "zmq": cs = CSlib(1,mode,"*:5555",None)
|
||||
else:
|
||||
print("Syntax: python vasp_wrap.py file/zmq POSCARfile")
|
||||
sys.exit(1)
|
||||
|
||||
natoms,ntypes,box = vasp_setup(poscar_template)
|
||||
|
||||
# initial message for MD protocol
|
||||
|
||||
msgID,nfield,fieldID,fieldtype,fieldlen = cs.recv()
|
||||
if msgID != 0: error("Bad initial client/server handshake")
|
||||
protocol = cs.unpack_string(1)
|
||||
if protocol != "md": error("Mismatch in client/server protocol")
|
||||
cs.send(0,0)
|
||||
|
||||
# endless server loop
|
||||
|
||||
while 1:
|
||||
|
||||
# recv message from client
|
||||
# msgID = 0 = all-done message
|
||||
|
||||
msgID,nfield,fieldID,fieldtype,fieldlen = cs.recv()
|
||||
if msgID < 0: break
|
||||
|
||||
# SETUP receive at beginning of each run
|
||||
# required fields: DIM, PERIODICTY, ORIGIN, BOX,
|
||||
# NATOMS, NTYPES, TYPES, COORDS
|
||||
# optional fields: others in enum above, but VASP ignores them
|
||||
|
||||
if msgID == SETUP:
|
||||
|
||||
origin = []
|
||||
box = []
|
||||
natoms_recv = ntypes_recv = 0
|
||||
types = []
|
||||
coords = []
|
||||
|
||||
for field in fieldID:
|
||||
if field == DIM:
|
||||
dim = cs.unpack_int(DIM)
|
||||
if dim != 3: error("VASP only performs 3d simulations")
|
||||
elif field == PERIODICITY:
|
||||
periodicity = cs.unpack(PERIODICITY,1)
|
||||
if not periodicity[0] or not periodicity[1] or not periodicity[2]:
|
||||
error("VASP wrapper only currently supports fully periodic systems")
|
||||
elif field == ORIGIN:
|
||||
origin = cs.unpack(ORIGIN,1)
|
||||
elif field == BOX:
|
||||
box = cs.unpack(BOX,1)
|
||||
elif field == NATOMS:
|
||||
natoms_recv = cs.unpack_int(NATOMS)
|
||||
if natoms != natoms_recv:
|
||||
error("VASP wrapper mis-match in number of atoms")
|
||||
elif field == NTYPES:
|
||||
ntypes_recv = cs.unpack_int(NTYPES)
|
||||
if ntypes != ntypes_recv:
|
||||
error("VASP wrapper mis-match in number of atom types")
|
||||
elif field == TYPES:
|
||||
types = cs.unpack(TYPES,1)
|
||||
elif field == COORDS:
|
||||
coords = cs.unpack(COORDS,1)
|
||||
|
||||
if not origin or not box or not natoms or not ntypes or \
|
||||
not types or not coords:
|
||||
error("Required VASP wrapper setup field not received");
|
||||
|
||||
# STEP receive at each timestep of run or minimization
|
||||
# required fields: COORDS
|
||||
# optional fields: ORIGIN, BOX
|
||||
|
||||
elif msgID == STEP:
|
||||
|
||||
coords = []
|
||||
|
||||
for field in fieldID:
|
||||
if field == COORDS:
|
||||
coords = cs.unpack(COORDS,1)
|
||||
elif field == ORIGIN:
|
||||
origin = cs.unpack(ORIGIN,1)
|
||||
elif field == BOX:
|
||||
box = cs.unpack(BOX,1)
|
||||
|
||||
if not coords: error("Required VASP wrapper step field not received");
|
||||
|
||||
else: error("VASP wrapper received unrecognized message")
|
||||
|
||||
# create POSCAR file
|
||||
|
||||
poscar_write(poscar_template,natoms,ntypes,types,coords,box)
|
||||
|
||||
# invoke VASP
|
||||
|
||||
print("\nLaunching VASP ...")
|
||||
print(vaspcmd)
|
||||
subprocess.check_output(vaspcmd,stderr=subprocess.STDOUT,shell=True)
|
||||
|
||||
# process VASP output
|
||||
|
||||
energy,forces,virial = vasprun_read()
|
||||
|
||||
# convert VASP kilobars to bars
|
||||
|
||||
for i,value in enumerate(virial): virial[i] *= 1000.0
|
||||
|
||||
# return forces, energy, pressure to client
|
||||
|
||||
cs.send(msgID,3);
|
||||
cs.pack(FORCES,4,3*natoms,forces)
|
||||
cs.pack_double(ENERGY,energy)
|
||||
cs.pack(VIRIAL,4,6,virial)
|
||||
|
||||
# final reply to client
|
||||
|
||||
cs.send(0,0)
|
||||
|
||||
# clean-up
|
||||
|
||||
del cs
|
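For context, the client side of the handshake above would look roughly like the
following. This is a minimal sketch, not taken from the LAMMPS sources: it assumes
a CSlib client is created with first argument 0 (the server above uses 1) and that
pack_string mirrors the unpack_string call used by the server.

  from cslib import CSlib

  cs = CSlib(0,"file","tmp.couple",None)    # assumed: 0 = client mode

  cs.send(0,1)                              # handshake message with 1 field
  cs.pack_string(1,"md")                    # the protocol string checked above
  msgID,nfield,fieldID,fieldtype,fieldlen = cs.recv()   # empty server ack

  # ... then send one SETUP message, a STEP message per timestep, and
  # recv FORCES/ENERGY/VIRIAL after each; send a msgID < 0 to stop ...

  del cs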
(File diff suppressed because it is too large)
@@ -59,6 +59,7 @@ sub-directories:
accelerate: use of all the various accelerator packages
airebo: polyethylene with AIREBO potential
atm: Axilrod-Teller-Muto potential
balance: dynamic load balancing, 2d system
body: body particles, 2d system
cmap: CMAP 5-body contributions to CHARMM force field

@@ -82,6 +83,7 @@ kim: use of potentials in Knowledge Base for Interatomic Models (KIM)
latte: use of LATTE density-functional tight-binding quantum code
meam: MEAM test for SiC and shear (same as shear examples)
melt: rapid melt of 3d LJ system
message: client/server coupling of 2 codes
micelle: self-assembly of small lipid-like molecules into 2d bilayers
min: energy minimization of 2d LJ melt
mscg: parameterize a multi-scale coarse-graining (MSCG) model
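The message example added above is the client/server pairing this commit wires
up: one process runs LAMMPS as the client, another runs a server such as the
vasp_wrap.py wrapper shown earlier. A hypothetical two-terminal session (the
LAMMPS binary and input file names are placeholders; the "file" mode and POSCAR
argument follow the wrapper's own syntax line):

  python vasp_wrap.py file POSCAR
  lmp_mpi -in in.message.client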
@@ -17,8 +17,9 @@ atom_modify sort 0 0

compute XRD all xrd 1.541838 Ni 2Theta 40 80 c 2 2 2 LP 1 echo

compute SAED all saed 0.0251 Ni Kmax 0.85 Zone 1 0 0 c 0.025 0.025 0.025 &
        dR_Ewald 0.05 echo manual
compute SAED all saed 0.0251 Ni Kmax 0.85 &
        Zone 0 0 0 c 0.025 0.025 0.025 &
        dR_Ewald 0.01 echo manual

fix 1 all ave/histo/weight 1 1 1 40 80 200 c_XRD[1] c_XRD[2] &
    mode vector file $A.hist.xrd
@@ -1,35 +0,0 @@
variable A string bulkNi
log $A.log

boundary p p p

units metal
timestep 0.001

lattice fcc 3.52
region box block 0 20 0 20 0 20
create_box 1 box
create_atoms 1 box

pair_style none
mass * 58.71
atom_modify sort 0 0

compute XRD all xrd 1.541838 Ni 2Theta 40 80 c 2 2 2 LP 1 echo

compute SAED all saed 0.0251 Ni Kmax 0.85 &
        Zone 0 0 0 c 0.025 0.025 0.025 &
        dR_Ewald 0.01 echo manual

fix 1 all ave/histo/weight 1 1 1 40 80 200 c_XRD[1] c_XRD[2] &
    mode vector file $A.hist.xrd

fix 2 all saed/vtk 1 1 1 c_SAED file $A_001.saed

dump 1 all custom 1 $A.dump id x y z
run 0

unfix 1
unfix 2
uncompute XRD
uncompute SAED
(File diff suppressed because it is too large)
@@ -0,0 +1,40 @@
variable T equal 0.8
variable p_solid equal 0.05

read_data data.mop

pair_style lj/cut 2.5
pair_coeff * * 1.0 1.0
pair_coeff 1 2 0.5 1.0
pair_coeff 2 2 0.0 0.0
neigh_modify delay 0

group liquid type 1
group solid type 2
region bottom block INF INF INF INF INF 7.0
group bottom region bottom
group solid_bottom intersect solid bottom
group solid_up subtract solid solid_bottom

variable faSolid equal ${p_solid}*lx*ly/count(solid_up)
fix piston_up solid_up aveforce NULL NULL -${faSolid}
fix freeze_up solid_up setforce 0.0 0.0 NULL
fix freeze_bottom solid_bottom setforce 0.0 0.0 0.0
fix nvesol solid nve
compute Tliq liquid temp
fix nvtliq liquid nvt temp $T $T 0.5
fix_modify nvtliq temp Tliq

thermo 1000
thermo_modify flush yes temp Tliq

fix fxbal all balance 1000 1.05 shift z 10 1.05

compute mopz0 all stress/mop z center kin conf
fix mopz0t all ave/time 1 1 1 c_mopz0[*] file mopz0.time

compute moppz liquid stress/mop/profile z 0.0 0.1 kin conf
fix moppzt all ave/time 1 1 1 c_moppz[*] ave running overwrite file moppz.time mode vector

run 0
@@ -0,0 +1,111 @@
LAMMPS (5 Sep 2018)
OMP_NUM_THREADS environment is not set. Defaulting to 1 thread. (src/comm.cpp:87)
  using 1 OpenMP thread(s) per MPI task
variable T equal 0.8
variable p_solid equal 0.05

read_data data.mop
  orthogonal box = (0 0 -2) to (9.52441 9.52441 16)
  1 by 1 by 1 MPI processor grid
  reading atoms ...
  1224 atoms
  reading velocities ...
  1224 velocities

pair_style lj/cut 2.5
pair_coeff * * 1.0 1.0
pair_coeff 1 2 0.5 1.0
pair_coeff 2 2 0.0 0.0
neigh_modify delay 0

group liquid type 1
792 atoms in group liquid
group solid type 2
432 atoms in group solid
region bottom block INF INF INF INF INF 7.0
group bottom region bottom
630 atoms in group bottom
group solid_bottom intersect solid bottom
216 atoms in group solid_bottom
group solid_up subtract solid solid_bottom
216 atoms in group solid_up

variable faSolid equal ${p_solid}*lx*ly/count(solid_up)
variable faSolid equal 0.05*lx*ly/count(solid_up)
fix piston_up solid_up aveforce NULL NULL -${faSolid}
fix piston_up solid_up aveforce NULL NULL -0.0209986841649146
fix freeze_up solid_up setforce 0.0 0.0 NULL
fix freeze_bottom solid_bottom setforce 0.0 0.0 0.0
fix nvesol solid nve
compute Tliq liquid temp
fix nvtliq liquid nvt temp $T $T 0.5
fix nvtliq liquid nvt temp 0.8 $T 0.5
fix nvtliq liquid nvt temp 0.8 0.8 0.5
fix_modify nvtliq temp Tliq
WARNING: Temperature for fix modify is not for group all (src/fix_nh.cpp:1404)

thermo 1000
thermo_modify flush yes temp Tliq
WARNING: Temperature for thermo pressure is not for group all (src/thermo.cpp:488)

fix fxbal all balance 1000 1.05 shift z 10 1.05

compute mopz0 all stress/mop z center kin conf
fix mopz0t all ave/time 1 1 1 c_mopz0[*] file mopz0.time

compute moppz liquid stress/mop/profile z 0.0 0.1 kin conf
fix moppzt all ave/time 1 1 1 c_moppz[*] ave running overwrite file moppz.time mode vector

run 0
Neighbor list info ...
  update every 1 steps, delay 0 steps, check yes
  max neighbors/atom: 2000, page size: 100000
  master list distance cutoff = 2.8
  ghost atom cutoff = 2.8
  binsize = 1.4, bins = 7 7 13
  3 neighbor lists, perpetual/occasional/extra = 1 2 0
  (1) pair lj/cut, perpetual
      attributes: half, newton on
      pair build: half/bin/atomonly/newton
      stencil: half/bin/3d/newton
      bin: standard
  (2) compute stress/mop, occasional, copy from (1)
      attributes: half, newton on
      pair build: copy
      stencil: none
      bin: none
  (3) compute stress/mop/profile, occasional, copy from (1)
      attributes: half, newton on
      pair build: copy
      stencil: none
      bin: none
Per MPI rank memory allocation (min/avg/max) = 3.596 | 3.596 | 3.596 Mbytes
Step Temp E_pair E_mol TotEng Press Volume
0 0.82011245 -3.0642111 0 -2.2692246 0.16906107 1632.8577
Loop time of 1.19209e-06 on 1 procs for 0 steps with 1224 atoms

167.8% CPU use with 1 MPI tasks x 1 OpenMP threads

MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0 | 0 | 0 | 0.0 | 0.00
Output | 0 | 0 | 0 | 0.0 | 0.00
Modify | 0 | 0 | 0 | 0.0 | 0.00
Other | | 1.192e-06 | | |100.00

Nlocal: 1224 ave 1224 max 1224 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 2975 ave 2975 max 2975 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 40241 ave 40241 max 40241 min
Histogram: 1 0 0 0 0 0 0 0 0 0

Total # of neighbors = 40241
Ave neighs/atom = 32.8766
Neighbor list builds = 0
Dangerous builds = 0

Total wall time: 0:00:00
@@ -0,0 +1,111 @@
LAMMPS (5 Sep 2018)
OMP_NUM_THREADS environment is not set. Defaulting to 1 thread. (src/comm.cpp:87)
  using 1 OpenMP thread(s) per MPI task
variable T equal 0.8
variable p_solid equal 0.05

read_data data.mop
  orthogonal box = (0 0 -2) to (9.52441 9.52441 16)
  1 by 2 by 2 MPI processor grid
  reading atoms ...
  1224 atoms
  reading velocities ...
  1224 velocities

pair_style lj/cut 2.5
pair_coeff * * 1.0 1.0
pair_coeff 1 2 0.5 1.0
pair_coeff 2 2 0.0 0.0
neigh_modify delay 0

group liquid type 1
792 atoms in group liquid
group solid type 2
432 atoms in group solid
region bottom block INF INF INF INF INF 7.0
group bottom region bottom
630 atoms in group bottom
group solid_bottom intersect solid bottom
216 atoms in group solid_bottom
group solid_up subtract solid solid_bottom
216 atoms in group solid_up

variable faSolid equal ${p_solid}*lx*ly/count(solid_up)
variable faSolid equal 0.05*lx*ly/count(solid_up)
fix piston_up solid_up aveforce NULL NULL -${faSolid}
fix piston_up solid_up aveforce NULL NULL -0.0209986841649146
fix freeze_up solid_up setforce 0.0 0.0 NULL
fix freeze_bottom solid_bottom setforce 0.0 0.0 0.0
fix nvesol solid nve
compute Tliq liquid temp
fix nvtliq liquid nvt temp $T $T 0.5
fix nvtliq liquid nvt temp 0.8 $T 0.5
fix nvtliq liquid nvt temp 0.8 0.8 0.5
fix_modify nvtliq temp Tliq
WARNING: Temperature for fix modify is not for group all (src/fix_nh.cpp:1404)

thermo 1000
thermo_modify flush yes temp Tliq
WARNING: Temperature for thermo pressure is not for group all (src/thermo.cpp:488)

fix fxbal all balance 1000 1.05 shift z 10 1.05

compute mopz0 all stress/mop z center kin conf
fix mopz0t all ave/time 1 1 1 c_mopz0[*] file mopz0.time

compute moppz liquid stress/mop/profile z 0.0 0.1 kin conf
fix moppzt all ave/time 1 1 1 c_moppz[*] ave running overwrite file moppz.time mode vector

run 0
Neighbor list info ...
  update every 1 steps, delay 0 steps, check yes
  max neighbors/atom: 2000, page size: 100000
  master list distance cutoff = 2.8
  ghost atom cutoff = 2.8
  binsize = 1.4, bins = 7 7 13
  3 neighbor lists, perpetual/occasional/extra = 1 2 0
  (1) pair lj/cut, perpetual
      attributes: half, newton on
      pair build: half/bin/atomonly/newton
      stencil: half/bin/3d/newton
      bin: standard
  (2) compute stress/mop, occasional, copy from (1)
      attributes: half, newton on
      pair build: copy
      stencil: none
      bin: none
  (3) compute stress/mop/profile, occasional, copy from (1)
      attributes: half, newton on
      pair build: copy
      stencil: none
      bin: none
Per MPI rank memory allocation (min/avg/max) = 3.509 | 3.51 | 3.511 Mbytes
Step Temp E_pair E_mol TotEng Press Volume
0 0.82011245 -3.0642111 0 -2.2692246 0.16906107 1632.8577
Loop time of 4.06504e-05 on 4 procs for 0 steps with 1224 atoms

65.2% CPU use with 4 MPI tasks x 1 OpenMP threads

MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0 | 0 | 0 | 0.0 | 0.00
Output | 0 | 0 | 0 | 0.0 | 0.00
Modify | 0 | 0 | 0 | 0.0 | 0.00
Other | | 4.065e-05 | | |100.00

Nlocal: 306 ave 320 max 295 min
Histogram: 1 1 0 0 0 0 1 0 0 1
Nghost: 1450.25 ave 1485 max 1422 min
Histogram: 2 0 0 0 0 0 0 1 0 1
Neighs: 10060.2 ave 10866 max 9507 min
Histogram: 2 0 0 0 0 1 0 0 0 1

Total # of neighbors = 40241
Ave neighs/atom = 32.8766
Neighbor list builds = 0
Dangerous builds = 0

Total wall time: 0:00:00
@@ -0,0 +1,185 @@
# Time-averaged data for fix moppzt
# TimeStep Number-of-rows
# Row c_moppz[1] c_moppz[2] c_moppz[3] c_moppz[4] c_moppz[5] c_moppz[6] c_moppz[7]
0 181
1 -2 0 0 0 0 0 0
2 -1.9 0 0 0 0 0 0
3 -1.8 0 0 0 0 0 0
4 -1.7 0 0 0 0 0 0
5 -1.6 0 0 0 0 0 0
6 -1.5 0 0 0 -9.81273e-05 0.000228605 -0.00421138
7 -1.4 0 0 0 -9.81273e-05 0.000228605 -0.00421138
8 -1.3 0 0 0 -9.81273e-05 0.000228605 -0.00421138
9 -1.2 0 0 0 -9.81273e-05 0.000228605 -0.00421138
10 -1.1 0 0 0 -9.81273e-05 0.000228605 -0.00421138
11 -1 0 0 0 -9.81273e-05 0.000228605 -0.00421138
12 -0.9 0 0 0 -9.81273e-05 0.000228605 -0.00421138
13 -0.8 0 0 0 -9.81273e-05 0.000228605 -0.00421138
14 -0.7 0 0 0 -0.000370675 -0.00240125 -0.26848
15 -0.6 0 0 0 -0.000370675 -0.00240125 -0.26848
16 -0.5 0 0 0 -0.000370675 -0.00240125 -0.26848
17 -0.4 0 0 0 -0.000370675 -0.00240125 -0.26848
18 -0.3 0 0 0 -0.000370675 -0.00240125 -0.26848
19 -0.2 0 0 0 -0.000370675 -0.00240125 -0.26848
20 -0.1 0 0 0 -0.000370675 -0.00240125 -0.26848
21 0 0 0 0 -0.000370675 -0.00240125 -0.26848
22 0.1 0 0 0 0.190761 -0.491728 0.287704
23 0.2 0 0 0 0.190761 -0.491728 0.287704
24 0.3 0 0 0 0.190761 -0.491728 0.287704
25 0.4 0 0 0 0.190761 -0.491728 0.287704
26 0.5 0 0 0 0.190761 -0.491728 0.287704
27 0.6 0 0 0 0.190761 -0.491728 0.287704
28 0.7 0 0 0 0.190761 -0.491728 0.287704
29 0.8 0 0 0 -0.181602 -0.198457 -0.0964774
30 0.9 0 0 0 -0.15138 0.183353 0.206848
31 1 0 0 0 0.174362 1.27701 0.600545
32 1.1 0 0 0 0.160987 0.563442 0.494994
33 1.2 0 0 0 0.218876 0.59796 0.398527
34 1.3 0 0 0 0.187614 0.558909 0.372353
35 1.4 0 0 0 0.118586 0.410013 0.331945
36 1.5 0 0 0 -0.0514208 0.40381 0.128097
37 1.6 3.08628 0.241189 5.90817 -0.198262 0.324128 -0.0449302
38 1.7 0 0 0 -0.104542 0.256677 -0.332854
39 1.8 0.222123 2.43524 1.10089 -0.324638 -0.168682 -1.06238
40 1.9 0 0 0 -0.175732 -0.186846 -0.163062
41 2 0 0 0 -0.137995 0.0920401 -0.260106
42 2.1 -0.179621 -2.59775 1.80077 -0.480624 -0.0439511 -0.0824913
43 2.2 0 0 0 -0.499868 -0.0106185 -0.108924
44 2.3 0 0 0 -0.703301 0.124555 -0.0880158
45 2.4 0 0 0 -0.581211 -0.244281 -0.250071
46 2.5 1.05274 -2.86043 3.36339 -0.575104 -0.148715 -0.249092
47 2.6 0 0 0 0.66061 -0.157649 -0.357141
48 2.7 0 0 0 0.299971 -0.302298 -0.572714
49 2.8 0 0 0 0.33107 -0.201699 -0.470466
50 2.9 0 0 0 0.822686 1.08427 -0.390511
51 3 0 0 0 0.716428 0.750998 -0.698174
52 3.1 0.805189 0.571878 4.31938 0.121891 0.922727 -0.932582
53 3.2 0 0 0 0.0442642 1.02537 -1.03066
54 3.3 2.54289 -1.93701 4.88355 0.0731321 1.09091 -0.83075
55 3.4 0 0 0 0.426589 0.821174 -0.765855
56 3.5 0 0 0 0.445135 0.299996 -1.48972
57 3.6 0 0 0 0.362916 -1.28673 -0.853897
58 3.7 0.952867 -1.07044 1.04141 0.12517 -1.00353 -0.785272
59 3.8 0.617661 0.991499 1.80973 -0.182369 -1.04057 -1.00435
60 3.9 0.60295 -2.41888 3.98011 0.0347345 -1.01302 -0.88314
61 4 -2.97421 -2.01531 2.98586 0.43463 -0.465643 -0.801128
62 4.1 -3.23318 -3.31281 0.956525 0.732752 0.140718 -1.10583
63 4.2 0 0 0 0.969872 0.298566 -0.823464
64 4.3 0 0 0 0.7707 0.557002 -0.836549
65 4.4 0 0 0 0.395828 0.66755 -1.53454
66 4.5 0 0 0 0.104451 0.46777 -1.32358
67 4.6 0 0 0 0.402084 0.464983 -1.22051
68 4.7 0 0 0 0.352808 0.0794986 -1.31292
69 4.8 0 0 0 0.0215512 0.284343 -0.975326
70 4.9 0 0 0 -0.133637 0.250925 -1.33918
71 5 0 0 0 -0.066208 0.104514 -1.27412
72 5.1 0 0 0 -0.184391 0.479805 -1.15139
73 5.2 0 0 0 -0.200251 0.527142 -1.34307
74 5.3 0 0 0 0.043532 -0.0788824 -0.998406
75 5.4 0 0 0 -0.531846 0.126289 -1.05818
76 5.5 0 0 0 -0.259593 0.0818463 -1.58939
77 5.6 0 0 0 -0.373828 -0.343977 -1.50908
78 5.7 -0.294161 -1.07567 3.46536 -0.0644873 -0.424333 -1.28548
79 5.8 0 0 0 -0.293233 -0.201133 -1.19085
80 5.9 0.961568 -1.44949 2.42101 -0.632816 -0.0669315 -0.85119
81 6 0 0 0 -0.0559892 -0.0194478 -1.04541
82 6.1 0 0 0 -0.339753 0.286693 -1.24366
83 6.2 0 0 0 -0.376208 0.444053 -1.7662
84 6.3 0 0 0 -0.718923 0.555398 -1.93862
85 6.4 0 0 0 -1.10631 0.263525 -1.79723
86 6.5 0 0 0 -0.217948 -0.0489491 -2.07833
87 6.6 0 0 0 -0.376248 -0.0588682 -2.45322
88 6.7 -2.12742 4.22609 2.36568 -0.236703 -0.279582 -1.56434
89 6.8 0.869072 -0.141389 3.92123 0.0540986 -0.00271606 -0.930143
90 6.9 0 0 0 1.08829 -1.11737 -0.808187
91 7 1.62633 1.08234 0.844097 1.18575 -0.408792 -0.752394
92 7.1 0 0 0 1.03324 -0.470631 -0.486767
93 7.2 0 0 0 0.950164 -0.112451 -0.479409
94 7.3 -2.66121 -0.326607 7.83093 0.359 -0.482493 0.154384
95 7.4 0 0 0 0.359089 -1.12337 0.409711
96 7.5 -1.88971 1.34806 3.56893 0.394677 -1.0109 0.548348
97 7.6 -1.34494 -0.896214 2.06959 0.231398 -0.728529 0.313513
98 7.7 0 0 0 0.415681 -0.45268 0.507181
99 7.8 0 0 0 0.259423 -0.11638 0.464208
100 7.9 -1.97572 -1.20836 3.95731 0.252257 -0.0845701 -0.249345
101 8 0 0 0 0.0688154 0.290386 -0.462467
102 8.1 0.25925 -0.458269 3.33086 0.360399 -0.0409494 -0.656911
103 8.2 0 0 0 -0.0587033 0.347698 -0.340604
104 8.3 0 0 0 -0.377192 0.153096 -0.914654
105 8.4 0 0 0 -0.431553 0.274996 -0.946252
106 8.5 0 0 0 -0.898366 0.146653 -1.36383
107 8.6 0 0 0 -0.889593 0.385951 0.125116
108 8.7 0 0 0 -0.0139171 -0.162302 -0.0287854
109 8.8 0 0 0 -0.266284 -0.148945 0.393533
110 8.9 0 0 0 -0.00920376 -0.0770818 0.334642
111 9 0 0 0 -0.0949156 0.0113352 -0.0761263
112 9.1 0 0 0 0.0688045 0.104558 -0.101891
113 9.2 3.79773 0.0255401 3.75032 0.419832 0.295402 0.652533
114 9.3 0 0 0 0.594267 0.70396 0.836434
115 9.4 0 0 0 0.174722 1.00483 1.42787
116 9.5 0 0 0 0.0626835 0.518952 0.269158
117 9.6 0 0 0 -0.302859 -0.265212 -0.0145578
118 9.7 0 0 0 -0.114026 -0.201336 -0.539522
119 9.8 0 0 0 0.104008 -0.30236 -0.0789062
120 9.9 0 0 0 -0.0482778 -0.553118 0.45214
121 10 0 0 0 -0.0554938 -0.402692 0.141112
122 10.1 0 0 0 0.174338 0.556958 -0.0922154
123 10.2 0 0 0 -1.06045 0.541565 -0.0409312
124 10.3 0 0 0 -1.20782 0.464574 -0.413871
125 10.4 0 0 0 -0.891701 0.327653 -0.286438
126 10.5 0 0 0 0.231227 -0.064277 -0.89684
127 10.6 -1.27989 -4.87365 9.40433 0.211278 0.230826 -1.23536
128 10.7 -2.1001 -0.417817 1.17745 0.425856 0.078728 -1.44229
129 10.8 0 0 0 0.30965 0.450884 -1.74985
130 10.9 0 0 0 0.36735 0.990032 -1.19971
131 11 0.253834 -1.84303 3.91828 1.01826 0.0660896 -0.481086
132 11.1 0 0 0 0.744006 0.0906555 -0.897417
133 11.2 0 0 0 0.339073 0.361038 -0.545084
134 11.3 -1.9974 -0.431998 3.46296 0.611295 0.17282 0.0341483
135 11.4 0 0 0 -0.491432 -0.958871 1.28001
136 11.5 0 0 0 0.0431048 -1.50924 1.24037
137 11.6 0 0 0 -0.684419 -0.0163951 1.06179
138 11.7 0 0 0 -0.425278 -0.127741 0.757298
139 11.8 -2.09164 0.00894897 2.22812 -0.0955178 -0.310572 0.661289
140 11.9 0 0 0 0.156959 -0.233409 0.802568
141 12 0 0 0 -0.05541 -0.346448 0.541571
142 12.1 0 0 0 0.706767 0.182767 0.25767
143 12.2 0 0 0 0.4791 0.464612 -0.212887
144 12.3 0 0 0 0.81454 0.440323 -0.461359
145 12.4 0 0 0 -0.110025 0.200698 -0.996706
146 12.5 0 0 0 -0.149791 0.165599 -1.02233
147 12.6 0 0 0 -0.170933 0.0644682 -0.866174
148 12.7 0 0 0 -0.122869 -0.0196287 -0.801348
149 12.8 0 0 0 -0.0693832 -0.0673091 -0.382802
150 12.9 0 0 0 -0.0693832 -0.0673091 -0.382802
151 13 0 0 0 -0.0693832 -0.0673091 -0.382802
152 13.1 0 0 0 -0.0693832 -0.0673091 -0.382802
153 13.2 0 0 0 -0.0693832 -0.0673091 -0.382802
154 13.3 0 0 0 -0.0693832 -0.0673091 -0.382802
155 13.4 0 0 0 -0.0693832 -0.0673091 -0.382802
156 13.5 0 0 0 -0.000502433 0.000137492 -0.227425
157 13.6 0 0 0 -0.000502433 0.000137492 -0.227425
158 13.7 0 0 0 -0.000502433 0.000137492 -0.227425
159 13.8 0 0 0 -0.000502433 0.000137492 -0.227425
160 13.9 0 0 0 -0.000502433 0.000137492 -0.227425
161 14 0 0 0 -0.000502433 0.000137492 -0.227425
162 14.1 0 0 0 -0.000502433 0.000137492 -0.227425
163 14.2 0 0 0 -0.000502433 0.000137492 -0.227425
164 14.3 0 0 0 5.79042e-05 4.68687e-05 -0.00286094
165 14.4 0 0 0 5.79042e-05 4.68687e-05 -0.00286094
166 14.5 0 0 0 5.79042e-05 4.68687e-05 -0.00286094
167 14.6 0 0 0 5.79042e-05 4.68687e-05 -0.00286094
168 14.7 0 0 0 5.79042e-05 4.68687e-05 -0.00286094
169 14.8 0 0 0 5.79042e-05 4.68687e-05 -0.00286094
170 14.9 0 0 0 5.79042e-05 4.68687e-05 -0.00286094
171 15 0 0 0 5.79042e-05 4.68687e-05 -0.00286094
172 15.1 0 0 0 0 0 0
173 15.2 0 0 0 0 0 0
174 15.3 0 0 0 0 0 0
175 15.4 0 0 0 0 0 0
176 15.5 0 0 0 0 0 0
177 15.6 0 0 0 0 0 0
178 15.7 0 0 0 0 0 0
179 15.8 0 0 0 0 0 0
180 15.9 0 0 0 0 0 0
181 16 0 0 0 0 0 0
@@ -0,0 +1,3 @@
# Time-averaged data for fix mopz0t
# TimeStep c_mopz0[1] c_mopz0[2] c_mopz0[3] c_mopz0[4] c_mopz0[5] c_mopz0[6]
0 1.62633 1.08234 0.844097 1.18575 -0.408792 -0.752394
@@ -0,0 +1,25 @@
LAMMPS Description

8 atoms

2 atom types

0 1 xlo xhi
0 1 ylo yhi
0 1 zlo zhi

Masses

1 22.98976928
2 35.45

Atoms

1 2 1 0.25 0.25 0.25
2 1 -1 0.75 0.25 0.25
3 1 -1 0.25 0.75 0.25
4 2 1 0.75 0.75 0.25
5 1 -1 0.25 0.25 0.75
6 2 1 0.75 0.25 0.75
7 2 1 0.25 0.75 0.75
8 1 -1 0.75 0.75 0.75
@@ -0,0 +1,316 @@
LAMMPS Description

300 atoms

1 atom types

0 10 xlo xhi
0 10 ylo yhi
0 10 zlo zhi

Masses

1 1.0

Atoms

1 1 1 0 0 4.5
2 1 -1 0 0 5.5
3 1 1 0 1 4.5
4 1 -1 0 1 5.5
5 1 1 0 2 4.5
6 1 -1 0 2 5.5
7 1 1 0 3 4.5
8 1 -1 0 3 5.5
9 1 1 0 4 4.5
10 1 -1 0 4 5.5
11 1 1 0 5 4.5
12 1 -1 0 5 5.5
13 1 1 0 6 4.5
14 1 -1 0 6 5.5
15 1 1 0 7 4.5
16 1 -1 0 7 5.5
17 1 1 0 8 4.5
18 1 -1 0 8 5.5
19 1 1 0 9 4.5
20 1 -1 0 9 5.5
21 1 1 1 0 4.5
22 1 -1 1 0 5.5
23 1 1 1 1 4.5
24 1 -1 1 1 5.5
25 1 1 1 2 4.5
26 1 -1 1 2 5.5
27 1 1 1 3 4.5
28 1 -1 1 3 5.5
29 1 1 1 4 4.5
30 1 -1 1 4 5.5
31 1 1 1 5 4.5
32 1 -1 1 5 5.5
33 1 1 1 6 4.5
34 1 -1 1 6 5.5
35 1 1 1 7 4.5
36 1 -1 1 7 5.5
37 1 1 1 8 4.5
38 1 -1 1 8 5.5
39 1 1 1 9 4.5
40 1 -1 1 9 5.5
41 1 1 2 0 4.5
42 1 -1 2 0 5.5
43 1 1 2 1 4.5
44 1 -1 2 1 5.5
45 1 1 2 2 4.5
46 1 -1 2 2 5.5
47 1 1 2 3 4.5
48 1 -1 2 3 5.5
49 1 1 2 4 4.5
50 1 -1 2 4 5.5
51 1 1 2 5 4.5
52 1 -1 2 5 5.5
53 1 1 2 6 4.5
54 1 -1 2 6 5.5
55 1 1 2 7 4.5
56 1 -1 2 7 5.5
57 1 1 2 8 4.5
58 1 -1 2 8 5.5
59 1 1 2 9 4.5
60 1 -1 2 9 5.5
61 1 1 3 0 4.5
62 1 -1 3 0 5.5
63 1 1 3 1 4.5
64 1 -1 3 1 5.5
65 1 1 3 2 4.5
66 1 -1 3 2 5.5
67 1 1 3 3 4.5
68 1 -1 3 3 5.5
69 1 1 3 4 4.5
70 1 -1 3 4 5.5
71 1 1 3 5 4.5
72 1 -1 3 5 5.5
73 1 1 3 6 4.5
74 1 -1 3 6 5.5
75 1 1 3 7 4.5
76 1 -1 3 7 5.5
77 1 1 3 8 4.5
78 1 -1 3 8 5.5
79 1 1 3 9 4.5
80 1 -1 3 9 5.5
81 1 1 4 0 4.5
82 1 -1 4 0 5.5
83 1 1 4 1 4.5
84 1 -1 4 1 5.5
85 1 1 4 2 4.5
86 1 -1 4 2 5.5
87 1 1 4 3 4.5
88 1 -1 4 3 5.5
89 1 1 4 4 4.5
90 1 -1 4 4 5.5
91 1 1 4 5 4.5
92 1 -1 4 5 5.5
93 1 1 4 6 4.5
94 1 -1 4 6 5.5
95 1 1 4 7 4.5
96 1 -1 4 7 5.5
97 1 1 4 8 4.5
98 1 -1 4 8 5.5
99 1 1 4 9 4.5
100 1 -1 4 9 5.5
101 1 1 5 0 4.5
102 1 -1 5 0 5.5
103 1 1 5 1 4.5
104 1 -1 5 1 5.5
105 1 1 5 2 4.5
106 1 -1 5 2 5.5
107 1 1 5 3 4.5
108 1 -1 5 3 5.5
109 1 1 5 4 4.5
110 1 -1 5 4 5.5
111 1 1 5 5 4.5
112 1 -1 5 5 5.5
113 1 1 5 6 4.5
114 1 -1 5 6 5.5
115 1 1 5 7 4.5
116 1 -1 5 7 5.5
117 1 1 5 8 4.5
118 1 -1 5 8 5.5
119 1 1 5 9 4.5
120 1 -1 5 9 5.5
121 1 1 6 0 4.5
122 1 -1 6 0 5.5
123 1 1 6 1 4.5
124 1 -1 6 1 5.5
125 1 1 6 2 4.5
126 1 -1 6 2 5.5
127 1 1 6 3 4.5
128 1 -1 6 3 5.5
129 1 1 6 4 4.5
130 1 -1 6 4 5.5
131 1 1 6 5 4.5
132 1 -1 6 5 5.5
133 1 1 6 6 4.5
134 1 -1 6 6 5.5
135 1 1 6 7 4.5
136 1 -1 6 7 5.5
137 1 1 6 8 4.5
138 1 -1 6 8 5.5
139 1 1 6 9 4.5
140 1 -1 6 9 5.5
141 1 1 7 0 4.5
142 1 -1 7 0 5.5
143 1 1 7 1 4.5
144 1 -1 7 1 5.5
145 1 1 7 2 4.5
146 1 -1 7 2 5.5
147 1 1 7 3 4.5
148 1 -1 7 3 5.5
149 1 1 7 4 4.5
150 1 -1 7 4 5.5
151 1 1 7 5 4.5
152 1 -1 7 5 5.5
153 1 1 7 6 4.5
154 1 -1 7 6 5.5
155 1 1 7 7 4.5
156 1 -1 7 7 5.5
157 1 1 7 8 4.5
158 1 -1 7 8 5.5
159 1 1 7 9 4.5
160 1 -1 7 9 5.5
161 1 1 8 0 4.5
162 1 -1 8 0 5.5
163 1 1 8 1 4.5
164 1 -1 8 1 5.5
165 1 1 8 2 4.5
166 1 -1 8 2 5.5
167 1 1 8 3 4.5
168 1 -1 8 3 5.5
169 1 1 8 4 4.5
170 1 -1 8 4 5.5
171 1 1 8 5 4.5
172 1 -1 8 5 5.5
173 1 1 8 6 4.5
174 1 -1 8 6 5.5
175 1 1 8 7 4.5
176 1 -1 8 7 5.5
177 1 1 8 8 4.5
178 1 -1 8 8 5.5
179 1 1 8 9 4.5
180 1 -1 8 9 5.5
181 1 1 9 0 4.5
182 1 -1 9 0 5.5
183 1 1 9 1 4.5
184 1 -1 9 1 5.5
185 1 1 9 2 4.5
186 1 -1 9 2 5.5
187 1 1 9 3 4.5
188 1 -1 9 3 5.5
189 1 1 9 4 4.5
190 1 -1 9 4 5.5
191 1 1 9 5 4.5
192 1 -1 9 5 5.5
193 1 1 9 6 4.5
194 1 -1 9 6 5.5
195 1 1 9 7 4.5
196 1 -1 9 7 5.5
197 1 1 9 8 4.5
198 1 -1 9 8 5.5
199 1 1 9 9 4.5
200 1 -1 9 9 5.5
201 1 -1 9.28495 2.13839 8.88019
202 1 1 4.99281 4.17459 9.83905
203 1 -1 4.91265 6.89408 2.39989
204 1 1 4.43647 3.68895 8.86086
205 1 -1 0.659075 7.07271 0.179131
206 1 1 7.791 3.40021 0.969703
207 1 -1 1.18008 3.63874 7.28751
208 1 1 8.51522 5.24681 6.37702
209 1 -1 4.24226 9.60726 3.16084
210 1 1 8.43745 8.23344 9.2883
211 1 -1 8.48509 8.84988 9.43407
212 1 1 2.81127 8.9903 0.00909212
213 1 -1 6.38283 6.20858 9.92482
214 1 1 4.59962 5.7925 7.52571
215 1 -1 7.03797 7.09336 8.15957
216 1 1 6.68103 8.04734 7.95661
217 1 -1 2.531 8.47145 1.6209
218 1 1 6.71915 8.79876 9.59581
219 1 -1 4.96758 0.0381298 0.827927
220 1 1 9.22955 1.04572 0.84722
221 1 -1 2.3224 2.57084 8.07306
222 1 1 1.94283 3.17375 3.92051
223 1 -1 2.34735 1.91295 1.29127
224 1 1 3.33928 3.30688 0.892089
225 1 -1 1.19738 4.40402 8.70835
226 1 1 7.44541 4.94803 8.28211
227 1 -1 5.93272 1.18886 1.56518
228 1 1 8.50709 8.70343 1.24939
229 1 -1 5.54016 3.38865 8.61698
230 1 1 9.47644 0.573085 3.05941
231 1 -1 9.39695 4.46542 1.84205
232 1 1 3.52268 5.60212 0.333999
233 1 -1 3.69009 9.40954 6.10446
234 1 1 3.96836 6.15307 7.57803
235 1 -1 2.02535 0.0418407 3.21642
236 1 1 2.97488 8.79711 8.33242
237 1 -1 2.4122 1.79458 3.04173
238 1 1 9.72355 3.67773 1.52435
239 1 -1 8.55216 6.1623 1.53201
240 1 1 4.98973 2.41459 9.84381
241 1 -1 8.8901 5.9006 1.97649
242 1 1 9.09932 2.23783 1.42554
243 1 -1 6.70722 8.21769 1.21953
244 1 1 6.83768 0.84508 3.25165
245 1 -1 0.222115 3.07945 0.51825
246 1 1 0.503918 9.34932 6.25278
247 1 -1 0.803159 8.7017 9.46211
248 1 1 4.88636 5.00147 9.65639
249 1 -1 1.62258 0.767285 9.63596
250 1 1 2.70143 3.01111 7.74859
251 1 -1 4.41574 5.31824 0.538729
252 1 1 1.64724 5.18097 3.59205
253 1 -1 2.33672 3.21408 6.6081
254 1 1 7.46603 1.53668 9.09844
255 1 -1 3.61269 8.44556 6.99789
256 1 1 6.95465 6.83045 9.31002
257 1 -1 5.91831 9.01549 3.4626
258 1 1 6.56503 8.42229 3.27105
259 1 -1 4.50822 9.59753 3.47025
260 1 1 4.17357 5.27384 7.34774
261 1 -1 7.70968 6.5292 3.54779
262 1 1 4.7977 4.94239 6.24947
263 1 -1 9.24016 9.36994 6.71263
264 1 1 7.36888 8.75922 0.52403
265 1 -1 9.92895 5.87551 6.21586
266 1 1 3.86308 6.71601 9.69083
267 1 -1 8.90048 0.298719 0.573852
268 1 1 6.58753 6.67768 1.83984
269 1 -1 8.672 0.367497 2.21864
270 1 1 3.44519 3.30359 6.52249
271 1 -1 7.24717 3.25113 3.41567
272 1 1 9.53447 5.81336 1.79208
273 1 -1 1.01722 6.42534 0.715
274 1 1 3.58808 4.92392 7.00979
275 1 -1 1.21399 3.56951 6.34505
276 1 1 3.50336 0.942722 2.76989
277 1 -1 9.45475 6.06299 0.659023
278 1 1 3.44464 4.03075 6.20179
279 1 -1 0.949331 5.40183 8.51385
280 1 1 6.41118 2.62135 2.31132
281 1 -1 3.58837 9.78355 7.04966
282 1 1 9.2267 3.19593 2.10384
283 1 -1 1.83092 2.35627 3.93061
284 1 1 4.97203 4.92287 1.8049
285 1 -1 7.4097 4.757 8.604
286 1 1 0.746575 7.69038 0.89134
287 1 -1 8.54862 6.59135 2.18888
288 1 1 2.18747 4.82994 0.761718
289 1 -1 5.71622 2.51116 6.85522
290 1 1 6.95554 1.83187 8.31157
291 1 -1 7.31818 6.60081 2.63208
292 1 1 0.744495 2.73429 9.86022
293 1 -1 5.1573 8.70962 2.53418
294 1 1 2.40385 1.54057 1.9297
295 1 -1 3.42609 2.25856 2.28437
296 1 1 6.66173 3.70851 9.70052
297 1 -1 7.88966 1.4343 8.91223
298 1 1 3.91118 5.22253 6.29642
299 1 -1 9.17618 3.98313 9.82158
300 1 1 4.95424 5.93521 1.3652
(File diff suppressed because it is too large)
@@ -0,0 +1,37 @@
# Point dipoles in a 2d box

units lj
atom_style full

read_data data.NaCl

replicate 8 8 8

velocity all create 1.5 49893

neighbor 1.0 bin
neigh_modify delay 0

fix 1 all nve

# LAMMPS computes pairwise and long-range Coulombics

#pair_style coul/long 3.0
#pair_coeff * *
#kspace_style pppm 1.0e-3

# Scafacos computes entire long-range Coulombics
# use dummy pair style to perform atom sorting

pair_style zero 1.0
pair_coeff * *

#fix 2 all scafacos p3m tolerance field 0.001

kspace_style scafacos p3m 0.001
kspace_modify scafacos tolerance field

timestep 0.005
thermo 10

run 100
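Assuming LAMMPS was built with the USER-SCAFACOS package (owned by @rhalver per
the CODEOWNERS changes in this commit) and an MPI launcher is available, a run
of this input would look like the following; the binary and input file names
are placeholders:

  mpirun -np 4 lmp_mpi -in in.scafacos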
Some files were not shown because too many files have changed in this diff.