Merge branch 'master' into next_lammps_version

Axel Kohlmeyer 2020-04-14 21:36:59 -04:00
commit 5951555de4
254 changed files with 36308 additions and 7450 deletions


@ -81,51 +81,29 @@ endif()
########################################################################
# User input options #
########################################################################
option(BUILD_EXE "Build lmp binary" ON)
if(BUILD_EXE)
set(LAMMPS_MACHINE "" CACHE STRING "Suffix to append to lmp binary (WON'T enable any features automatically)")
mark_as_advanced(LAMMPS_MACHINE)
if(LAMMPS_MACHINE)
set(LAMMPS_MACHINE "_${LAMMPS_MACHINE}")
endif()
set(LAMMPS_BINARY lmp${LAMMPS_MACHINE})
set(LAMMPS_MACHINE "" CACHE STRING "Suffix to append to lmp binary (WON'T enable any features automatically)")
mark_as_advanced(LAMMPS_MACHINE)
if(LAMMPS_MACHINE)
set(LAMMPS_MACHINE "_${LAMMPS_MACHINE}")
endif()
set(LAMMPS_BINARY lmp${LAMMPS_MACHINE})
option(BUILD_LIB "Build LAMMPS library" OFF)
if(BUILD_LIB)
option(BUILD_SHARED_LIBS "Build shared library" OFF)
if(BUILD_SHARED_LIBS) # for all pkg libs, mpi_stubs and linalg
set(CMAKE_POSITION_INDEPENDENT_CODE ON)
endif()
set(LAMMPS_LIB_SUFFIX "" CACHE STRING "Suffix to append to liblammps and pkg-config file")
mark_as_advanced(LAMMPS_LIB_SUFFIX)
if(LAMMPS_LIB_SUFFIX)
set(LAMMPS_LIB_SUFFIX "_${LAMMPS_LIB_SUFFIX}")
endif()
option(BUILD_SHARED_LIBS "Build shared library" OFF)
if(BUILD_SHARED_LIBS) # for all pkg libs, mpi_stubs and linalg
set(CMAKE_POSITION_INDEPENDENT_CODE ON)
endif()
option(BUILD_TOOLS "Build and install LAMMPS tools (msi2lmp, binary2txt, chain)" OFF)
include(GNUInstallDirs)
file(GLOB ALL_SOURCES ${LAMMPS_SOURCE_DIR}/[^.]*.cpp)
if(BUILD_LIB)
file(GLOB MAIN_SOURCES ${LAMMPS_SOURCE_DIR}/main.cpp)
list(REMOVE_ITEM ALL_SOURCES ${MAIN_SOURCES})
add_library(lammps ${ALL_SOURCES})
if(BUILD_EXE)
add_executable(lmp ${MAIN_SOURCES})
target_link_libraries(lmp PRIVATE lammps)
set_target_properties(lmp PROPERTIES OUTPUT_NAME ${LAMMPS_BINARY})
install(TARGETS lmp EXPORT LAMMPS_Targets DESTINATION ${CMAKE_INSTALL_BINDIR})
endif()
else()
if(NOT BUILD_EXE)
message(FATAL_ERROR "You need to enable at least one of the two following options: BUILD_LIB or BUILD_EXE")
endif()
add_executable(lammps ${ALL_SOURCES})
set_target_properties(lammps PROPERTIES OUTPUT_NAME ${LAMMPS_BINARY})
install(TARGETS lammps DESTINATION ${CMAKE_INSTALL_BINDIR})
endif()
file(GLOB MAIN_SOURCES ${LAMMPS_SOURCE_DIR}/main.cpp)
list(REMOVE_ITEM ALL_SOURCES ${MAIN_SOURCES})
add_library(lammps ${ALL_SOURCES})
add_executable(lmp ${MAIN_SOURCES})
target_link_libraries(lmp PRIVATE lammps)
set_target_properties(lmp PROPERTIES OUTPUT_NAME ${LAMMPS_BINARY})
install(TARGETS lmp EXPORT LAMMPS_Targets DESTINATION ${CMAKE_INSTALL_BINDIR})
option(CMAKE_VERBOSE_MAKEFILE "Generate verbose Makefiles" OFF)
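
With this change a LAMMPS library and an executable linked against it are always built together. A minimal configure/build sketch (assuming a sibling build directory; the suffix "serial" is only an example):

    cmake -D LAMMPS_MACHINE=serial ../cmake   # suffix applies to executable and library
    cmake --build .                           # produces lmp_serial linked to liblammps_serial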
@ -185,10 +163,10 @@ else()
enable_language(C)
file(GLOB MPI_SOURCES ${LAMMPS_SOURCE_DIR}/STUBS/mpi.c)
add_library(mpi_stubs STATIC ${MPI_SOURCES})
if(BUILD_LIB AND NOT BUILD_SHARED_LIBS)
if(NOT BUILD_SHARED_LIBS)
install(TARGETS mpi_stubs EXPORT LAMMPS_Targets LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR} ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR})
endif()
set_target_properties(mpi_stubs PROPERTIES OUTPUT_NAME lammps_mpi_stubs${LAMMPS_LIB_SUFFIX})
set_target_properties(mpi_stubs PROPERTIES OUTPUT_NAME lammps_mpi_stubs${LAMMPS_MACHINE})
target_include_directories(mpi_stubs PUBLIC $<BUILD_INTERFACE:${LAMMPS_SOURCE_DIR}/STUBS> $<INSTALL_INTERFACE:${CMAKE_INSTALL_INCLUDEDIR}/lammps/mpi>)
install(FILES ${LAMMPS_SOURCE_DIR}/STUBS/mpi.h DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}/lammps/mpi)
if(BUILD_SHARED_LIBS)
@ -274,10 +252,10 @@ if(PKG_MSCG OR PKG_USER-ATC OR PKG_USER-AWPMD OR PKG_USER-QUIP OR PKG_LATTE)
enable_language(Fortran)
file(GLOB LAPACK_SOURCES ${LAMMPS_LIB_SOURCE_DIR}/linalg/[^.]*.[fF])
add_library(linalg STATIC ${LAPACK_SOURCES})
if(BUILD_LIB AND NOT BUILD_SHARED_LIBS)
if(NOT BUILD_SHARED_LIBS)
install(TARGETS linalg EXPORT LAMMPS_Targets LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR} ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR})
endif()
set_target_properties(linalg PROPERTIES OUTPUT_NAME lammps_linalg${LAMMPS_LIB_SUFFIX})
set_target_properties(linalg PROPERTIES OUTPUT_NAME lammps_linalg${LAMMPS_MACHINE})
set(BLAS_LIBRARIES "$<TARGET_FILE:linalg>")
set(LAPACK_LIBRARIES "$<TARGET_FILE:linalg>")
else()
@ -435,10 +413,10 @@ foreach(SIMPLE_LIB POEMS USER-ATC USER-AWPMD USER-H5MD)
${LAMMPS_LIB_SOURCE_DIR}/${PKG_LIB}/[^.]*.c
${LAMMPS_LIB_SOURCE_DIR}/${PKG_LIB}/[^.]*.cpp)
add_library(${PKG_LIB} STATIC ${${PKG_LIB}_SOURCES})
if(BUILD_LIB AND NOT BUILD_SHARED_LIBS)
if(NOT BUILD_SHARED_LIBS)
install(TARGETS ${PKG_LIB} EXPORT LAMMPS_Targets LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR} ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR})
endif()
set_target_properties(${PKG_LIB} PROPERTIES OUTPUT_NAME lammps_${PKG_LIB}${LAMMPS_LIB_SUFFIX})
set_target_properties(${PKG_LIB} PROPERTIES OUTPUT_NAME lammps_${PKG_LIB}${LAMMPS_MACHINE})
target_link_libraries(lammps PRIVATE ${PKG_LIB})
if(PKG_LIB STREQUAL awpmd)
target_include_directories(awpmd PUBLIC ${LAMMPS_LIB_SOURCE_DIR}/awpmd/systems/interact ${LAMMPS_LIB_SOURCE_DIR}/awpmd/ivutils/include)
@ -535,44 +513,41 @@ list (FIND LANGUAGES "Fortran" _index)
if (${_index} GREATER -1)
target_link_libraries(lammps PRIVATE ${CMAKE_Fortran_IMPLICIT_LINK_LIBRARIES})
endif()
if(BUILD_LIB)
set(LAMMPS_CXX_HEADERS angle.h atom.h bond.h citeme.h comm.h compute.h dihedral.h domain.h error.h fix.h force.h group.h improper.h
input.h info.h kspace.h lammps.h lattice.h library.h lmppython.h lmptype.h memory.h modify.h neighbor.h neigh_list.h output.h
pair.h pointers.h region.h timer.h universe.h update.h variable.h)
if(LAMMPS_EXCEPTIONS)
list(APPEND LAMMPS_CXX_HEADERS exceptions.h)
endif()
set_target_properties(lammps PROPERTIES OUTPUT_NAME lammps${LAMMPS_LIB_SUFFIX})
set_target_properties(lammps PROPERTIES SOVERSION ${SOVERSION})
install(TARGETS lammps EXPORT LAMMPS_Targets LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR} ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR})
target_include_directories(lammps PUBLIC $<INSTALL_INTERFACE:${CMAKE_INSTALL_INCLUDEDIR}>)
file(MAKE_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/includes/lammps)
foreach(_HEADER ${LAMMPS_CXX_HEADERS})
add_custom_command(OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/includes/lammps/${_HEADER} COMMAND ${CMAKE_COMMAND} -E copy_if_different ${LAMMPS_SOURCE_DIR}/${_HEADER} ${CMAKE_CURRENT_BINARY_DIR}/includes/lammps/${_HEADER} DEPENDS ${LAMMPS_SOURCE_DIR}/${_HEADER})
add_custom_target(${_HEADER} DEPENDS ${CMAKE_CURRENT_BINARY_DIR}/includes/lammps/${_HEADER})
add_dependencies(lammps ${_HEADER})
install(FILES ${LAMMPS_SOURCE_DIR}/${_HEADER} DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}/lammps)
endforeach()
target_include_directories(lammps INTERFACE $<BUILD_INTERFACE:${CMAKE_CURRENT_BINARY_DIR}/includes>)
add_library(LAMMPS::lammps ALIAS lammps)
get_target_property(LAMMPS_DEFINES lammps INTERFACE_COMPILE_DEFINITIONS)
set(LAMMPS_API_DEFINES)
foreach(_DEF ${LAMMPS_DEFINES})
set(LAMMPS_API_DEFINES "${LAMMPS_API_DEFINES} -D${_DEF}")
endforeach()
configure_file(pkgconfig/liblammps.pc.in ${CMAKE_CURRENT_BINARY_DIR}/liblammps${LAMMPS_LIB_SUFFIX}.pc @ONLY)
install(FILES ${CMAKE_CURRENT_BINARY_DIR}/liblammps${LAMMPS_LIB_SUFFIX}.pc DESTINATION ${CMAKE_INSTALL_LIBDIR}/pkgconfig)
install(EXPORT LAMMPS_Targets FILE LAMMPS_Targets.cmake NAMESPACE LAMMPS:: DESTINATION ${CMAKE_INSTALL_LIBDIR}/cmake/LAMMPS)
include(CMakePackageConfigHelpers)
configure_file(LAMMPSConfig.cmake.in ${CMAKE_CURRENT_BINARY_DIR}/LAMMPSConfig.cmake @ONLY)
write_basic_package_version_file("LAMMPSConfigVersion.cmake" VERSION ${PROJECT_VERSION} COMPATIBILITY ExactVersion)
install(FILES "${CMAKE_CURRENT_BINARY_DIR}/LAMMPSConfig.cmake" "${CMAKE_CURRENT_BINARY_DIR}/LAMMPSConfigVersion.cmake" DESTINATION ${CMAKE_INSTALL_LIBDIR}/cmake/LAMMPS)
set(LAMMPS_CXX_HEADERS angle.h atom.h bond.h citeme.h comm.h compute.h dihedral.h domain.h error.h fix.h force.h group.h improper.h
input.h info.h kspace.h lammps.h lattice.h library.h lmppython.h lmptype.h memory.h modify.h neighbor.h neigh_list.h output.h
pair.h pointers.h region.h timer.h universe.h update.h variable.h)
if(LAMMPS_EXCEPTIONS)
list(APPEND LAMMPS_CXX_HEADERS exceptions.h)
endif()
if(BUILD_EXE)
install(FILES ${LAMMPS_DOC_DIR}/lammps.1 DESTINATION ${CMAKE_INSTALL_MANDIR}/man1 RENAME ${LAMMPS_BINARY}.1)
endif()
set_target_properties(lammps PROPERTIES OUTPUT_NAME lammps${LAMMPS_MACHINE})
set_target_properties(lammps PROPERTIES SOVERSION ${SOVERSION})
install(TARGETS lammps EXPORT LAMMPS_Targets LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR} ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR})
target_include_directories(lammps PUBLIC $<INSTALL_INTERFACE:${CMAKE_INSTALL_INCLUDEDIR}>)
file(MAKE_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/includes/lammps)
foreach(_HEADER ${LAMMPS_CXX_HEADERS})
add_custom_command(OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/includes/lammps/${_HEADER} COMMAND ${CMAKE_COMMAND} -E copy_if_different ${LAMMPS_SOURCE_DIR}/${_HEADER} ${CMAKE_CURRENT_BINARY_DIR}/includes/lammps/${_HEADER} DEPENDS ${LAMMPS_SOURCE_DIR}/${_HEADER})
add_custom_target(${_HEADER} DEPENDS ${CMAKE_CURRENT_BINARY_DIR}/includes/lammps/${_HEADER})
add_dependencies(lammps ${_HEADER})
install(FILES ${LAMMPS_SOURCE_DIR}/${_HEADER} DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}/lammps)
endforeach()
target_include_directories(lammps INTERFACE $<BUILD_INTERFACE:${CMAKE_CURRENT_BINARY_DIR}/includes>)
add_library(LAMMPS::lammps ALIAS lammps)
get_target_property(LAMMPS_DEFINES lammps INTERFACE_COMPILE_DEFINITIONS)
set(LAMMPS_API_DEFINES)
foreach(_DEF ${LAMMPS_DEFINES})
set(LAMMPS_API_DEFINES "${LAMMPS_API_DEFINES} -D${_DEF}")
endforeach()
configure_file(pkgconfig/liblammps.pc.in ${CMAKE_CURRENT_BINARY_DIR}/liblammps${LAMMPS_MACHINE}.pc @ONLY)
install(FILES ${CMAKE_CURRENT_BINARY_DIR}/liblammps${LAMMPS_MACHINE}.pc DESTINATION ${CMAKE_INSTALL_LIBDIR}/pkgconfig)
install(EXPORT LAMMPS_Targets FILE LAMMPS_Targets.cmake NAMESPACE LAMMPS:: DESTINATION ${CMAKE_INSTALL_LIBDIR}/cmake/LAMMPS)
file(GLOB MODULE_FILES ${CMAKE_CURRENT_SOURCE_DIR}/Modules/Find*.cmake)
install(FILES ${MODULE_FILES} DESTINATION ${CMAKE_INSTALL_LIBDIR}/cmake/LAMMPS)
include(CMakePackageConfigHelpers)
configure_file(LAMMPSConfig.cmake.in ${CMAKE_CURRENT_BINARY_DIR}/LAMMPSConfig.cmake @ONLY)
write_basic_package_version_file("LAMMPSConfigVersion.cmake" VERSION ${PROJECT_VERSION} COMPATIBILITY ExactVersion)
install(FILES "${CMAKE_CURRENT_BINARY_DIR}/LAMMPSConfig.cmake" "${CMAKE_CURRENT_BINARY_DIR}/LAMMPSConfigVersion.cmake" DESTINATION ${CMAKE_INSTALL_LIBDIR}/cmake/LAMMPS)
install(FILES ${LAMMPS_DOC_DIR}/lammps.1 DESTINATION ${CMAKE_INSTALL_MANDIR}/man1 RENAME ${LAMMPS_BINARY}.1)
if(BUILD_TOOLS)
add_executable(binary2txt ${LAMMPS_TOOLS_DIR}/binary2txt.cpp)
@ -623,16 +598,19 @@ install(
# conventional build. Only available if a shared library is built.
# This is primarily for people that only want to use the Python wrapper.
###############################################################################
if(BUILD_LIB AND BUILD_SHARED_LIBS)
if(BUILD_SHARED_LIBS)
if(CMAKE_VERSION VERSION_LESS 3.12)
find_package(PythonInterp) # Deprecated since version 3.12
if(PYTHONINTERP_FOUND)
set(Python_EXECUTABLE ${PYTHON_EXECUTABLE})
endif()
else()
find_package(Python COMPONENTS Interpreter)
endif()
if (PYTHON_EXECUTABLE)
if (Python_EXECUTABLE)
add_custom_target(
install-python
${PYTHON_EXECUTABLE} install.py -v ${LAMMPS_SOURCE_DIR}/version.h
${Python_EXECUTABLE} install.py -v ${LAMMPS_SOURCE_DIR}/version.h
-m ${LAMMPS_PYTHON_DIR}/lammps.py
-l ${CMAKE_BINARY_DIR}/liblammps${CMAKE_SHARED_LIBRARY_SUFFIX}
WORKING_DIRECTORY ${LAMMPS_PYTHON_DIR}
@ -653,14 +631,17 @@ endif()
# LAMMPS for package managers and with different prefix settings.
# This requires either a shared library or that the PYTHON package is included.
###############################################################################
if((BUILD_LIB AND BUILD_SHARED_LIBS) OR (PKG_PYTHON))
if(BUILD_SHARED_LIBS OR PKG_PYTHON)
if(CMAKE_VERSION VERSION_LESS 3.12)
find_package(PythonInterp) # Deprecated since version 3.12
if(PYTHONINTERP_FOUND)
set(Python_EXECUTABLE ${PYTHON_EXECUTABLE})
endif()
else()
find_package(Python COMPONENTS Interpreter)
endif()
if (PYTHON_EXECUTABLE)
execute_process(COMMAND ${PYTHON_EXECUTABLE}
if (Python_EXECUTABLE)
execute_process(COMMAND ${Python_EXECUTABLE}
-c "import distutils.sysconfig as cg; print(cg.get_python_lib(1,0,prefix='${CMAKE_INSTALL_PREFIX}'))"
OUTPUT_VARIABLE PYTHON_DEFAULT_INSTDIR OUTPUT_STRIP_TRAILING_WHITESPACE)
set(PYTHON_INSTDIR ${PYTHON_DEFAULT_INSTDIR} CACHE PATH "Installation folder for LAMMPS Python module")
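
Once configured with a shared library, the install-python target defined above can be driven like this (a sketch; the build directory layout is an assumption):

    cmake -D BUILD_SHARED_LIBS=yes ../cmake
    cmake --build .
    make install-python    # runs install.py with the detected Python interpreter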
@ -713,9 +694,7 @@ if (${_index} GREATER -1)
C Flags: ${CMAKE_C_FLAGS} ${CMAKE_C_FLAGS_${BTYPE}}")
endif()
message(STATUS "<<< Linker flags: >>>")
if(BUILD_EXE)
message(STATUS "Executable name: ${LAMMPS_BINARY}")
endif()
message(STATUS "Executable name: ${LAMMPS_BINARY}")
if(CMAKE_EXE_LINKER_FLAGS)
message(STATUS "Executable linker flags: ${CMAKE_EXE_LINKER_FLAGS}")
endif()


@ -1,26 +1,17 @@
set(CMAKE_MODULE_PATH ${CMAKE_CURRENT_LIST_DIR})
include(CMakeFindDependencyMacro)
if(@BUILD_MPI@)
find_dependency(MPI REQUIRED CXX)
endif()
if(@PKG_KSPACE@)
if(@FFT@ STREQUAL "FFTW3")
if(@FFTW@ STREQUAL "FFTW3" AND NOT TARGET FFTW3::FFTW3)
add_library(FFTW3::FFTW3 UNKNOWN IMPORTED)
set_target_properties(FFTW3::FFTW3 PROPERTIES
IMPORTED_LINK_INTERFACE_LANGUAGES "C"
IMPORTED_LOCATION "@FFTW3_LIBRARY@"
INTERFACE_INCLUDE_DIRECTORIES "@FFTW3_INCLUDE_DIRS@")
endif()
if(@FFTW@ STREQUAL "FFTW3F" AND NOT TARGET FFTW3F::FFTW3F)
add_library(FFTW3F::FFTW3F UNKNOWN IMPORTED)
set_target_properties(FFTW3F::FFTW3F PROPERTIES
IMPORTED_LINK_INTERFACE_LANGUAGES "C"
IMPORTED_LOCATION "@FFTW3F_LIBRARY@"
INTERFACE_INCLUDE_DIRECTORIES "@FFTW3F_INCLUDE_DIRS@")
endif()
find_dependency(@FFTW@ REQUIRED MODULE)
endif()
endif()
if(NOT @BUILD_SHARED_LIBS@)
if(@BUILD_OMP@)
find_dependency(OpenMP REQUIRED)
endif()
if(@WITH_JPEG@)
find_dependency(JPEG REQUIRED)
endif()
@ -28,36 +19,20 @@ if(NOT @BUILD_SHARED_LIBS@)
find_dependency(PNG REQUIRED)
find_dependency(ZLIB REQUIRED)
endif()
if(@PKG_KIM@ AND NOT @DOWNLOAD_KIM@)
if(@PKG_KIM@)
find_dependency(PkgConfig REQUIRED)
pkg_check_modules(KIM-API REQUIRED IMPORTED_TARGET libkim-api>=@KIM-API_MIN_VERSION@)
if(@CURL_FOUND@)
find_dependency(CURL REQUIRED)
endif()
endif()
if(@PKG_USER-SMD@ AND NOT @DOWNLOAD_EIGEN3@)
if(@PKG_USER-SMD@)
find_dependency(Eigen3 NO_MODULE REQUIRED)
endif()
if(@PKG_KSPACE@)
if(@FFT@ STREQUAL "FFTW3" AND @FFT_FFTW_THREADS@)
if(@FFTW@ STREQUAL "FFTW3" AND NOT TARGET FFTW3::FFTW3_OMP)
add_library(FFTW3::FFTW3_OMP UNKNOWN IMPORTED)
set_target_properties(FFTW3::FFTW3_OMP PROPERTIES
IMPORTED_LINK_INTERFACE_LANGUAGES "C"
IMPORTED_LOCATION "@FFTW3_OMP_LIBRARY@"
INTERFACE_INCLUDE_DIRECTORIES "@FFTW3_OMP_INCLUDE_DIRS@")
endif()
if(@FFTW@ STREQUAL "FFTW3F" AND NOT TARGET FFTW3F::FFTW3F_OMP)
add_library(FFTW3F::FFTW3F_OMP UNKNOWN IMPORTED)
set_target_properties(FFTW3F::FFTW3F_OMP PROPERTIES
IMPORTED_LINK_INTERFACE_LANGUAGES "C"
IMPORTED_LOCATION "@FFTW3F_OMP_LIBRARY@"
INTERFACE_INCLUDE_DIRECTORIES "@FFTW3F_OMP_INCLUDE_DIRS@")
endif()
endif()
endif()
if(@PKG_USER-SCAFACOS@ AND NOT @DOWNLOAD_SCAFACOS@)
if(@PKG_USER-SCAFACOS@)
find_dependency(PkgConfig REQUIRED)
find_dependency(GSL REQUIRED)
find_dependency(MPI REQUIRED C Fortran)
pkg_check_modules(SCAFACOS REQUIRED IMPORTED_TARGET scafacos)
endif()
if(@PKG_PYTHON@ AND NOT CMAKE_VERSION VERSION_LESS 3.12)
@ -71,26 +46,42 @@ if(NOT @BUILD_SHARED_LIBS@)
find_dependency(Kokkos 3 REQUIRED)
endif()
endif()
if(@PKG_VORONOI@ AND NOT @DOWNLOAD_VORO@)
if(NOT TARGET VORO::VORO)
add_library(VORO::VORO UNKNOWN IMPORTED)
set_target_properties(VORO::VORO PROPERTIES
IMPORTED_LOCATION "@VORO_LIBRARY@"
INTERFACE_INCLUDE_DIRECTORIES "@VORO_INCLUDE_DIR@")
endif()
if(@PKG_VORONOI@)
find_dependency(VORO REQUIRED)
endif()
if(@PKG_USER-INTEL@)
if(@INTEL_LRT_MODE@ STREQUAL "THREADS")
find_dependency(Threads REQUIRED)
endif()
if(@TBB_MALLOC_FOUND@)
if(NOT TARGET TBB::TBB_MALLOC)
add_library(TBB::TBB_MALLOC UNKNOWN IMPORTED)
set_target_properties(TBB::TBB_MALLOC PROPERTIES
IMPORTED_LOCATION "@TBB_MALLOC_LIBRARY@"
INTERFACE_INCLUDE_DIRECTORIES "@TBB_MALLOC_INCLUDE_DIR@")
endif()
find_dependency(TBB_MALLOC REQUIRED)
endif()
endif()
if(@PKG_USER-ADIOS@)
find_dependency(ADIOS2 REQUIRED)
endif()
if(@PKG_LATTE@)
find_dependency(LATTE REQUIRED)
endif()
if(@PKG_MESSAGE@)
if(@MESSAGE_ZMQ@)
find_dependency(ZMQ REQUIRED)
endif()
endif()
if(@PKG_MSCG@)
find_dependency(GSL REQUIRED)
find_dependency(MSCG REQUIRED)
endif()
if(@PKG_USER-NETCDF@)
if(@NETCDF_FOUND@)
find_dependency(NetCDF REQUIRED)
endif()
if(@PNETCDF_FOUND@)
find_dependency(PNetCDF REQUIRED)
endif()
endif()
if(@PKG_QUIP@)
find_dependency(QUIP REQUIRED)
endif()
endif()
include("${CMAKE_CURRENT_LIST_DIR}/LAMMPS_Targets.cmake")


@ -1,10 +1,8 @@
###############################################################################
# Build documentation
###############################################################################
option(BUILD_DOC "Build LAMMPS documentation" OFF)
option(BUILD_DOC "Build LAMMPS HTML documentation" OFF)
if(BUILD_DOC)
include(ProcessorCount)
ProcessorCount(NPROCS)
find_package(PythonInterp 3 REQUIRED)
set(VIRTUALENV ${PYTHON_EXECUTABLE} -m virtualenv)
@ -26,15 +24,49 @@ if(BUILD_DOC)
COMMAND ${DOCENV_BINARY_DIR}/pip install --upgrade ${LAMMPS_DOC_DIR}/utils/converters
)
# download mathjax distribution and unpack to folder "mathjax"
file(DOWNLOAD "https://github.com/mathjax/MathJax/archive/3.0.5.tar.gz"
"${CMAKE_CURRENT_BINARY_DIR}/mathjax.tar.gz"
EXPECTED_MD5 5d9d3799cce77a1a95eee6be04eb68e7)
if(NOT EXISTS ${CMAKE_CURRENT_BINARY_DIR}/mathjax)
execute_process(COMMAND ${CMAKE_COMMAND} -E tar xzf mathjax.tar.gz WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR})
file(GLOB MATHJAX_VERSION_DIR ${CMAKE_CURRENT_BINARY_DIR}/MathJax-*)
execute_process(COMMAND ${CMAKE_COMMAND} -E rename ${MATHJAX_VERSION_DIR} ${CMAKE_CURRENT_BINARY_DIR}/mathjax)
endif()
file(MAKE_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/html/_static/mathjax)
file(COPY ${CMAKE_CURRENT_BINARY_DIR}/mathjax/es5 DESTINATION ${CMAKE_CURRENT_BINARY_DIR}/html/_static/mathjax/)
# note, this may run in parallel with other tasks, so we must not use multiple processes here
add_custom_command(
OUTPUT html
DEPENDS ${DOC_SOURCES} docenv requirements.txt
COMMAND ${DOCENV_BINARY_DIR}/sphinx-build -j ${NPROCS} -b html -c ${LAMMPS_DOC_DIR}/utils/sphinx-config -d ${CMAKE_BINARY_DIR}/doctrees ${LAMMPS_DOC_DIR}/src html
COMMAND ${DOCENV_BINARY_DIR}/sphinx-build -b html -c ${LAMMPS_DOC_DIR}/utils/sphinx-config -d ${CMAKE_BINARY_DIR}/doctrees ${LAMMPS_DOC_DIR}/src html
)
# copy selected image files to html output tree
file(MAKE_DIRECTORY ${CMAKE_BINARY_DIR}/html/JPG)
set(HTML_EXTRA_IMAGES balance_nonuniform.jpg balance_rcb.jpg
balance_uniform.jpg bow_tutorial_01.png bow_tutorial_02.png
bow_tutorial_03.png bow_tutorial_04.png bow_tutorial_05.png
dump1.jpg dump2.jpg examples_mdpd.gif gran_funnel.png gran_mixer.png
hop1.jpg hop2.jpg saed_ewald_intersect.jpg saed_mesh.jpg
screenshot_atomeye.jpg screenshot_gl.jpg screenshot_pymol.jpg
screenshot_vmd.jpg sinusoid.jpg xrd_mesh.jpg)
set(HTML_IMAGE_TARGETS "")
foreach(_IMG ${HTML_EXTRA_IMAGES})
string(PREPEND _IMG JPG/)
list(APPEND HTML_IMAGE_TARGETS "html/${_IMG}")
add_custom_command(
OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/html/${_IMG}
DEPENDS ${LAMMPS_DOC_DIR}/src/${_IMG} html/JPG
COMMAND ${CMAKE_COMMAND} -E copy ${LAMMPS_DOC_DIR}/src/${_IMG} ${CMAKE_BINARY_DIR}/html/${_IMG}
)
endforeach()
add_custom_target(
doc ALL
DEPENDS html
DEPENDS html html/_static/mathjax/es5 ${HTML_IMAGE_TARGETS}
SOURCES ${LAMMPS_DOC_DIR}/utils/requirements.txt ${DOC_SOURCES}
)
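
A hypothetical way to exercise this target from a CMake build directory:

    cmake -D BUILD_DOC=yes ../cmake
    make doc    # builds the HTML manual, MathJax, and the extra images into html/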


@ -38,7 +38,7 @@ if(GPU_API STREQUAL "CUDA")
set(GPU_CUDA_MPS_FLAGS "-DCUDA_PROXY")
endif()
set(GPU_ARCH "sm_30" CACHE STRING "LAMMPS GPU CUDA SM primary architecture (e.g. sm_60)")
set(GPU_ARCH "sm_50" CACHE STRING "LAMMPS GPU CUDA SM primary architecture (e.g. sm_60)")
file(GLOB GPU_LIB_CU ${LAMMPS_LIB_SOURCE_DIR}/gpu/[^.]*.cu ${CMAKE_CURRENT_SOURCE_DIR}/gpu/[^.]*.cu)
list(REMOVE_ITEM GPU_LIB_CU ${LAMMPS_LIB_SOURCE_DIR}/gpu/lal_pppm.cu)
@ -96,7 +96,6 @@ if(GPU_API STREQUAL "CUDA")
endforeach()
set_directory_properties(PROPERTIES ADDITIONAL_MAKE_CLEAN_FILES "${LAMMPS_LIB_BINARY_DIR}/gpu/*_cubin.h")
add_library(gpu STATIC ${GPU_LIB_SOURCES} ${GPU_LIB_CUDPP_SOURCES} ${GPU_OBJS})
target_link_libraries(gpu PRIVATE ${CUDA_LIBRARIES} ${CUDA_CUDA_LIBRARY})
target_include_directories(gpu PRIVATE ${LAMMPS_LIB_BINARY_DIR}/gpu ${CUDA_INCLUDE_DIRS})
@ -113,7 +112,6 @@ if(GPU_API STREQUAL "CUDA")
target_link_libraries(nvc_get_devices PRIVATE ${CUDA_LIBRARIES} ${CUDA_CUDA_LIBRARY})
target_include_directories(nvc_get_devices PRIVATE ${CUDA_INCLUDE_DIRS})
elseif(GPU_API STREQUAL "OPENCL")
if(${CMAKE_SYSTEM_NAME} STREQUAL "Windows")
# download and unpack support binaries for compilation of windows binaries.
@ -213,7 +211,7 @@ elseif(GPU_API STREQUAL "HIP")
set(HIP_ARCH "gfx906" CACHE STRING "HIP target architecture")
elseif(HIP_PLATFORM STREQUAL "nvcc")
find_package(CUDA REQUIRED)
set(HIP_ARCH "sm_30" CACHE STRING "HIP primary CUDA architecture (e.g. sm_60)")
set(HIP_ARCH "sm_50" CACHE STRING "HIP primary CUDA architecture (e.g. sm_60)")
# build arch/gencode commands for nvcc based on CUDA toolkit version and use choice
# --arch translates directly instead of JIT, so this should be for the preferred or most common architecture
@ -356,10 +354,10 @@ RegisterStylesExt(${GPU_SOURCES_DIR} gpu GPU_SOURCES)
get_property(GPU_SOURCES GLOBAL PROPERTY GPU_SOURCES)
target_link_libraries(gpu PRIVATE MPI::MPI_CXX)
if(BUILD_LIB AND NOT BUILD_SHARED_LIBS)
if(NOT BUILD_SHARED_LIBS)
install(TARGETS gpu EXPORT LAMMPS_Targets LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR} ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR})
endif()
target_compile_definitions(gpu PRIVATE -DLAMMPS_${LAMMPS_SIZES})
set_target_properties(gpu PROPERTIES OUTPUT_NAME lammps_gpu${LAMMPS_LIB_SUFFIX})
set_target_properties(gpu PROPERTIES OUTPUT_NAME lammps_gpu${LAMMPS_MACHINE})
target_sources(lammps PRIVATE ${GPU_SOURCES})
target_include_directories(lammps PRIVATE ${GPU_SOURCES_DIR})
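
A configuration sketch for a CUDA build with these defaults (sm_60 is only an example target; adjust to the actual GPU):

    cmake -D PKG_GPU=yes -D GPU_API=cuda -D GPU_ARCH=sm_60 ../cmake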


@ -51,7 +51,7 @@ if(DOWNLOAD_KIM)
INTERFACE_INCLUDE_DIRECTORIES "${INSTALL_DIR}/include/kim-api")
target_link_libraries(lammps PRIVATE LAMMPS::KIM)
add_dependencies(LAMMPS::KIM kim_build)
if(BUILD_LIB AND NOT BUILD_SHARED_LIBS)
if(NOT BUILD_SHARED_LIBS)
install(CODE "MESSAGE(FATAL_ERROR \"Installing liblammps with downloaded libraries is currently not supported.\")")
endif()
else()


@ -35,7 +35,7 @@ if(DOWNLOAD_KOKKOS)
INTERFACE_LINK_LIBRARIES ${CMAKE_DL_LIBS})
target_link_libraries(lammps PRIVATE LAMMPS::KOKKOS)
add_dependencies(LAMMPS::KOKKOS kokkos_build)
if(BUILD_LIB AND NOT BUILD_SHARED_LIBS)
if(NOT BUILD_SHARED_LIBS)
install(CODE "MESSAGE(FATAL_ERROR \"Installing liblammps with downloaded libraries is currently not supported.\")")
endif()
elseif(EXTERNAL_KOKKOS)


@ -27,7 +27,7 @@ if(DOWNLOAD_LATTE)
INTERFACE_LINK_LIBRARIES "${LAPACK_LIBRARIES}")
target_link_libraries(lammps PRIVATE LAMMPS::LATTE)
add_dependencies(LAMMPS::LATTE latte_build)
if(BUILD_LIB AND NOT BUILD_SHARED_LIBS)
if(NOT BUILD_SHARED_LIBS)
install(CODE "MESSAGE(FATAL_ERROR \"Installing liblammps with downloaded libraries is currently not supported.\")")
endif()
else()


@ -7,11 +7,11 @@ file(GLOB_RECURSE cslib_SOURCES ${LAMMPS_LIB_SOURCE_DIR}/message/cslib/[^.]*.F
${LAMMPS_LIB_SOURCE_DIR}/message/cslib/[^.]*.cpp)
add_library(cslib STATIC ${cslib_SOURCES})
if(BUILD_LIB AND NOT BUILD_SHARED_LIBS)
if(NOT BUILD_SHARED_LIBS)
install(TARGETS cslib EXPORT LAMMPS_Targets LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR} ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR})
endif()
target_compile_definitions(cslib PRIVATE -DLAMMPS_${LAMMPS_SIZES})
set_target_properties(cslib PROPERTIES OUTPUT_NAME lammps_cslib${LAMMPS_LIB_SUFFIX})
set_target_properties(cslib PROPERTIES OUTPUT_NAME lammps_cslib${LAMMPS_MACHINE})
if(BUILD_MPI)
target_compile_definitions(cslib PRIVATE -DMPI_YES)
set_target_properties(cslib PROPERTIES OUTPUT_NAME "csmpi")


@ -35,7 +35,7 @@ if(DOWNLOAD_MSCG)
INTERFACE_LINK_LIBRARIES "${LAPACK_LIBRARIES}")
target_link_libraries(lammps PRIVATE LAMMPS::MSCG)
add_dependencies(LAMMPS::MSCG mscg_build)
if(BUILD_LIB AND NOT BUILD_SHARED_LIBS)
if(NOT BUILD_SHARED_LIBS)
install(CODE "MESSAGE(FATAL_ERROR \"Installing liblammps with downloaded libraries is currently not supported.\")")
endif()
else()


@ -9,19 +9,19 @@ if(COLVARS_LEPTON)
set(LEPTON_DIR ${LAMMPS_LIB_SOURCE_DIR}/colvars/lepton)
file(GLOB LEPTON_SOURCES ${LEPTON_DIR}/src/[^.]*.cpp)
add_library(lepton STATIC ${LEPTON_SOURCES})
if(BUILD_LIB AND NOT BUILD_SHARED_LIBS)
if(NOT BUILD_SHARED_LIBS)
install(TARGETS lepton EXPORT LAMMPS_Targets LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR} ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR})
endif()
set_target_properties(lepton PROPERTIES OUTPUT_NAME lammps_lepton${LAMMPS_LIB_SUFFIX})
set_target_properties(lepton PROPERTIES OUTPUT_NAME lammps_lepton${LAMMPS_MACHINE})
target_include_directories(lepton PRIVATE ${LEPTON_DIR}/include)
endif()
add_library(colvars STATIC ${COLVARS_SOURCES})
if(BUILD_LIB AND NOT BUILD_SHARED_LIBS)
if(NOT BUILD_SHARED_LIBS)
install(TARGETS colvars EXPORT LAMMPS_Targets LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR} ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR})
endif()
target_compile_definitions(colvars PRIVATE -DLAMMPS_${LAMMPS_SIZES})
set_target_properties(colvars PROPERTIES OUTPUT_NAME lammps_colvars${LAMMPS_LIB_SUFFIX})
set_target_properties(colvars PROPERTIES OUTPUT_NAME lammps_colvars${LAMMPS_MACHINE})
target_include_directories(colvars PUBLIC ${LAMMPS_LIB_SOURCE_DIR}/colvars)
target_link_libraries(lammps PRIVATE colvars)


@ -1,6 +1,6 @@
set(MOLFILE_INCLUDE_DIRS "${LAMMPS_LIB_SOURCE_DIR}/molfile" CACHE STRING "Path to VMD molfile plugin headers")
add_library(molfile INTERFACE)
if(BUILD_LIB AND NOT BUILD_SHARED_LIBS)
if(NOT BUILD_SHARED_LIBS)
install(TARGETS molfile EXPORT LAMMPS_Targets LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR} ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR})
endif()
target_include_directories(molfile INTERFACE ${MOLFILE_INCLUDE_DIRS})


@ -70,7 +70,7 @@ if(DOWNLOAD_PLUMED)
ExternalProject_get_property(plumed_build INSTALL_DIR)
add_library(LAMMPS::PLUMED UNKNOWN IMPORTED)
add_dependencies(LAMMPS::PLUMED plumed_build)
if(BUILD_LIB AND NOT BUILD_SHARED_LIBS)
if(NOT BUILD_SHARED_LIBS)
install(CODE "MESSAGE(FATAL_ERROR \"Installing liblammps with downloaded libraries is currently not supported.\")")
endif()
if(PLUMED_MODE STREQUAL "STATIC")


@ -1,15 +1,12 @@
enable_language(C)
if(NOT BUILD_LIB)
message(FATAL_ERROR "Building a QM/MM executable with USER-QMMM requires BUILD_LIB=yes")
endif()
if(NOT BUILD_SHARED_LIBS)
message(WARNING "It is recommended to use BUILD_SHARED_LIBS=yes with USER-QMMM")
endif()
add_library(qmmm STATIC ${LAMMPS_LIB_SOURCE_DIR}/qmmm/libqmmm.c)
if(BUILD_LIB AND NOT BUILD_SHARED_LIBS)
if(NOT BUILD_SHARED_LIBS)
install(TARGETS qmmm EXPORT LAMMPS_Targets LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR} ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR})
endif()
set_target_properties(qmmm PROPERTIES OUTPUT_NAME lammps_qmmm${LAMMPS_LIB_SUFFIX})
set_target_properties(qmmm PROPERTIES OUTPUT_NAME lammps_qmmm${LAMMPS_MACHINE})
target_link_libraries(lammps PRIVATE qmmm)
target_include_directories(qmmm PUBLIC ${LAMMPS_LIB_SOURCE_DIR}/qmmm)


@ -52,7 +52,7 @@ if(DOWNLOAD_SCAFACOS)
INTERFACE_LINK_LIBRARIES "${INSTALL_DIR}/lib/libfcs.a;${INSTALL_DIR}/lib/libfcs_direct.a;${INSTALL_DIR}/lib/libfcs_ewald.a;${INSTALL_DIR}/lib/libfcs_fmm.a;${INSTALL_DIR}/lib/libfcs_p2nfft.a;${INSTALL_DIR}/lib/libfcs_p3m.a;GSL::gsl;${INSTALL_DIR}/lib/libfcs_near.a;${INSTALL_DIR}/lib/libfcs_gridsort.a;${INSTALL_DIR}/lib/libfcs_resort.a;${INSTALL_DIR}/lib/libfcs_redist.a;${INSTALL_DIR}/lib/libfcs_common.a;${INSTALL_DIR}/lib/libfcs_pnfft.a;${INSTALL_DIR}/lib/libfcs_pfft.a;${INSTALL_DIR}/lib/libfcs_fftw3_mpi.a;${INSTALL_DIR}/lib/libfcs_fftw3.a;MPI::MPI_Fortran;MPI::MPI_C")
target_link_libraries(lammps PRIVATE LAMMPS::SCAFACOS)
add_dependencies(LAMMPS::SCAFACOS scafacos_build)
if(BUILD_LIB AND NOT BUILD_SHARED_LIBS)
if(NOT BUILD_SHARED_LIBS)
install(CODE "MESSAGE(FATAL_ERROR \"Installing liblammps with downloaded libraries is currently not supported.\")")
endif()
else()


@ -18,7 +18,7 @@ if(DOWNLOAD_EIGEN3)
set_target_properties(LAMMPS::EIGEN3 PROPERTIES INTERFACE_INCLUDE_DIRECTORIES "${SOURCE_DIR}")
target_link_libraries(lammps PRIVATE LAMMPS::EIGEN3)
add_dependencies(LAMMPS::EIGEN3 Eigen3_build)
if(BUILD_LIB AND NOT BUILD_SHARED_LIBS)
if(NOT BUILD_SHARED_LIBS)
install(CODE "MESSAGE(FATAL_ERROR \"Installing liblammps with downloaded libraries is currently not supported.\")")
endif()
else()


@ -35,7 +35,7 @@ if(DOWNLOAD_VORO)
INTERFACE_INCLUDE_DIRECTORIES "${SOURCE_DIR}/src")
target_link_libraries(lammps PRIVATE LAMMPS::VORO)
add_dependencies(LAMMPS::VORO voro_build)
if(BUILD_LIB AND NOT BUILD_SHARED_LIBS)
if(NOT BUILD_SHARED_LIBS)
install(CODE "MESSAGE(FATAL_ERROR \"Installing liblammps with downloaded libraries is currently not supported.\")")
endif()
else()


@ -2,7 +2,7 @@
# Testing
###############################################################################
option(ENABLE_TESTING "Enable testing" OFF)
if(ENABLE_TESTING AND BUILD_EXE)
if(ENABLE_TESTING)
enable_testing()
option(LAMMPS_TESTING_SOURCE_DIR "Location of lammps-testing source directory" "")
option(LAMMPS_TESTING_GIT_TAG "Git tag of lammps-testing" "master")
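
With the executable now always built, testing can be enabled unconditionally; a usage sketch:

    cmake -D ENABLE_TESTING=yes ../cmake
    cmake --build .
    ctest    # runs the tests fetched from the lammps-testing repository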

File diff suppressed because it is too large.


@ -27,6 +27,6 @@ Description: Large-scale Atomic/Molecular Massively Parallel Simulator Library
URL: http://lammps.sandia.gov
Version: @PROJECT_VERSION@
Requires:
Libs: -L${libdir} -llammps@LAMMPS_LIB_SUFFIX@
Libs: -L${libdir} -llammps@LAMMPS_MACHINE@
Libs.private: -lm
Cflags: -I${includedir} @LAMMPS_API_DEFINES@
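
A hypothetical downstream compile using the installed pkg-config file (assuming no machine suffix and that PKG_CONFIG_PATH includes the install location):

    g++ -o my_app my_app.cpp $(pkg-config --cflags --libs liblammps)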


@ -57,7 +57,7 @@ help:
# ------------------------------------------
clean-all: clean
rm -rf $(BUILDDIR)/docenv $(BUILDDIR)/doctrees $(BUILDDIR)/mathjax
rm -rf $(BUILDDIR)/docenv $(BUILDDIR)/doctrees $(BUILDDIR)/mathjax Manual.pdf Developer.pdf
clean: clean-spelling
rm -rf html epub latex
@ -83,7 +83,7 @@ html: $(ANCHORCHECK) $(MATHJAX)
@rm -rf html/JPG
@cp -r src/PDF html/PDF
@mkdir -p html/JPG
@cp `grep -A2 '\.\. .*image::' src/*.rst | grep ':target:' | sed -e 's,.*:target: JPG/,src/JPG/,' | sort | uniq` html/JPG/
@cp `grep -A2 '\.\. .*\(image\|figure\)::' src/*.rst | grep ':target:' | sed -e 's,.*:target: JPG/,src/JPG/,' | sort | uniq` html/JPG/
@rm -rf html/PDF/.[sg]*
@mkdir -p html/_static/mathjax
@cp -r $(MATHJAX)/es5 html/_static/mathjax/
@ -194,7 +194,7 @@ $(VENV):
$(VIRTUALENV) -p $(PYTHON) $(VENV); \
. $(VENV)/bin/activate; \
pip install --upgrade pip; \
pip install Sphinx==2.4.4; \
pip install Sphinx; \
pip install sphinxcontrib-spelling ;\
pip install breathe; \
deactivate;\

doc/src/.gitignore

@ -1,3 +1 @@
/Eqs
/JPG
/false_positives.txt


@ -1,12 +1,14 @@
Build LAMMPS
************
LAMMPS can be built as an executable or library from source code via
either traditional makefiles (which may require manual editing)
for use with GNU make or gmake, or a build environment generated by CMake
(Unix Makefiles, Xcode, Visual Studio, KDevelop or more). As an
alternative you can download a package with pre-built executables
as described on the :doc:`Install <Install>` doc page.
LAMMPS is built as a library and an executable from source code using
either traditional makefiles for use with GNU make (which may require
manual editing), or using a build environment generated by CMake (Unix
Makefiles, Ninja, Xcode, Visual Studio, KDevelop, CodeBlocks and more).
As an alternative you can download a package with pre-built executables
or automated build trees as described on the :doc:`Install <Install>`
doc page.
.. toctree::
:maxdepth: 1


@ -6,7 +6,8 @@ CMake and make:
* :ref:`Serial vs parallel build <serial>`
* :ref:`Choice of compiler and compile/link options <compile>`
* :ref:`Build LAMMPS as an executable or a library <exe>`
* :ref:`Build the LAMMPS executable and library <exe>`
* :ref:`Including and removing debug support <debug>`
* :ref:`Build the LAMMPS documentation <doc>`
* :ref:`Install LAMMPS after a build <install>`
@ -15,7 +16,7 @@ CMake and make:
.. _serial:
Serial vs parallel build
-------------------------------------
------------------------
LAMMPS is written to use the ubiquitous `MPI (Message Passing Interface)
<https://en.wikipedia.org/wiki/Message_Passing_Interface>`_ library API
@ -28,8 +29,8 @@ MPI STUBS library.
Independent of the distributed memory MPI parallelization, parts of
LAMMPS are also written with support for shared memory parallelization
using the OpenMP threading standard. A more detailed discussion of that
is below.
using the `OpenMP <https://en.wikipedia.org/wiki/OpenMP>`_ threading
standard. A more detailed discussion of that is below.
**CMake build**\ :
@ -41,7 +42,7 @@ is below.
# no default value
The executable created by CMake (after running make) is named ``lmp`` unless
the LAMMPS_MACHINE option is set. When setting ``LAMMPS_MACHINE=name``
the ``LAMMPS_MACHINE`` option is set. When setting ``LAMMPS_MACHINE=name``
the executable will be called ``lmp_name``. Using ``BUILD_MPI=no`` will
enforce building a serial executable using the MPI STUBS library.
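
A minimal sketch (the machine name ``mybox`` is only an example):

.. code-block:: bash

   cmake -D BUILD_MPI=no -D LAMMPS_MACHINE=mybox ../cmake   # serial build, produces lmp_mybox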
@ -55,18 +56,20 @@ The build with traditional makefiles has to be done inside the source folder ``s
make serial # serial build, produces lmp_serial using Makefile/serial
make mybox # uses Makefile.mybox to produce lmp_mybox
Any "make machine" command will look up the make settings from a file
Makefile.machine, create a folder Obj_machine with all objects and
generated files and an executable called ``lmp_machine``\ . The standard
parallel build with ``make mpi`` assumes a standard MPI installation with
MPI compiler wrappers where all necessary compiler and linker flags to
get access and link with the suitable MPI headers and libraries are set
by the wrapper programs. For other cases or the serial build, you have
to adjust the make file variables ``MPI_INC``, ``MPI_PATH``, ``MPI_LIB``
as well as ``CC`` and ``LINK``\ . To enable OpenMP threading usually
a compiler specific flag needs to be added to the compile and link
commands. For the GNU compilers, this is ``-fopenmp``\ , which can be
added to the ``CC`` and ``LINK`` makefile variables.
Any ``make machine`` command will look up the make settings from a file
``Makefile.machine`` in the folder ``src/MAKE`` or one of its
sub-directories ``MINE``, ``MACHINES``, or ``OPTIONS``, create a folder
``Obj_machine`` with all objects and generated files and an executable
called ``lmp_machine``\ . The standard parallel build with ``make mpi``
assumes a standard MPI installation with MPI compiler wrappers where all
necessary compiler and linker flags to get access and link with the
suitable MPI headers and libraries are set by the wrapper programs. For
other cases or the serial build, you have to adjust the make file
variables ``MPI_INC``, ``MPI_PATH``, ``MPI_LIB`` as well as ``CC`` and
``LINK``\ . To enable OpenMP threading usually a compiler specific flag
needs to be added to the compile and link commands. For the GNU
compilers, this is ``-fopenmp``\ , which can be added to the ``CC`` and
``LINK`` makefile variables.
For the serial build the following make variables are set (see src/MAKE/Makefile.serial):
@ -80,18 +83,19 @@ For the serial build the following make variables are set (see src/MAKE/Makefile
You also need to build the STUBS library for your platform before making
LAMMPS itself. A ``make serial`` build does this for you automatically,
otherwise, type ``make mpi-stubs`` from the src directory, or ``make`` from
the src/STUBS dir. If the build fails, you will need to edit the
STUBS/Makefile for your platform. The stubs library does not provide
MPI/IO functions required by some LAMMPS packages, e.g. MPIIO or USER-LB,
and thus is not compatible with those packages.
otherwise, type ``make mpi-stubs`` from the src directory, or ``make``
from the ``src/STUBS`` dir. If the build fails, you may need to edit
the ``STUBS/Makefile`` for your platform. The stubs library does not
provide MPI/IO functions required by some LAMMPS packages,
e.g. ``MPIIO`` or ``USER-LB``, and thus is not compatible with those
packages.
.. note::
The file ``src/STUBS/mpi.c`` provides a CPU timer function called
MPI_Wtime() that calls gettimeofday() . If your operating system
does not support gettimeofday() , you will need to insert code to
call another timer. Note that the ANSI-standard function clock()
``MPI_Wtime()`` that calls ``gettimeofday()``. If your operating system
does not support ``gettimeofday()``, you will need to insert code to
call another timer. Note that the ANSI-standard function ``clock()``
rolls over after an hour or so, and is therefore insufficient for
timing long LAMMPS simulations.
@ -111,20 +115,22 @@ self-installed MPICH or OpenMPI, so you should study the provided
documentation to find out how to build and link with it.
The majority of OpenMP (threading) support in LAMMPS is provided by the
USER-OMP package; see the :doc:`Speed omp <Speed_omp>` doc page for
details. The USER-INTEL package also includes OpenMP threading (it is
compatible with USER-OMP) and adds vectorization support when compiled
with compatible compilers, in particular the Intel compilers on top of
OpenMP. Also, the KOKKOS package can be compiled to include OpenMP
threading.
``USER-OMP`` package; see the :doc:`Speed omp <Speed_omp>` doc page for
details. The ``USER-INTEL`` package also includes OpenMP threading (it
is compatible with ``USER-OMP`` and will usually fall back on styles
from that package, if a ``USER-INTEL`` version does not exist) and adds
vectorization support when compiled with compatible compilers, in
particular the Intel compilers on top of OpenMP. Also, the ``KOKKOS``
package can be compiled to include OpenMP threading.
In addition, there are a few commands in LAMMPS that have native OpenMP
support included as well. These are commands in the MPIIO, SNAP,
USER-DIFFRACTION, and USER-DPD packages. In addition some packages
support OpenMP threading indirectly through the libraries they interface
to: e.g. LATTE and USER-COLVARS. See the :doc:`Packages details
<Packages_details>` doc page for more info on these packages and the doc
pages for their respective commands for OpenMP threading info.
support included as well. These are commands in the ``MPIIO``,
``SNAP``, ``USER-DIFFRACTION``, and ``USER-DPD`` packages. In addition
some packages support OpenMP threading indirectly through the libraries
they interface to: e.g. ``LATTE``, ``KSPACE``, and ``USER-COLVARS``.
See the :doc:`Packages details <Packages_details>` doc page for more
info on these packages and the doc pages for their respective commands
for OpenMP threading info.
For CMake, if you use ``BUILD_OMP=yes``, you can use these packages
and turn on their native OpenMP support and turn on their native OpenMP
@ -159,15 +165,18 @@ and Intel compilers.
Choice of compiler and compile/link options
---------------------------------------------------------
The choice of compiler and compiler flags can be important for
The choice of compiler and compiler flags can be important for maximum
performance. Vendor provided compilers for a specific hardware can
produce faster code than open-source compilers like the GNU compilers.
On x86 hardware most popular compilers are quite similar in performance
of C/C++ code at high optimization levels. When using the USER-INTEL
package, there is a distinct advantage in using the `Intel C++ compiler
<intel_>`_ due to much improved vectorization through SSE and AVX
instructions on compatible hardware as the source code includes changes
and compiler directives to enable high degrees of vectorization.
On the most common x86 hardware most popular C++ compilers are quite
similar in performance of C/C++ code at high optimization levels. When
using the ``USER-INTEL`` package, there is a distinct advantage in using
the `Intel C++ compiler <intel_>`_ due to much improved vectorization
through SSE and AVX instructions on compatible hardware as the source
code includes changes and Intel compiler specific directives to enable
high degrees of vectorization. This may change over time as equivalent
vectorization directives are included into OpenMP standard revisions and
other compilers adopt them.
.. _intel: https://software.intel.com/en-us/intel-compilers
@ -178,17 +187,20 @@ LAMMPS.
**CMake build**\ :
By default CMake will use a compiler it finds and it will add
optimization flags appropriate to that compiler and any
:doc:`accelerator packages <Speed_packages>` you have included in the
build.
By default CMake will use a compiler it finds according to internal
preferences and it will add optimization flags appropriate to that
compiler and any :doc:`accelerator packages <Speed_packages>` you have
included in the build.
You can tell CMake to look for a specific compiler with these variable
settings. Likewise you can specify the corresponding ``CMAKE_*_FLAGS``
variables if you want to experiment with alternate optimization flags.
You should specify all 3 compilers, so that the small number of LAMMPS
source files written in C or Fortran are built with a compiler consistent
with the one used for all the C++ files:
You can tell CMake to look for a specific compiler by setting CMake
variables during configuration. For a few common choices, there are also
presets in the ``cmake/presets`` folder. For convenience, there is a
``CMAKE_TUNE_FLAGS`` variable that can be set to apply global compiler
options. More on that below, but you can also specify the corresponding
``CMAKE_*_FLAGS`` variables individually if you want to experiment with
alternate optimization flags. You should specify all 3 compilers, so
that the (few) LAMMPS source files written in C or Fortran are built
with a compiler consistent with the one used for the C++ files:
.. code-block:: bash
@ -211,16 +223,17 @@ A few example command lines are:
# Building with LLVM/Clang Compilers:
cmake ../cmake -DCMAKE_C_COMPILER=clang -DCMAKE_CXX_COMPILER=clang++ -DCMAKE_Fortran_COMPILER=flang
For compiling with the Clang/LLVM compilers a special CMake preset is
included that can be loaded with `-C ../cmake/presets/clang.cmake`.
For compiling with the Clang/LLVM compilers a CMake preset is provided that
can be loaded with `-C ../cmake/presets/clang.cmake`. Similarly,
`-C ../cmake/presets/intel.cmake` should switch the compiler toolchain to
the Intel compilers.
In addition you can set ``CMAKE_TUNE_FLAGS`` to specifically add compiler
flags to tune for optimal performance on given hosts. By default these are
initialized to some compiler specific flags, where known, to optimize the
LAMMPS executable with optimizations and instructions available on the host
where LAMMPS is compiled. For example, for Intel compilers this would be
``-xHost`` and for GNU compilers this would be ``-march=native``. To turn
these flags off, set ``-D CMAKE_TUNE_FLAGS=``.
In addition you can set ``CMAKE_TUNE_FLAGS`` to specifically add
compiler flags to tune for optimal performance on given hosts. By
default these are initialized to some compiler specific flags, to
optimize the LAMMPS executable with optimizations and instructions
available on the host where LAMMPS is compiled. For example, for Intel
compilers this would be ``-xHost`` and for GNU compilers this would be
``-march=native``. To turn these flags off, do ``-D CMAKE_TUNE_FLAGS=``.
.. note::
@ -236,11 +249,11 @@ these flags off, set ``-D CMAKE_TUNE_FLAGS=``.
The "compiler/linker settings" section of a Makefile.machine lists
compiler and linker settings for your C++ compiler, including
optimization flags. For a parallel build it is recommended to use
mpicxx or mpiCC, since these compiler wrappers will include a variety of
settings appropriate for your MPI installation and thus avoiding the
guesswork of finding the right flags.
``mpicxx`` or ``mpiCC``, since these compiler wrappers will include a
variety of settings appropriate for your MPI installation and thus
avoiding the guesswork of finding the right flags.
Parallel build (see src/MAKE/Makefile.mpi):
Parallel build (see ``src/MAKE/Makefile.mpi``):
.. code-block:: bash
@ -249,7 +262,7 @@ Parallel build (see src/MAKE/Makefile.mpi):
LINK = mpicxx
LINKFLAGS = -g -O
Serial build (see src/MAKE/Makefile.serial):
Serial build with GNU gcc (see ``src/MAKE/Makefile.serial``):
.. code-block:: make
@ -258,17 +271,35 @@ Serial build (see src/MAKE/Makefile.serial):
LINK = g++
LINKFLAGS = -g -O
.. note::
If you build LAMMPS with any :doc:`accelerator packages <Speed_packages>`
included, there may be specific optimization flags that are either
required or recommended to enable required features and to achieve
optimal performance. You need to include these in the CCFLAGS and
LINKFLAGS settings above. For details, see the individual package
doc pages listed on the :doc:`Speed packages <Speed_packages>` doc
page. Or examine these files in the src/MAKE/OPTIONS directory.
They correspond to each of the 5 accelerator packages and their
hardware variants:
If compilation stops with a message like the following:
.. code-block::
g++ -g -O3 -DLAMMPS_GZIP -DLAMMPS_MEMALIGN=64 -I../STUBS -c ../main.cpp
In file included from ../pointers.h:24:0,
from ../input.h:17,
from ../main.cpp:16:
../lmptype.h:34:2: error: #error LAMMPS requires a C++11 (or later) compliant compiler. Enable C++11 compatibility or upgrade the compiler.
then you have either an unsupported (old) compiler or you have to
turn on C++11 mode. The latter applies to GCC 4.8.x shipped with
RHEL 7.x and CentOS 7.x. For those compilers, you need to add the
``-std=c++11`` flag. Otherwise, you would have to install a newer
compiler that supports C++11; either as a binary package or through
compiling from source.
If you build LAMMPS with any :doc:`accelerator packages
<Speed_packages>` included, there may be specific optimization flags
that are either required or recommended to enable required features and
to achieve optimal performance. You need to include these in the
CCFLAGS and LINKFLAGS settings above. For details, see the individual
package doc pages listed on the :doc:`Speed packages <Speed_packages>`
doc page. Or examine these files in the src/MAKE/OPTIONS directory.
They correspond to each of the 5 accelerator packages and their hardware
variants:
.. code-block:: bash
@ -285,69 +316,74 @@ Serial build (see src/MAKE/Makefile.serial):
.. _exe:
Build LAMMPS as an executable or a library
----------------------------------------------------
Build the LAMMPS executable and library
---------------------------------------
LAMMPS can be built as either an executable or as a static or shared
library. The LAMMPS library can be called from another application or
a scripting language. See the :doc:`Howto couple <Howto_couple>` doc
LAMMPS is always built as a library of C++ classes plus an executable.
The executable is a simple ``main()`` function that sets up MPI and then
creates a LAMMPS class instance from the LAMMPS library, which
will then process commands provided via a file or from the console
input. The LAMMPS library can also be called from another application
or a scripting language. See the :doc:`Howto couple <Howto_couple>` doc
page for more info on coupling LAMMPS to other codes. See the
:doc:`Python <Python_head>` doc page for more info on wrapping and
running LAMMPS from Python via its library interface.
**CMake build**\ :
For CMake builds, you can select through setting CMake variables which
files the compilation produces during the configuration step. If none
are set, defaults are applied.
For CMake builds, you can select by setting CMake variables between
building a shared or a static LAMMPS library and what kind of suffix is
added to them (in case you want to concurrently install multiple variants
of binaries with different settings). If none are set, defaults are applied.
.. code-block:: bash
-D BUILD_EXE=value # yes (default) or no
-D BUILD_LIB=value # yes or no (default)
-D BUILD_SHARED_LIBS=value # yes or no (default)
-D LAMMPS_LIB_SUFFIX=name # name = mpi, serial, mybox, titan, laptop, etc
-D LAMMPS_MACHINE=name # name = mpi, serial, mybox, titan, laptop, etc
# no default value
Setting ``BUILD_EXE=no`` will not produce an executable. Setting
``BUILD_LIB=yes`` will produce a static library named ``liblammps.a``\ .
Setting both ``BUILD_LIB=yes`` and ``BUILD_SHARED_LIBS=yes`` will produce a
shared library named ``liblammps.so`` instead. If ``LAMMPS_LIB_SUFFIX=name``
is set in addition, the name of the generated libraries will be changed to
either ``liblammps_name.a`` or ``liblammps_name.so``\ , respectively.
The compilation will always produce a LAMMPS library and an executable
linked to it. By default this will be a static library named
``liblammps.a`` and an executable named ``lmp``. Setting
``BUILD_SHARED_LIBS=yes`` will instead produce a shared library called
``liblammps.so`` (or ``liblammps.dylib`` or ``liblammps.dll`` depending
on the platform). If ``LAMMPS_MACHINE=name`` is set in addition, the name
of the generated libraries will be changed to either
``liblammps_name.a`` or ``liblammps_name.so``\ , respectively and the
executable will be called ``lmp_name``.
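
A sketch combining both settings (the name ``mpi`` is only an example):

.. code-block:: bash

   cmake -D BUILD_SHARED_LIBS=yes -D LAMMPS_MACHINE=mpi ../cmake
   cmake --build .   # produces liblammps_mpi.so and lmp_mpi linked against it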
**Traditional make**\ :
With the traditional makefile based build process, the choice of
the generated executable or library depends on the "mode" setting.
Several options are available and ``mode=exe`` is the default.
Several options are available and ``mode=static`` is the default.
.. code-block:: bash
make machine # build LAMMPS executable lmp_machine
make mode=exe machine # same as "make machine"
make mode=lib machine # build LAMMPS static lib liblammps_machine.a
make mode=shlib machine # build LAMMPS shared lib liblammps_machine.so
make mode=shexe machine # same as "mode=exe" but uses objects from "mode=shlib"
make mode=static machine # same as "make machine"
make mode=shared machine # build LAMMPS shared lib liblammps_machine.so instead
The two "exe" builds will generate and executable ``lmp_machine``\ ,
while the two library builds will create a file ``liblammps_machine.a``
or ``liblammps_machine.so``\ . They will also create generic soft links,
named ``liblammps.a`` and ``liblammps.so``\ , which point to the specific
The "static" build will generate a static library called
``liblammps_machine.a`` and an executable named ``lmp_machine``\ , while
the "shared" build will generate a shared library
``liblammps_machine.so`` instead and ``lmp_machine`` will be linked to
it. The build step will also create generic soft links, named
``liblammps.a`` and ``liblammps.so``\ , which point to the specific
``liblammps_machine.a/so`` files.
**CMake and make info**\ :
Note that for a shared library to be usable by a calling program, all
the auxiliary libraries it depends on must also exist as shared
libraries. This will be the case for libraries included with LAMMPS,
such as the dummy MPI library in src/STUBS or any package libraries in
the lib/packages directory, since they are always built in a shared
library compatible way using the ``-fPIC`` switch. However, if a library
like MPI or FFTW does not exist as a shared library, the shared library
build may generate an error. This means you will need to install a
shared library version of the auxiliary library. The build instructions
for the library should tell you how to do this.
Note that for creating a shared library, all the libraries it depends on
must be compiled to be compatible with shared libraries. This should be
the case for libraries included with LAMMPS, such as the dummy MPI
library in ``src/STUBS`` or any package libraries in the ``lib``
directory, since they are always built in a shared library compatible
way using the ``-fPIC`` compiler switch. However, if an auxiliary
library (like MPI or FFTW) does not exist as a compatible format, the
shared library linking step may generate an error. This means you will
need to install a compatible version of the auxiliary library. The
build instructions for that library should tell you how to do this.
As an example, here is how to build and install the `MPICH library
<mpich_>`_, a popular open-source version of MPI, as a shared library
@ -361,10 +397,12 @@ in the default /usr/local/lib location:
make
make install
You may need to use ``sudo make install`` in place of the last line if you
do not have write privileges for ``/usr/local/lib``. The end result should
be the file ``/usr/local/lib/libmpich.so``. On many Linux installations the
folder ``${HOME}/.local`` is an alternative to using ``/usr/local`` and does
You may need to use ``sudo make install`` in place of the last line if
you do not have write privileges for ``/usr/local/lib`` or use the
``--prefix`` configuration option to select an installation folder,
where you do have write access. The end result should be the file
``/usr/local/lib/libmpich.so``. On many Linux installations the folder
``${HOME}/.local`` is an alternative to using ``/usr/local`` and does
not require superuser or sudo access. In that case the configuration
step becomes:
@ -372,12 +410,41 @@ step becomes:
./configure --enable-shared --prefix=${HOME}/.local
Avoiding using "sudo" for custom software installation (i.e. from source
Avoiding the use of "sudo" for custom software installation (i.e. from source
and not through a package manager tool provided by the OS) is generally
recommended to ensure the integrity of the system software installation.
----------
.. _debug:
Excluding or removing debug support
-----------------------------------
By default the compilation settings will include the *-g* flag which
instructs the compiler to include debug information (e.g. which line of
source code a particular instruction corresponds to). This can be
extremely useful in case LAMMPS crashes and can help to provide crucial
information in :doc:`tracking down the origin of a crash <Errors_debug>`
and help the LAMMPS developers fix bugs in the source code. However,
this increases the storage requirements for object files, libraries, and
the executable 3-5 fold.
If this is a concern, you can change the compilation settings or remove
the debug information from the LAMMPS executable:
- **Traditional make**: edit your ``Makefile.<machine>`` to remove the
*-g* flag from the ``CCFLAGS`` and ``LINKFLAGS`` definitions
- **CMake**: use ``-D CMAKE_BUILD_TYPE=Release`` or explicitly reset
the applicable compiler flags (best done using the text mode or
graphical user interface).
- **Remove debug info**: If you are only concerned about the executable
being too large, you can use the ``strip`` tool (e.g. ``strip
lmp_serial``) to remove the debug information from the executable file.
Do not strip libraries or object files, as that will render them unusable.
----------
.. _doc:
Build the LAMMPS documentation
@ -428,7 +495,8 @@ It is also possible to create the HTML version of the manual within
the :doc:`CMake build directory <Build_cmake>`. The reason for this
option is to include the installation of the HTML manual pages into
the "install" step when installing LAMMPS after the CMake build via
``make install``.
``make install``. The documentation build is included in the default
build target, but can also be requested independently with ``make doc``.
.. code-block:: bash


@ -1,205 +1,158 @@
Build LAMMPS with CMake
=======================
This page is a short summary of how to use CMake to build LAMMPS.
Details on CMake variables that enable specific LAMMPS build options
are given on the pages linked to from the :doc:`Build <Build>` doc page.
This page describes how to use `CMake <https://cmake.org>`_ in general
to build LAMMPS. Details for specific compile time settings and options
to enable and configure add-on packages are discussed with those
packages. Links to those pages on the :doc:`Build overview <Build>`
page.
Richard Berger (Temple U) has also written a `more comprehensive guide <https://github.com/lammps/lammps/blob/master/cmake/README.md>`_
for how to use CMake to build LAMMPS. If you are new to CMake it is a
good place to start.
The following text assumes some familiarity with CMake and focuses on
using the command line tool ``cmake`` and what settings are supported
for building LAMMPS. A more detailed tutorial on how to use ``cmake``
itself, its text mode or graphical user interface, and how to change the
generated output files for different build tools and development environments is
on a :doc:`separate page <Howto_cmake>`.
----------
.. note::
LAMMPS currently requires that CMake version 3.10 or later is available;
version 3.12 or later is preferred.
.. warning::
You must not mix the :doc:`traditional make based <Build_make>`
LAMMPS build procedure with using CMake. No packages may be installed
and no build may have been previously attempted in the LAMMPS source
directory with ``make <machine>``. CMake will detect if this is
the case and generate an error. To remove conflicting files from
``src`` you can use the command ``make no-all purge``, which will
un-install all packages and delete all auto-generated files.
Advantages of using CMake
^^^^^^^^^^^^^^^^^^^^^^^^^
CMake is an alternative to compiling LAMMPS in the traditional way
through :doc:`(manually customized) makefiles <Build_make>` and a recent
addition to LAMMPS thanks to the efforts of Christoph Junghans (LANL)
and Richard Berger (Temple U). Using CMake has multiple advantages that
are specifically helpful for people with limited experience in compiling
software or for people who want to modify or extend LAMMPS.
- CMake can detect available hardware, tools, features, and libraries
and adapt the LAMMPS default build configuration accordingly.
- CMake can generate files for different build tools and integrated
development environments (IDE).
- CMake supports customization of settings with a text mode or graphical
user interface. No knowledge of file formats or complex command-line
syntax is required.
- All enabled components are compiled in a single build operation.
- Automated dependency tracking for all files and configuration options.
- Support for true out-of-source compilation. Multiple configurations
and settings with different choices of LAMMPS packages, settings, or
compilers can be configured and built concurrently from the same
source tree.
- Simplified packaging of LAMMPS for Linux distributions, environment
modules, or automated build tools like `Homebrew <https://brew.sh/>`_.
- Integration of automated regression testing (the LAMMPS side for that
is still under development).
.. _cmake_build:
Getting started
^^^^^^^^^^^^^^^
Building LAMMPS with CMake is a two-step process. First you use CMake
to generate a build environment in a new directory. For that purpose
you can use either the command-line utility ``cmake`` (or ``cmake3``),
the text-mode UI utility ``ccmake`` (or ``ccmake3``) or the graphical
utility ``cmake-gui``, or use them interchangeably. The second step is
then the compilation and linking of all objects, libraries, and
executables. Here is a minimal example using the command line version of
CMake to build LAMMPS with no add-on packages enabled and no
customization:
.. code-block:: bash
cd lammps # change to the LAMMPS distribution directory
mkdir build; cd build # create and use a build directory
cmake ../cmake # configuration reading CMake scripts from ../cmake
cmake --build . # compilation (or type "make")
This will create and change into a folder called ``build``, then run the
configuration step to generate build files for the default build command
and then launch that build command to compile LAMMPS. During the
configuration step CMake will try to detect whether support for MPI,
OpenMP, FFTW, gzip, JPEG, PNG, and ffmpeg are available and enable the
corresponding configuration settings. The progress of this
configuration can be followed on the screen and a summary of selected
options and settings will be printed at the end. The ``cmake --build
.`` command will launch the compilation, which, if successful, will
ultimately produce a library ``liblammps.a`` and the LAMMPS executable
``lmp`` inside the ``build`` folder.
.. _cmake_doc: https://cmake.org/documentation/
Compilation can take a long time, since LAMMPS is a large project with
many features. If your machine has multiple CPU cores (most do these
days), you can speed this up by compiling sources in parallel with
``make -j N`` (with N being the maximum number of concurrently executed
tasks). Also installation of the `ccache <https://ccache.dev/>`_ (=
Compiler Cache) software may speed up repeated compilation even more,
e.g. during code development.
After the initial build, whenever you edit LAMMPS source files, enable
or disable packages, change compiler flags or build options, you must
re-compile and relink the LAMMPS executable with ``cmake --build .`` (or
``make``). If the compilation fails for some reason, try running
``cmake .`` and then compile again. The included dependency tracking
should make certain that only the necessary subset of files are
re-compiled. You can also delete compiled objects, libraries, and
executables with ``cmake --build . --target clean`` (or ``make clean``).
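A typical incremental rebuild cycle thus looks like this (a sketch, run
from the build folder):

.. code-block:: bash

   cmake .                         # re-run configuration after changing options
   cmake --build .                 # recompile only what has changed (or: make)
   cmake --build . --target clean  # remove objects for a build from scratch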
After compilation, you may optionally install the LAMMPS executable into
your system with:
.. code-block:: bash
make install # optional, copy compiled files into installation location
This will install the LAMMPS executable and library, some tools (if
configured) and additional files like LAMMPS API headers, manpages,
potential and force field files. The location of the installation tree
defaults to ``${HOME}/.local``.
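If you want the files installed elsewhere, you can override the default
location by setting the CMake variable ``CMAKE_INSTALL_PREFIX`` during
configuration, for example (a sketch):

.. code-block:: bash

   cmake -D CMAKE_INSTALL_PREFIX=${HOME}/lammps-install ../cmake
   cmake --build .
   make install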
----------
.. _cmake_options:
Configuration and build options
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
The CMake commands have one mandatory argument: a folder containing a
file called ``CMakeLists.txt`` (for LAMMPS it is located in the
``cmake`` folder) or a build folder containing a file called
``CMakeCache.txt``, which is generated at the end of the CMake
configuration step. The cache file contains all current CMake settings.
To modify settings, enable or disable features, you need to set *variables*
with either the *-D* command line flag (``-D VARIABLE1_NAME=value``) or
change them in the text mode or graphical user interface. The *-D* flag
can be used several times in one command.
You must perform the CMake build system generation and compilation in
a new directory you create. It can be anywhere on your local machine.
In these Build pages we assume that you are building in a directory
called ``lammps/build``. You can perform separate builds independently
with different options, so long as you perform each of them in a
separate directory you create. All the auxiliary files created by one
build process (executable, object files, log files, etc) are stored in
this directory or sub-directories within it that CMake creates.
For your convenience we provide :ref:`CMake presets <cmake_presets>`
that combine multiple settings to enable optional LAMMPS packages or use
a different compiler tool chain. Those are loaded with the *-C* flag
(``-C ../cmake/presets/minimal.cmake``). This step would only be needed
once, as the settings from the preset files are stored in the
``CMakeCache.txt`` file. It is also possible to customize the build
by adding one or more *-D* flags to the CMake command line.
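For example, the following single command loads the bundled ``minimal``
preset and adds one further customization (a sketch; any *-D* flag could
be appended the same way):

.. code-block:: bash

   cmake -C ../cmake/presets/minimal.cmake -D CMAKE_BUILD_TYPE=Release ../cmake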
.. note::
Generating files for alternate build tools (e.g. Ninja) and project files
for IDEs like Eclipse, CodeBlocks, or Kate can be selected using the *-G*
command line flag. A list of available generator settings for your
specific CMake version is given when running ``cmake --help``.
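For example, to generate files for the Ninja build tool instead of make
(a sketch, assuming the ``ninja`` tool is installed):

.. code-block:: bash

   cmake -G Ninja ../cmake   # generate build files for Ninja
   ninja                     # compile with Ninja (or: cmake --build .)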
----------
**Command-line version of CMake**\ :
.. code-block:: bash
cmake [options ...] /path/to/lammps/cmake # build from any dir
cmake [options ...] ../cmake # build from lammps/build
cmake3 [options ...] ../cmake # build from lammps/build
The cmake command takes one required argument: the LAMMPS ``cmake``
directory, which contains the ``CMakeLists.txt`` file. The argument can
be preceded or followed by various CMake command-line options. Several
useful ones are:
.. code-block:: bash
-D CMAKE_INSTALL_PREFIX=path # where to install LAMMPS executable/lib if desired
-D CMAKE_BUILD_TYPE=type # type = RelWithDebInfo (default), Release, MinSizeRel, or Debug
-G output # style of output CMake generates (e.g. "Unix Makefiles" or "Ninja")
-D CMAKE_MAKE_PROGRAM=builder # name of the builder executable (e.g. when using "gmake" instead of "make")
-DVARIABLE=value # setting for a LAMMPS feature to enable
-D VARIABLE=value # ditto, but cannot come after CMakeLists.txt dir
-C path/to/preset/file # load some CMake settings before configuring
All the LAMMPS-specific -D variables that a LAMMPS build supports are
described on the pages linked to from the :doc:`Build <Build>` doc page.
All of these variable names are upper-case and their values are
lower-case, e.g. -D LAMMPS_SIZES=smallbig. For boolean values, any of
these forms can be used: yes/no, on/off, 1/0.
On Unix/Linux machines, CMake generates a Makefile by default to
perform the LAMMPS build. Alternate forms of build info can be
generated via the -G switch, e.g. Visual Studio on a Windows machine,
Xcode on MacOS, or KDevelop on Linux. Type ``cmake --help`` to see the
"Generator" styles of output your system supports.
.. note::
When CMake runs, it prints configuration info to the screen.
You should review this to verify all the features you requested were
enabled, including packages. You can also see what compilers and
compile options will be used for the build. Any errors in CMake
variable syntax will also be flagged, e.g. mis-typed variable names or
variable values.
CMake creates a CMakeCache.txt file when it runs. This stores all the
settings, so that when running CMake again you can use the current
folder '.' instead of the path to the LAMMPS cmake folder as the
required argument to the CMake command. Either way the existing
settings will be inherited unless the CMakeCache.txt file is removed.
If you later want to change a setting, you can rerun cmake in the build
directory with a different setting. Please note that some automatically
detected variables will not change their value when you rerun cmake.
In these cases it is usually better to first remove all the
files/directories in the build directory, or start with a fresh build
directory.
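For example, to change a single setting in an existing build directory
(a sketch, run from the build folder, where '.' refers to the existing
``CMakeCache.txt``):

.. code-block:: bash

   cmake -D CMAKE_BUILD_TYPE=Release .   # change one setting, keep the rest
   cmake --build .                       # recompile with the new setting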
----------
**Curses version (terminal-style menu) of CMake**\ :
.. code-block:: bash
ccmake ../cmake
You initiate the configuration and build environment generation steps
separately. For the first you have to type **c**\ , for the second you
have to type **g**\ . You may need to type **c** multiple times, and may be
required to edit some of the entries of CMake configuration variables
in between. Please see the `ccmake manual <https://cmake.org/cmake/help/latest/manual/ccmake.1.html>`_ for
more information.
----------
**GUI version of CMake**\ :
.. code-block:: bash
cmake-gui ../cmake
You initiate the configuration and build environment generation steps
separately. For the first you have to click on the **Configure** button,
for the second you have to click on the **Generate** button. You may
need to click on **Configure** multiple times, and may be required to
edit some of the entries of CMake configuration variables in between.
Please see the `cmake-gui manual <https://cmake.org/cmake/help/latest/manual/cmake-gui.1.html>`_
for more information.
----------
Installing CMake
^^^^^^^^^^^^^^^^
Check if your machine already has CMake installed:
@ -216,11 +169,12 @@ software packages, do this:
module list # is a module for cmake already loaded?
module avail # is a module for cmake available?
module load cmake # load cmake module with appropriate name
Most Linux distributions offer pre-compiled cmake packages through their
package management system. If you do not have CMake or a recent enough
version (Note: for CentOS 7.x you need to enable the EPEL repository),
you can download the latest version from `https://cmake.org/download/
<https://cmake.org/download/>`_. Instructions on how to install it on
various platforms can be found `on this page
<https://cmake.org/install/>`_.

View File

@ -1,15 +1,15 @@
Development build options (CMake only)
======================================
The CMake build procedure of LAMMPS offers a few extra options which are
useful during development, testing or debugging.
----------
.. _compilation:
Verify compilation flags
------------------------
Sometimes it is necessary to verify the complete sequence of compilation flags
generated by the CMake build. To enable a more verbose output during
@ -30,7 +30,7 @@ Another way of doing this without reconfiguration is calling make with variable
.. _sanitizer:
Address, Undefined Behavior, and Thread Sanitizer Support
---------------------------------------------------------
Compilers such as GCC and Clang support generating instrumented binaries
which use different sanitizer libraries to detect problems in code
@ -41,10 +41,11 @@ during run-time. They can detect issues like:
- `data races <https://clang.llvm.org/docs/ThreadSanitizer.html>`_
Please note that this kind of instrumentation usually comes with a small
performance hit (much less than using tools like `Valgrind
<https://valgrind.org>`_). To enable these features, additional
compiler flags need to be added to the compilation and linking stages.
This is most easily done through setting the ``CMAKE_TUNE_FLAGS``
variable during configuration. Examples:
.. code-block:: bash
@ -52,8 +53,6 @@ setting the ``CMAKE_TUNE_FLAGS`` variable during configuration. Examples:
-D CMAKE_TUNE_FLAGS=-fsanitize=undefined # enable undefined behavior sanitizer
-D CMAKE_TUNE_FLAGS=-fsanitize=thread # enable thread sanitizer
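A complete configuration command enabling, say, the address sanitizer
could thus look like this (a sketch, run from the build folder):

.. code-block:: bash

   cmake -D CMAKE_TUNE_FLAGS=-fsanitize=address ../cmake
   cmake --build .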
----------
.. _testing:
@ -75,24 +74,26 @@ developers can run the tests directly on their workstation.
-D LAMMPS_TESTING_SOURCE_DIR=path # path to lammps-testing repository (option if in custom location)
-D LAMMPS_TESTING_GIT_TAG=value # version of lammps-testing repository that should be used, value = master (default) or custom git commit or tag
If you enable testing in the CMake build, it will create an additional
target called "test". You can run the tests with:
.. code-block:: bash
make test
cmake --build . --target test
The test cases used come from the lammps-testing repository. They are
derivatives of the examples folder with some modifications to make the
run faster.
You can also collect code coverage metrics while running the tests by
enabling coverage support during building.
.. code-block:: bash
-D ENABLE_COVERAGE=value # enable coverage measurements, value = no (default) or yes
This will also add the following targets to generate coverage reports
after running the LAMMPS executable:
.. code-block:: bash
@ -100,7 +101,8 @@ This will also add the following targets to generate coverage reports after runn
make gen_coverage_html # generate coverage report in HTML format
make gen_coverage_xml # generate coverage report in XML format
These reports require GCOVR to be installed. The easiest way to do this
is to install it via pip:
.. code-block:: bash

View File

@ -1,33 +1,35 @@
Link LAMMPS as a library to another code
========================================
LAMMPS is designed as a library of C++ objects and can thus be
integrated into other applications including Python scripts.
The files ``src/library.cpp`` and ``src/library.h`` define a
C-style API for using LAMMPS as a library. See the :doc:`Howto
library <Howto_library>` doc page for a description of the interface
and how to extend it for your needs.
The :doc:`Build basics <Build_basics>` doc page explains how to build
LAMMPS as either a shared or static library. Building a static library
results in a file in the compilation folder called ``liblammps.a`` or
``liblammps_<name>.a``. For a shared library the name is the same,
except that the suffix is ``.so``, ``.dylib``, or ``.dll`` instead of
``.a``, depending on the OS.
In some cases the ``.so`` file may be a symbolic link to a file with
the suffix ``.so.0`` (or some other number).
.. note::
Care should be taken to use the same MPI library for the calling code
and the LAMMPS library. The ``library.h`` file includes ``mpi.h``
and uses definitions from it so those need to be available and
consistent. When LAMMPS is compiled with the included STUBS MPI
library, then its ``mpi.h`` file needs to be included. While it is
technically possible to use a full MPI library in the calling code
and link to a serial LAMMPS library compiled with MPI STUBS, it is
recommended to use the *same* MPI library for both, and then use
``MPI_Comm_split()`` in the calling code to pass a suitable
communicator with a subset of MPI ranks to the function creating the
LAMMPS instance.
----------
@ -42,11 +44,11 @@ executable code from the library is copied into the calling executable.
*CMake build*\ :
This assumes that LAMMPS has been configured without setting a
``LAMMPS_MACHINE`` name, installed with "make install", and the
``PKG_CONFIG_PATH`` environment variable has been updated to include the
``liblammps.pc`` file installed into the configured destination folder.
The commands to compile and link a coupled executable are then:
.. code-block:: bash
@ -56,30 +58,35 @@ link the coupled executable are then:
*Traditional make*\ :
This assumes that LAMMPS has been compiled in the folder
"${HOME}/lammps/src" with "make mode=lib mpi". The commands to compile
and link the coupled executable are then:
``${HOME}/lammps/src`` with "make mpi". The commands to compile and link
a coupled executable are then:
.. code-block:: bash
mpicc -c -O -I${HOME}/lammps/src caller.c
mpicxx -o caller caller.o -L${HOME}/lammps/src -llammps_mpi
The *-I* argument is the path to the location of the ``library.h``
header file containing the C-style interface to the LAMMPS library.
The *-L* argument is the path to where the ``liblammps_mpi.a``
file is located. The *-llammps_mpi* argument is shorthand for telling the
compiler to link the file ``liblammps_mpi.a``. If LAMMPS has been
built as a shared library, then the linker will use ``liblammps_mpi.so``
instead. If both files are available, the linker will usually prefer
the shared library. In case of a shared library, you may need to update
the ``LD_LIBRARY_PATH`` environment variable, otherwise running the
``caller`` executable will fail since it cannot find the shared library
at runtime.
However, it is only as simple as shown above for the case of a plain
LAMMPS library without any optional packages that depend on libraries
(bundled or external) or when using a shared library. Otherwise, you
need to include all flags, libraries, and paths for the coupled
executable that are also required to link the LAMMPS executable.
*CMake build*\ :
When using CMake, additional libraries with sources in the lib folder
are built, but not included in ``liblammps.a`` and (currently) not
installed with "make install" and not included in the *pkgconfig*
configuration file. They can be found in the top level build folder,
but you have to determine the necessary link flags manually. It is
@ -87,23 +94,26 @@ therefore recommended to either use the traditional make procedure to
build and link with a static library or build and link with a shared
library instead.
.. TODO: this needs to be updated to reflect that latest CMake changes after they are complete.
*Traditional make*\ :
After you have compiled a static LAMMPS library using the conventional
build system, for example with ``make mode=static serial``, and you have
also installed the POEMS package after building its bundled library
in ``lib/poems``, the commands to build and link the coupled executable
change to:
.. code-block:: bash
gcc -c -O -I${HOME}/lammps/src/STUBS -I${HOME}/lammps/src caller.c
g++ -o caller caller.o -L${HOME}/lammps/lib/poems \
-L${HOME}/lammps/src/STUBS -L${HOME}/lammps/src -llammps_serial -lpoems -lmpi_stubs
Note that you need to link with ``g++`` instead of ``gcc``, since the
LAMMPS library is C++ code. You can display the currently applied
settings for building LAMMPS for the "serial" machine target with
the command:
.. code-block:: bash
@ -113,16 +123,16 @@ Which should output something like:
.. code-block:: bash
# Compiler:
CXX=g++
# Linker:
LD=g++
# Compilation:
CXXFLAGS=-g -O3 -DLAMMPS_GZIP -DLAMMPS_MEMALIGN=64 -I${HOME}/compile/lammps/lib/poems -I${HOME}/compile/lammps/src/STUBS
# Linking:
LDFLAGS=-g -O
# Libraries:
LDLIBS=-L${HOME}/compile/lammps/src -llammps_serial -L${HOME}/compile/lammps/lib/poems -L${HOME}/compile/lammps/src/STUBS -lpoems -lmpi_stubs
From this you can gather the necessary paths and flags. With
makefiles for other *machine* configurations you need to do the
@ -133,14 +143,13 @@ of the makefile.
**Link with LAMMPS as a shared library**\ :
When linking to LAMMPS built as a shared library, the situation becomes
much simpler, as all dependent libraries and objects are either included
in the shared library or registered as a dependent library in the shared
library file. Thus those libraries need not be specified when
linking the calling executable. Only the *-I* flags are needed. So the
example case from above (the serial version of the LAMMPS library with
the POEMS package installed) becomes:
*CMake build*\ :
@ -155,19 +164,19 @@ build process are the same as for the static library.
*Traditional make*\ :
The commands with a shared LAMMPS library compiled with the
traditional make build using "make mode=shlib serial" becomes:
traditional make build using "make mode=shared serial" becomes:
.. code-block:: bash
gcc -c -O -I${HOME}/lammps/src/STUBS -I${HOME}/lammps/src caller.c
g++ -o caller caller.o -L${HOME}/lammps/src -llammps_serial
*Locating liblammps.so at runtime*\ :
However, now the ``liblammps.so`` file is required at runtime and needs
to be in a folder where the shared linker program of the operating
system can find it. This would be either a folder like ``/usr/local/lib64``
or ``${HOME}/.local/lib64`` or a folder pointed to by the ``LD_LIBRARY_PATH``
environment variable. You can type
.. code-block:: bash
@ -177,11 +186,11 @@ environment variable. You can type
to see what directories are in that list.
Or you can add the LAMMPS src directory (or the directory you performed
a CMake style build in) to your ``LD_LIBRARY_PATH``, so that the current
version of the shared library is always available to programs that use it.
For the Bourne or Korn shells (/bin/sh, /bin/ksh, /bin/bash etc.), you
would add something like this to your ``${HOME}/.profile`` file:
.. code-block:: bash
@ -189,14 +198,14 @@ would add something like this to your ~/.profile file:
export LD_LIBRARY_PATH
For the csh or tcsh shells, you would equivalently add something like this
to your ``${HOME}/.cshrc`` file:
.. code-block:: csh
setenv LD_LIBRARY_PATH ${LD_LIBRARY_PATH}:${HOME}/lammps/src
You can verify whether all required shared libraries are found with the
``ldd`` tool. Example:
.. code-block:: bash
@ -229,20 +238,20 @@ If a required library is missing, you would get a 'not found' entry:
Either flavor of library (static or shared) allows one or more LAMMPS
objects to be instantiated from the calling program. When used from a
C++ program, most of the symbols and functions in LAMMPS are wrapped
in a ``LAMMPS_NS`` namespace; you can safely use any of its classes and
methods from within the calling code, as needed, and you will not incur
conflicts with functions and variables in your code that share the name.
This, however, does not extend to all additional libraries bundled with
LAMMPS in the lib folder and some of the low-level code of some packages.
To be compatible with C, Fortran, Python programs, the library has a simple
C-style interface, provided in ``src/library.cpp`` and ``src/library.h``.
See the :doc:`Python library <Python_library>` doc page for a
description of the Python interface to LAMMPS, which wraps the C-style
interface from a shared library through the `ctypes python module <ctypes_>`_.
See the sample codes in ``examples/COUPLE/simple`` for examples of C++ and
C and Fortran codes that invoke LAMMPS through its library interface.
Other examples in the COUPLE directory use coupling ideas discussed on
the :doc:`Howto couple <Howto_couple>` doc page.

View File

@ -2,10 +2,14 @@ Build LAMMPS with make
======================
Building LAMMPS with traditional makefiles requires that you have a
Makefile."machine" file appropriate for your system in the src/MAKE,
src/MAKE/MACHINES, src/MAKE/OPTIONS, or src/MAKE/MINE directory (see
below). It can include various options for customizing your LAMMPS
build with a number of global compilation options and features.
``Makefile.<machine>`` file appropriate for your system in either the
``src/MAKE``, ``src/MAKE/MACHINES``, ``src/MAKE/OPTIONS``, or
``src/MAKE/MINE`` directory (see below). It can include various options
for customizing your LAMMPS build with a number of global compilation
options and features.
Requirements
^^^^^^^^^^^^
Those makefiles are written for and tested with GNU make and may not
be compatible with other make programs. In most cases, if the "make"
@ -16,44 +20,54 @@ with :doc:`CMake <Build_cmake>`. The makefiles of the traditional
make based build process and the scripts they are calling expect a few
additional tools to be available and functioning.
* a Bourne shell compatible "Unix" shell program (often this is bash)
* a few shell utilities: ls, mv, ln, rm, grep, sed, tr, cat, touch, diff, dirname
* python (optional, required for "make lib-XXX" in the src folder)
* a working C/C++ compiler toolchain supporting the C++11 standard; on
Linux these are often the GNU compilers. Some older compilers
require adding flags like ``-std=c++11`` to enable the C++11 mode.
* a Bourne shell compatible "Unix" shell program (often this is ``bash``)
* a few shell utilities: ``ls``, ``mv``, ``ln``, ``rm``, ``grep``, ``sed``, ``tr``, ``cat``, ``touch``, ``diff``, ``dirname``
* python (optional, required for ``make lib-<pkg>`` in the src folder);
Python scripts are currently tested with Python 2.7 and 3.6. The procedure
for :doc:`building the documentation <Manual_build>` requires Python 3.
Getting started
^^^^^^^^^^^^^^^
To include LAMMPS packages (i.e. optional commands and styles) you must
enable (or "install") them first, as discussed on the :doc:`Build
package <Build_package>` doc page. If a package requires (provided or
external) libraries, you must configure and build those libraries
**before** building LAMMPS itself and especially **before** enabling
such a package with "make yes-<package>". Building :doc:`LAMMPS
with CMake <Build_cmake>` can automate much of this for many types of
such a package with ``make yes-<package>``. Building :doc:`LAMMPS with
CMake <Build_cmake>` can automate much of this for many types of
machines, especially workstations, desktops, and laptops, so we suggest
you try it first when building LAMMPS in those cases.
The commands below perform a default LAMMPS build, producing the LAMMPS
executable ``lmp_serial`` and ``lmp_mpi`` in ``lammps/src``:
.. code-block:: bash
cd lammps/src # change to main LAMMPS source folder
make serial # build a serial LAMMPS executable using GNU g++
make mpi # build a parallel LAMMPS executable with MPI
make # see a variety of make options
Compilation can take a long time, since LAMMPS is a large project with
many features. If your machine has multiple CPU cores (most do these
days), you can speed this up by compiling sources in parallel with
``make -j N`` (with N being the maximum number of concurrently executed
tasks). Also installation of the `ccache <https://ccache.dev/>`_ (=
Compiler Cache) software may speed up repeated compilation even more,
e.g. during code development.
After the initial build, whenever you edit LAMMPS source files, or add
files to or remove files from the source directory (e.g. by installing
or uninstalling packages), you must re-compile and relink the LAMMPS
executable with the same ``make <machine>`` command. The makefile's
dependency tracking should ensure that only the necessary subset of
files is re-compiled. If you change settings in the makefile, you have
to recompile *everything*. To delete all compiled objects you can use
``make clean-<machine>``.
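Put together, a typical build and cleanup sequence for the "mpi" machine
target could look like this (a sketch; adjust the number of parallel
tasks to your machine):

.. code-block:: bash

   cd lammps/src
   make -j 4 mpi      # compile and link with up to 4 parallel tasks
   make clean-mpi     # delete all objects compiled for the "mpi" target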
.. note::
@ -65,13 +79,15 @@ re-compiled.
correctly detect which parts need to be recompiled after changes
were made to the sources.
----------
Customized builds and alternate makefiles
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
The ``src/MAKE`` directory tree contains the ``Makefile.<machine>``
files included in the LAMMPS distribution. Typing ``make example`` uses
``Makefile.example`` from one of those folders, if available. Thus the
``make serial`` and ``make mpi`` lines above use
``src/MAKE/Makefile.serial`` and ``src/MAKE/Makefile.mpi``,
respectively. Other makefiles are in these directories:
.. code-block:: bash
@ -79,11 +95,16 @@ are in these directories:
MACHINES # Makefiles for specific machines
MINE # customized Makefiles you create (you may need to create this folder)
Typing "make" lists all the available Makefile.machine files. A file
with the same name can appear in multiple folders (not a good idea).
The order the directories are searched is as follows: src/MAKE/MINE,
src/MAKE, src/MAKE/OPTIONS, src/MAKE/MACHINES. This gives preference
to a customized file you put in src/MAKE/MINE.
Simply typing ``make`` lists all the available ``Makefile.<machine>``
files with a single line description toward the end of the output. A
file with the same name can appear in multiple folders (not a good
idea). The order the directories are searched is as follows:
``src/MAKE/MINE``, ``src/MAKE``, ``src/MAKE/OPTIONS``,
``src/MAKE/MACHINES``. This gives preference to a customized file you
put in ``src/MAKE/MINE``. If you create your own custom makefile under
a new name, please edit the first line with the description and machine
name, so you will not confuse yourself, when looking at the machine
summary.
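A common workflow for creating such a customized makefile is to copy a
bundled one under a new name into ``src/MAKE/MINE`` and edit it (a
sketch; the machine name ``mymachine`` is just an example):

.. code-block:: bash

   cd lammps/src
   mkdir -p MAKE/MINE                                 # create the folder if missing
   cp MAKE/Makefile.mpi MAKE/MINE/Makefile.mymachine
   # edit the description in the first line, then compiler flags etc.
   make mymachine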
Makefiles you may wish to try include these (some require a package
first be installed). Many of these include specific compiler flags

View File

@ -130,6 +130,8 @@ src directory.
----------
.. _cmake_presets:
**CMake shortcuts for installing many packages**\ :
Instead of specifying all the CMake options via the command-line,

View File

@ -12,5 +12,6 @@ additional details for many of them.
Errors_common
Errors_bugs
Errors_debug
Errors_messages
Errors_warnings

View File

@ -1,7 +1,8 @@
Reporting bugs
==============
If you are confident that you have found a bug in LAMMPS, please follow
the steps outlined below:
* Check the `New features and bug fixes
<https://lammps.sandia.gov/bug.html>`_ section of the `LAMMPS WWW site
@ -17,20 +18,22 @@ If you are confident that you have found a bug in LAMMPS, please follow the step
* Check the `mailing list archives <https://lammps.sandia.gov/mail.html>`_
to see if the issue has been discussed before.
If none of these steps yields any useful information, please file a new
bug report on the `GitHub Issue page <gip_>`_. The website will offer
you a choice of suitable templates with explanations; replace those
explanations with the information needed to reproduce your issue.
The most useful thing you can do to help us verify and fix a bug is to
isolate the problem. Run it on the smallest number of atoms and fewest
number of processors with the simplest input script that reproduces the
bug. Try to identify what command or combination of commands is causing
the problem and upload the complete input deck as a tar or zip archive.
Please avoid using binary restart files unless the issue requires it.
In the latter case you should also include an input deck to quickly
generate this restart from a data file or a simple additional input.
This input deck can be used with tools like a debugger or `valgrind
<valgrind_>`_ to further :doc:`debug the crash <Errors_debug>`.
You may also send an email to the LAMMPS mailing list at
"lammps-users at lists.sourceforge.net" describing the problem with the
@ -43,3 +46,4 @@ have looked at it.
.. _lws: https://lammps.sandia.gov
.. _gip: https://github.com/lammps/issues
.. _valgrind: https://valgrind.org

doc/src/Errors_debug.rst Normal file
View File

@ -0,0 +1,237 @@
Debugging crashes
=================
If LAMMPS crashes with a "segmentation fault" or a "bus error" or
similar message, then you can use the following two methods to further
narrow down the origin of the issue. This will help the LAMMPS
developers (or yourself) to understand the reason for the crash and
apply a fix (either to the input script or the source code).
This requires that your LAMMPS executable includes the required
:ref:`debug information <debug>`. Otherwise it is not possible to
look up the names of functions or variables.
The following patch will introduce a bug into the code for pair style
:doc:`lj/cut <pair_lj>` when using the ``examples/melt/in.melt`` input.
We use it to show how to identify the origin of a segmentation fault.
.. code-block:: diff
--- a/src/pair_lj_cut.cpp
+++ b/src/pair_lj_cut.cpp
@@ -81,6 +81,7 @@ void PairLJCut::compute(int eflag, int vflag)
int nlocal = atom->nlocal;
double *special_lj = force->special_lj;
int newton_pair = force->newton_pair;
+ double comx = 0.0;
inum = list->inum;
ilist = list->ilist;
@@ -134,8 +135,10 @@ void PairLJCut::compute(int eflag, int vflag)
evdwl,0.0,fpair,delx,dely,delz);
}
}
- }
+ comx += atom->rmass[i]*x[i][0]; /* BUG */
+ }
+ printf("comx = %g\n",comx);
if (vflag_fdotr) virial_fdotr_compute();
}
After recompiling LAMMPS and running the input you should get something like this:
.. code-block::
$ ./lmp -in in.melt
LAMMPS (19 Mar 2020)
OMP_NUM_THREADS environment is not set. Defaulting to 1 thread. (src/comm.cpp:94)
using 1 OpenMP thread(s) per MPI task
Lattice spacing in x,y,z = 1.6796 1.6796 1.6796
Created orthogonal box = (0 0 0) to (16.796 16.796 16.796)
1 by 1 by 1 MPI processor grid
Created 4000 atoms
create_atoms CPU = 0.000432253 secs
Neighbor list info ...
update every 20 steps, delay 0 steps, check no
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 2.8
ghost atom cutoff = 2.8
binsize = 1.4, bins = 12 12 12
1 neighbor lists, perpetual/occasional/extra = 1 0 0
(1) pair lj/cut, perpetual
attributes: half, newton on
pair build: half/bin/atomonly/newton
stencil: half/bin/3d/newton
bin: standard
Setting up Verlet run ...
Unit style : lj
Current step : 0
Time step : 0.005
Segmentation fault (core dumped)
Using the GDB debugger to get a stack trace
-------------------------------------------
There are two options to use the GDB debugger for identifying the origin
of the segmentation fault or similar crash. The GDB debugger has many
more features and options, as can be seen, for example, in its `online
documentation <http://sourceware.org/gdb/current/onlinedocs/gdb/>`_.
Run LAMMPS from within the debugger
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Running LAMMPS under the control of the debugger as shown below only
works for a single MPI rank (for debugging a program running in parallel
you usually need a parallel debugger program). A simple way to launch
GDB is to prefix the LAMMPS command line with ``gdb --args`` and then
type the command "run" at the GDB prompt. This will launch the
debugger, load the LAMMPS executable and its debug info, and then run
it. When it reaches the code causing the segmentation fault, it will
stop with a message why it stopped, print the current line of code, and
drop back to the GDB prompt.
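Concretely, the session from the example above could be started like
this (a sketch):

.. code-block:: bash

   gdb --args ./lmp -in in.melt   # load executable and debug info
   # then type "run" at the (gdb) prompt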
.. code-block::
[...]
Setting up Verlet run ...
Unit style : lj
Current step : 0
Time step : 0.005
Program received signal SIGSEGV, Segmentation fault.
0x00000000006653ab in LAMMPS_NS::PairLJCut::compute (this=0x829740, eflag=1, vflag=<optimized out>) at /home/akohlmey/compile/lammps/src/pair_lj_cut.cpp:139
139 comx += atom->rmass[i]*x[i][0]; /* BUG */
(gdb)
Now typing the command "where" will show the stack of functions starting from
the current function back to "main()".
.. code-block::
(gdb) where
#0 0x00000000006653ab in LAMMPS_NS::PairLJCut::compute (this=0x829740, eflag=1, vflag=<optimized out>) at /home/akohlmey/compile/lammps/src/pair_lj_cut.cpp:139
#1 0x00000000004cf0a2 in LAMMPS_NS::Verlet::setup (this=0x7e6c90, flag=1) at /home/akohlmey/compile/lammps/src/verlet.cpp:131
#2 0x000000000049db42 in LAMMPS_NS::Run::command (this=this@entry=0x7fffffffcca0, narg=narg@entry=1, arg=arg@entry=0x7e8750)
at /home/akohlmey/compile/lammps/src/run.cpp:177
#3 0x000000000041258a in LAMMPS_NS::Input::command_creator<LAMMPS_NS::Run> (lmp=<optimized out>, narg=1, arg=0x7e8750)
at /home/akohlmey/compile/lammps/src/input.cpp:878
#4 0x0000000000410ad3 in LAMMPS_NS::Input::execute_command (this=0x7d1410) at /home/akohlmey/compile/lammps/src/input.cpp:864
#5 0x00000000004111fb in LAMMPS_NS::Input::file (this=0x7d1410) at /home/akohlmey/compile/lammps/src/input.cpp:229
#6 0x000000000040933a in main (argc=<optimized out>, argv=<optimized out>) at /home/akohlmey/compile/lammps/src/main.cpp:65
(gdb)
You can also print the value of variables and see if there is anything
unexpected. Segmentation faults, for example, commonly happen when a
pointer variable has not been assigned and is still initialized to NULL.
.. code-block::
(gdb) print x
$1 = (double **) 0x7ffff7ca1010
(gdb) print i
$2 = 0
(gdb) print x[0]
$3 = (double *) 0x7ffff6d80010
(gdb) print x[0][0]
$4 = 0
(gdb) print x[1][0]
$5 = 0.83979809569125363
(gdb) print atom->rmass
$6 = (double *) 0x0
(gdb)
Inspect a core dump file with the debugger
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
When an executable crashes with a "core dumped" message, it creates a
file "core" or "core.<PID#>" which contains the information about the
current state. This file may be located in the folder where you ran
LAMMPS or in some hidden folder managed by the systemd daemon. In the
latter case, you need to "extract" the core file with the ``coredumpctl``
utility to the current folder. Example: ``coredumpctl -o core dump lmp``.
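On many Linux systems core files are disabled by default, so you may
have to raise the core file size limit first. A possible sequence (a
sketch, assuming a systemd-based system) would be:

.. code-block:: bash

   ulimit -c unlimited           # allow core files in the current shell
   ./lmp -in in.melt             # run until the crash ("core dumped")
   coredumpctl -o core dump lmp  # extract the core file, if managed by systemd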
Now you can launch the debugger to load the executable, its debug info
and the core dump and drop you to a prompt like before.
.. code-block::
$ gdb lmp core
Reading symbols from lmp...
[New LWP 1928535]
[Thread debugging using libthread_db enabled]
Using host libthread_db library "/lib64/libthread_db.so.1".
Core was generated by `./lmp -in in.melt'.
Program terminated with signal SIGSEGV, Segmentation fault.
#0 0x00000000006653ab in LAMMPS_NS::PairLJCut::compute (this=0x1b10740, eflag=1, vflag=<optimized out>)
at /home/akohlmey/compile/lammps/src/pair_lj_cut.cpp:139
139 comx += atom->rmass[i]*x[i][0]; /* BUG */
(gdb)
From here on, you use the same commands as shown before to get a stack
trace and print current values of (pointer) variables.
Using valgrind to get a stack trace
-----------------------------------
The `valgrind <https://valgrind.org>`_ suite of tools allows you to
closely inspect the behavior of a compiled program by essentially
emulating a CPU and instrumenting the program while it runs. This slows
down execution quite significantly, but it can also report issues that
do not result in a crash. The default valgrind tool is a memory checker,
and you can use it by prefixing the normal command line with ``valgrind``.
Unlike GDB, this will also work for parallel execution, but it is
recommended to redirect the valgrind output to a file (e.g. with
``--log-file=crash-%p.txt``, where the %p will be substituted with the
process ID) so that the messages of the multiple valgrind instances
are not mixed on the console.
.. code-block::
$ valgrind ./lmp -in in.melt
==1933642== Memcheck, a memory error detector
==1933642== Copyright (C) 2002-2017, and GNU GPL'd, by Julian Seward et al.
==1933642== Using Valgrind-3.15.0 and LibVEX; rerun with -h for copyright info
==1933642== Command: ./lmp -in in.melt
==1933642==
LAMMPS (19 Mar 2020)
OMP_NUM_THREADS environment is not set. Defaulting to 1 thread. (src/comm.cpp:94)
using 1 OpenMP thread(s) per MPI task
Lattice spacing in x,y,z = 1.6796 1.6796 1.6796
Created orthogonal box = (0 0 0) to (16.796 16.796 16.796)
1 by 1 by 1 MPI processor grid
Created 4000 atoms
create_atoms CPU = 0.032964 secs
Neighbor list info ...
update every 20 steps, delay 0 steps, check no
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 2.8
ghost atom cutoff = 2.8
binsize = 1.4, bins = 12 12 12
1 neighbor lists, perpetual/occasional/extra = 1 0 0
(1) pair lj/cut, perpetual
attributes: half, newton on
pair build: half/bin/atomonly/newton
stencil: half/bin/3d/newton
bin: standard
Setting up Verlet run ...
Unit style : lj
Current step : 0
Time step : 0.005
==1933642== Invalid read of size 8
==1933642== at 0x6653AB: LAMMPS_NS::PairLJCut::compute(int, int) (pair_lj_cut.cpp:139)
==1933642== by 0x4CF0A1: LAMMPS_NS::Verlet::setup(int) (verlet.cpp:131)
==1933642== by 0x49DB41: LAMMPS_NS::Run::command(int, char**) (run.cpp:177)
==1933642== by 0x412589: void LAMMPS_NS::Input::command_creator<LAMMPS_NS::Run>(LAMMPS_NS::LAMMPS*, int, char**) (input.cpp:881)
==1933642== by 0x410AD2: LAMMPS_NS::Input::execute_command() (input.cpp:864)
==1933642== by 0x4111FA: LAMMPS_NS::Input::file() (input.cpp:229)
==1933642== by 0x409339: main (main.cpp:65)
==1933642== Address 0x0 is not stack'd, malloc'd or (recently) free'd
==1933642==
As you can see, the stack trace information is similar to that obtained
from GDB. In addition you get a more specific hint about what caused the
segmentation fault, i.e. that it is a NULL pointer dereference. To find
out exactly which pointer was NULL, you need to use the debugger, though.
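For a parallel run the same memory check could be started like this (a
sketch, assuming an MPI launcher called ``mpirun``); each process then
writes its report to its own ``crash-<PID>.txt`` file:

.. code-block:: bash

   mpirun -np 4 valgrind --log-file=crash-%p.txt ./lmp -in in.melt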

View File

@ -16,6 +16,7 @@ Tutorials howto
:name: tutorials
:maxdepth: 1
Howto_cmake
Howto_github
Howto_pylammps
Howto_bash

doc/src/Howto_cmake.rst Normal file
View File

@ -0,0 +1,483 @@
Using CMake with LAMMPS tutorial
================================
The support for building LAMMPS with CMake is a recent addition to
LAMMPS thanks to the efforts of Christoph Junghans (LANL) and Richard
Berger (Temple U). One of the key strengths of CMake is that it is not
tied to a specific platform or build system and can thus generate the
files necessary to build and develop for different build systems and on
different platforms. Note that this applies to the build system itself,
not the LAMMPS code. In other words, without additional porting effort
it is not possible, for example, to compile LAMMPS with Visual C++ on
Windows. The build system output can also include support files
necessary to program LAMMPS as a project in integrated development
environments (IDE) like Eclipse, Visual Studio, QtCreator, Xcode,
CodeBlocks, Kate and others.
A second important feature of CMake is that it can detect and validate
available libraries, optimal settings, and available support tools, so
that by default LAMMPS will take advantage of available tools without
requiring you to provide the details of how to enable and integrate them.
The downside of this approach is that there is some complexity
associated with running CMake itself and with how to customize the
building of LAMMPS. This tutorial will show how to manage this through some
selected examples. Please see the chapter about :doc:`building LAMMPS
<Build>` for descriptions of specific flags and options for LAMMPS in
general and for specific packages.
CMake can be used through either the command-line interface (CLI)
program ``cmake`` (or ``cmake3``), a text mode interactive user
interface (TUI) program ``ccmake`` (or ``ccmake3``), or a graphical user
interface (GUI) program ``cmake-gui``. All of them are portable
software available on all supported platforms and can be used
interchangeably. The minimum supported CMake version is 3.10 (3.12 or
later is recommended).
All details about features and settings for CMake are in the `CMake
online documentation <https://cmake.org/documentation/>`_. We focus
below on the most important aspects with respect to compiling LAMMPS.
Prerequisites
-------------
This tutorial assumes that you are operating in a command-line environment
using a shell like Bash.
- Linux: any Terminal window will work
- MacOS X: launch the Terminal application.
- Windows 10: install and run the :doc:`Windows subsystem for Linux <Howto_bash>`
We also assume that you have downloaded and unpacked a recent LAMMPS source code package
or used Git to create a clone of the LAMMPS sources on your compilation machine.
You should change into the top level directory of the LAMMPS source
tree; all paths mentioned in the tutorial are relative to it.
Immediately after downloading it should look like this:
.. code-block:: bash
$ ls
bench doc lib potentials README tools
cmake examples LICENSE python src
Build versus source directory
-----------------------------
When using CMake the build procedure is separated into multiple distinct phases:
#. **Configuration:** detect or define which features and settings
should be enabled and used and how LAMMPS should be compiled
#. **Compilation:** generate and compile all necessary source files
and build libraries and executables.
#. **Installation:** copy selected files from the compilation into
your file system, so they can be used without having to keep the
source and build tree around.
The configuration and compilation of LAMMPS has to happen in a dedicated
*build directory*, which must be different from the source directory.
Also the source directory (``src``) must remain pristine: it is not
allowed to "install" packages using the traditional make process, and
after a compilation attempt all created source files must be removed.
This can be achieved with ``make no-all purge``.
You can pick **any** folder outside the source tree. We recommend
creating a folder ``build`` in the top-level directory, or multiple
folders in case you want to have separate builds of LAMMPS with
different options (``build-parallel``, ``build-serial``) or with
different compilers (``build-gnu``, ``build-clang``, ``build-intel``)
and so on. All the auxiliary files created by one build process
(executable, object files, log files, etc) are stored in this directory
or sub-directories within it that CMake creates.
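As an illustration, two independent build configurations could be set up
side by side like this (a sketch; ``BUILD_MPI`` is assumed here as the
flag that toggles MPI support):

.. code-block:: bash

   mkdir build-serial build-parallel
   cd build-serial
   cmake -D BUILD_MPI=no ../cmake && cmake --build .
   cd ../build-parallel
   cmake -D BUILD_MPI=yes ../cmake && cmake --build .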
Running CMake
-------------
CLI version
^^^^^^^^^^^
In the (empty) ``build`` directory, we now run the command ``cmake
../cmake``, which will start the configuration phase and you will see
the progress of the configuration printed to the screen followed by a
summary of the enabled features, options and compiler settings. A typical
summary screen will look like this:
.. code-block::
$ cmake ../cmake/
-- The CXX compiler identification is GNU 8.2.0
-- Check for working CXX compiler: /opt/tools/gcc-8.2.0/bin/c++
-- Check for working CXX compiler: /opt/tools/gcc-8.2.0/bin/c++ - works
-- Detecting CXX compiler ABI info
-- Detecting CXX compiler ABI info - done
-- Detecting CXX compile features
-- Detecting CXX compile features - done
-- Found Git: /usr/bin/git (found version "2.25.2")
-- Running check for auto-generated files from make-based build system
-- Found MPI_CXX: /usr/lib64/mpich/lib/libmpicxx.so (found version "3.1")
-- Found MPI: TRUE (found version "3.1")
-- Looking for C++ include omp.h
-- Looking for C++ include omp.h - found
-- Found OpenMP_CXX: -fopenmp (found version "4.5")
-- Found OpenMP: TRUE (found version "4.5")
-- Found JPEG: /usr/lib64/libjpeg.so (found version "62")
-- Found PNG: /usr/lib64/libpng.so (found version "1.6.37")
-- Found ZLIB: /usr/lib64/libz.so (found version "1.2.11")
-- Found GZIP: /usr/bin/gzip
-- Found FFMPEG: /usr/bin/ffmpeg
-- Performing Test COMPILER_SUPPORTS-ffast-math
-- Performing Test COMPILER_SUPPORTS-ffast-math - Success
-- Performing Test COMPILER_SUPPORTS-march=native
-- Performing Test COMPILER_SUPPORTS-march=native - Success
-- Looking for C++ include cmath
-- Looking for C++ include cmath - found
-- Generating style_angle.h...
[...]
-- Generating lmpinstalledpkgs.h...
-- The following tools and libraries have been found and configured:
* Git
* MPI
* OpenMP
* JPEG
* PNG
* ZLIB
-- <<< Build configuration >>>
Build type: RelWithDebInfo
Install path: /home/akohlmey/.local
Generator: Unix Makefiles using /usr/bin/gmake
-- <<< Compilers and Flags: >>>
-- C++ Compiler: /opt/tools/gcc-8.2.0/bin/c++
Type: GNU
Version: 8.2.0
C++ Flags: -O2 -g -DNDEBUG
Defines: LAMMPS_SMALLBIG;LAMMPS_MEMALIGN=64;LAMMPS_JPEG;LAMMPS_PNG;LAMMPS_GZIP;LAMMPS_FFMPEG
Options: -ffast-math;-march=native
-- <<< Linker flags: >>>
-- Executable name: lmp
-- Static library flags:
-- <<< MPI flags >>>
-- MPI includes: /usr/include/mpich-x86_64
-- MPI libraries: /usr/lib64/mpich/lib/libmpicxx.so;/usr/lib64/mpich/lib/libmpi.so;
-- Configuring done
-- Generating done
-- Build files have been written to: /home/akohlmey/compile/lammps/build
The ``cmake`` command has one mandatory argument: a folder containing
either the file ``CMakeLists.txt`` or ``CMakeCache.txt``. The
``CMakeCache.txt`` file is created during the CMake configuration run
and contains all active settings; thus, after a first run of CMake,
all future runs in the build folder can use the folder ``.`` and CMake
will know where to find the CMake scripts and reload the settings
from the previous step. This means that one can modify an existing
configuration by re-running CMake and only needs to provide flags
indicating the desired changes; everything else will be retained. One
can also alternate between configuration and compilation, i.e. start
with a minimal configuration and then, if needed, enable additional
features and recompile.
The steps above **will NOT compile the code**\ . The compilation can be
started in a portable fashion with ``cmake --build .``, or by invoking
the selected build tool directly, e.g. ``make``.
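Putting the steps together, a minimal sketch of the full CLI workflow
(the package choice is just an example) looks like this:

.. code-block:: bash

mkdir build && cd build
cmake ../cmake               # initial configuration
cmake -D PKG_MOLECULE=on .   # later: change one setting, keep the rest
cmake --build .              # compile with the selected build tool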
TUI version
^^^^^^^^^^^
For the text mode UI CMake program the basic principle is the same.
You start the command ``ccmake ../cmake`` in the ``build`` folder.
.. list-table::
* - .. figure:: JPG/ccmake-initial.png
:target: JPG/ccmake-initial.png
:align: center
Initial ``ccmake`` screen
- .. figure:: JPG/ccmake-config.png
:target: JPG/ccmake-config.png
:align: center
Configure output of ``ccmake``
- .. figure:: JPG/ccmake-options.png
:target: JPG/ccmake-options.png
:align: center
Options screen of ``ccmake``
This will show you the initial screen (left image) with the empty
configuration cache. Now you hit the 'c' key to run the configuration
step. That will do a first configuration run and show the summary
(center image). You exit the summary screen with 'e' and now see the
main screen with the detected options and settings. You can make
changes by moving up and down with the arrow keys and modifying
entries. For on/off settings, the enter key will toggle the state.
For others, hitting enter will allow you to modify the value; you
commit the change by hitting enter again or cancel with the escape
key. All "new" settings will be marked with a star '\*', and as long
as any setting is marked like this, you have to re-run the
configuration by hitting the 'c' key again, sometimes multiple times,
until the TUI shows the word "generate" next to the letter 'g'.
Hitting the 'g' key will then write the build files to the folder and
exit the TUI. You can quit without generating build files by hitting
'q'.
GUI version
^^^^^^^^^^^
For the graphical CMake program the steps are similar to the TUI
version. You can type the command ``cmake-gui ../cmake`` in the
``build`` folder. In this case the path to the CMake script folder is
not strictly required, since it can also be entered from the GUI.
.. list-table::
* - .. figure:: JPG/cmake-gui-initial.png
:target: JPG/cmake-gui-initial.png
:align: center
Initial ``cmake-gui`` screen
- .. figure:: JPG/cmake-gui-popup.png
:target: JPG/cmake-gui-popup.png
:align: center
Generator selection in ``cmake-gui``
- .. figure:: JPG/cmake-gui-options.png
:target: JPG/cmake-gui-options.png
:align: center
Options screen of ``cmake-gui``
Again, you start with an empty configuration cache (left image) and
need to run the configuration step. For the very first configuration
in a folder, a pop-up dialog (center image) will ask you to select the
desired build tool and some configuration settings (stick with the
defaults); then you get the options screen with all new settings
highlighted in red. You can modify them (or not) and click on the
"Configure" button again until satisfied, then click on the "Generate"
button to write out the build files. You can exit the GUI from the
"File" menu or hit "ctrl-q".
Setting options
---------------
Options that enable, disable, or modify settings are controlled by
setting the value of CMake variables. This is done on the command line
with the *-D* flag in the format ``-D VARIABLE=value``, e.g. ``-D
CMAKE_BUILD_TYPE=Release`` or ``-D BUILD_MPI=on``. There is one quirk:
when used before the CMake directory argument, there may be a space
between the *-D* flag and the variable; when used after it, there must
not be. Such CMake variables can have boolean values (on/off, yes/no,
or 1/0 are all valid), or they are strings representing a choice, a
path, or free-form text. If a string contains whitespace, it must be
put in quotes, for example
``-D CMAKE_TUNE_FLAGS="-ftree-vectorize -ffast-math"``.
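To illustrate the quirk mentioned above, here is a sketch following
the rule as stated:

.. code-block:: bash

cmake -D CMAKE_BUILD_TYPE=Release ../cmake   # before the directory: space after -D allowed
cmake ../cmake -DCMAKE_BUILD_TYPE=Release    # after the directory: no space after -D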
CMake variables fall into two categories: 1) common CMake variables
that are used by default for any CMake configuration and 2) project
specific variables, i.e. settings that are specific to LAMMPS. Also,
CMake variables can be flagged as *advanced*, which means they are not
shown by default in the overview of all settings in the text mode or
graphical CMake program, but only when explicitly requested (by
hitting the 't' key or clicking on the 'Advanced' check-box).
Some common CMake variables
^^^^^^^^^^^^^^^^^^^^^^^^^^^
.. list-table::
:header-rows: 1
* - Variable
- Description
* - ``CMAKE_INSTALL_PREFIX``
- root directory of install location for ``make install`` (default: ``$HOME/.local``)
* - ``CMAKE_BUILD_TYPE``
- controls compilation options:
one of ``RelWithDebInfo`` (default), ``Release``, ``Debug``, ``MinSizeRel``
* - ``BUILD_SHARED_LIBS``
- if set to ``on`` build the LAMMPS library as shared library (default: ``off``)
* - ``CMAKE_MAKE_PROGRAM``
- name/path of the compilation command (default depends on *-G* option, usually ``make``)
* - ``CMAKE_VERBOSE_MAKEFILE``
- if set to ``on`` echo commands while executing during build (default: ``off``)
* - ``CMAKE_C_COMPILER``
- C compiler to be used for compilation (default: system specific, ``gcc`` on Linux)
* - ``CMAKE_CXX_COMPILER``
- C++ compiler to be used for compilation (default: system specific, ``g++`` on Linux)
* - ``CMAKE_Fortran_COMPILER``
- Fortran compiler to be used for compilation (default: system specific, ``gfortran`` on Linux)
* - ``CXX_COMPILER_LAUNCHER``
- tool to launch the C++ compiler, e.g. ``ccache`` or ``distcc`` for faster compilation (default: empty)
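A typical invocation combining some of these common variables (a
sketch; the compiler choice and install prefix are just examples)
could be:

.. code-block:: bash

cmake -D CMAKE_BUILD_TYPE=Release \
      -D CMAKE_CXX_COMPILER=g++ \
      -D CMAKE_INSTALL_PREFIX=$HOME/lammps ../cmake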
Some common LAMMPS specific variables
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
.. list-table::
:header-rows: 1
* - Variable
- Description
* - ``BUILD_MPI``
- build LAMMPS with MPI support (default: ``on`` if a working MPI library is available, else ``off``)
* - ``BUILD_OMP``
- build LAMMPS with OpenMP support (default: ``on`` if the compiler fully supports OpenMP, else ``off``)
* - ``BUILD_TOOLS``
- compile some additional executables from the ``tools`` folder (default: ``off``)
* - ``BUILD_DOC``
- include building the HTML format documentation for packaging/installing (default: ``off``)
* - ``CMAKE_TUNE_FLAGS``
- common compiler flags, for optimization or instrumentation (default: compiler specific)
* - ``LAMMPS_MACHINE``
- when set to ``name`` the LAMMPS executable and library will be called ``lmp_name`` and ``liblammps_name.a``
* - ``LAMMPS_EXCEPTIONS``
- when set to ``on`` errors will throw a C++ exception instead of aborting (default: ``off``)
* - ``FFT``
- select which FFT library to use: ``FFTW3``, ``MKL``, ``KISS`` (default, unless FFTW3 is found)
* - ``FFT_SINGLE``
- select whether to use single precision FFTs (default: ``off``)
* - ``WITH_JPEG``
- whether to support JPEG format in :doc:`dump image <dump_image>` (default: ``on`` if found)
* - ``WITH_PNG``
- whether to support PNG format in :doc:`dump image <dump_image>` (default: ``on`` if found)
* - ``WITH_GZIP``
- whether to support reading and writing compressed files (default: ``on`` if found)
* - ``WITH_FFMPEG``
- whether to support generating movies with :doc:`dump movie <dump_image>` (default: ``on`` if found)
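As an illustration, the following configures an MPI-enabled build
using FFTW3 and a machine suffix (a sketch, assuming FFTW3 is
installed on your system):

.. code-block:: bash

cmake -D BUILD_MPI=on -D FFT=FFTW3 -D LAMMPS_MACHINE=mpi ../cmake
# the resulting executable will be named lmp_mpi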
Enabling or disabling LAMMPS packages
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
The LAMMPS software is organized into a common core that is always
included and a large number of :doc:`add-on packages <Packages>` that
have to be enabled to be included into a LAMMPS executable. Packages
are enabled through setting variables of the kind ``PKG_<NAME>`` to
``on`` and disabled by setting them to ``off`` (or using ``yes``,
``no``, ``1``, ``0`` correspondingly). ``<NAME>`` has to be replaced by
the name of the package, e.g. ``MOLECULE`` or ``USER-MISC``.
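For example, to enable two packages and explicitly disable a third (a
sketch; choose whichever packages you need):

.. code-block:: bash

cmake -D PKG_MOLECULE=on -D PKG_KSPACE=on -D PKG_USER-MISC=off ../cmake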
Using presets
-------------
Since LAMMPS has a lot of optional features and packages, specifying
them all on the command line can be tedious. When selecting a
different compiler toolchain, multiple options have to be changed
consistently, which is rather error prone, and some packages require
consistent settings to be operated in a particular mode. For these
purposes, we provide a selection of "preset files" for CMake in the
folder ``cmake/presets``. They represent a way to pre-load or override
the CMake configuration cache by setting or changing CMake variables.
Preset files are loaded using the *-C* command line flag. You can
combine loading multiple preset files or change some variables later
with additional *-D* flags. A few examples:
.. code-block:: bash
cmake -C ../cmake/presets/minimal.cmake -D PKG_MISC=on ../cmake
cmake -C ../cmake/presets/clang.cmake -C ../cmake/presets/most.cmake ../cmake
cmake -C ../cmake/presets/minimal.cmake -D BUILD_MPI=off ../cmake
The first command will enable the packages ``KSPACE``, ``MANYBODY``,
``MOLECULE``, ``RIGID``, and ``MISC``; the first four from the preset
file and the fifth from the explicit variable definition. The second
command will first switch the compiler toolchain to the Clang
compilers and then enable a large number of packages that do not
depend on any special external libraries or tools. The third command
will enable the same first four packages as above and then enforce
compiling LAMMPS as a serial program (using the MPI STUBS library).
It is also possible to do this incrementally.
.. code-block:: bash
cmake -C ../cmake/presets/minimal.cmake ../cmake
cmake -D PKG_MISC=on .
will achieve the same configuration as the first example above. In
this scenario it is particularly convenient to do the second
configuration step using either the text mode or graphical user
interface (``ccmake`` or ``cmake-gui``).
Compilation and build targets
-----------------------------
The actual compilation will be started by running the selected build
command (on Linux this is by default ``make``, see below how to select
alternatives). You can also use the portable command ``cmake --build .``
which will adapt to whatever the selected build command is.
This is particularly convenient, if you have set a custom build command
via the ``CMAKE_MAKE_PROGRAM`` variable.
When calling the build program, you can also select which "target" is to
be built by appending the target name to the build command, e.g. ``make
all``, or ``cmake --build . --target all`` in the portable form. The
following abstract targets are available:
.. list-table::
:header-rows: 1
* - Target
- Description
* - ``all``
- build "everything" (default)
* - ``lammps``
- build the LAMMPS library and executable
* - ``doc``
- build the HTML documentation (if configured)
* - ``install``
- install all target files into folders in ``CMAKE_INSTALL_PREFIX``
* - ``test``
- run some simple tests (if configured with ``-D ENABLE_TESTING=on``)
* - ``clean``
- remove all generated files
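For example, a typical build-and-install sequence using the portable
form (a minimal sketch) is:

.. code-block:: bash

cmake --build .                    # same as the 'all' target
cmake --build . --target install   # copy files into CMAKE_INSTALL_PREFIX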
Choosing generators
-------------------
While CMake usually defaults to creating makefiles to compile software
with the ``make`` program, it supports multiple alternate build tools
(e.g. ``ninja-build``, which tends to be faster and more efficient at
parallelizing builds than ``make``) and can generate project files for
integrated development environments (IDEs) like VisualStudio, Eclipse,
or CodeBlocks. Which generators are available is specific to how the
local CMake version was configured and compiled. The list of available
options can be seen at the end of the output of ``cmake --help``. For
example, on Fedora 31 this is:
.. code-block::
Generators
The following generators are available on this platform (* marks default):
* Unix Makefiles = Generates standard UNIX makefiles.
Green Hills MULTI = Generates Green Hills MULTI files
(experimental, work-in-progress).
Ninja = Generates build.ninja files.
Ninja Multi-Config = Generates build-<Config>.ninja files.
Watcom WMake = Generates Watcom WMake makefiles.
CodeBlocks - Ninja = Generates CodeBlocks project files.
CodeBlocks - Unix Makefiles = Generates CodeBlocks project files.
CodeLite - Ninja = Generates CodeLite project files.
CodeLite - Unix Makefiles = Generates CodeLite project files.
Sublime Text 2 - Ninja = Generates Sublime Text 2 project files.
Sublime Text 2 - Unix Makefiles
= Generates Sublime Text 2 project files.
Kate - Ninja = Generates Kate project files.
Kate - Unix Makefiles = Generates Kate project files.
Eclipse CDT4 - Ninja = Generates Eclipse CDT 4.0 project files.
Eclipse CDT4 - Unix Makefiles= Generates Eclipse CDT 4.0 project files.
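A generator is selected with the *-G* flag during the first
configuration of a build folder; the build step itself is unchanged (a
minimal sketch, assuming the ``ninja`` tool is installed):

.. code-block:: bash

cmake -G Ninja ../cmake   # configure once with the Ninja generator
cmake --build .           # now runs ninja instead of make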
Below is a screenshot of using the CodeBlocks IDE with the ninja build tool
after running CMake as follows:
.. code-block:: bash
cmake -G 'CodeBlocks - Ninja' -C ../cmake/presets/most.cmake ../cmake/
.. image:: JPG/cmake-codeblocks.png
:align: center

View File

@ -34,9 +34,9 @@ install the `openkim-models` package
% brew install openkim-models
If you have problems with the installation you can post issues to
`this link <homebrew_>`_.
`this link <https://github.com/Homebrew/homebrew-core/issues>`_.
.. _homebrew: https://github.com/Homebrew/homebrew-core/issues
.. _homebrew: https://brew.sh
Thanks to Derek Thomas (derekt at cello.t.u-tokyo.ac.jp) for setting
up the Homebrew capability.

(Binary image files under ``doc/src/JPG``: the ``bow_tutorial_*.png``,
``offload_knc.png``, ``rhodo_staggered.jpg``, ``saed_ewald_intersect*.jpg``,
``saed_mesh*.jpg``, ``user_intel.png``, and ``xrd_mesh*.jpg`` files changed
from executable to normal file mode with unchanged contents; seven new
binary image files were added and are not shown.)
View File

@ -1,6 +1,8 @@
Build LAMMPS as a shared library
================================
.. TODO this is mostly redundant and should be addressed in the 'progguide' branch if it has not already
Build LAMMPS as a shared library using make
-------------------------------------------
@ -14,7 +16,7 @@ From the src directory, type
.. code-block:: bash
make foo mode=shlib
make foo mode=shared
where foo is the machine target name, such as mpi or serial.
This should create the file liblammps_foo.so in the src directory, as
@ -39,7 +41,6 @@ shared library:
.. code-block:: bash
-D BUILD_LIB=on # enable building LAMMPS as a library
-D BUILD_SHARED_LIBS=on # enable building of LAMMPS shared library (both options are needed!)
What this does is create a liblammps.so which contains the majority of LAMMPS
@ -59,7 +60,7 @@ CMAKE_INSTALL_PREFIX.
# build library
mkdir build
cd build
cmake -D PKG_PYTHON=on -D BUILD_LIB=on -D BUILD_SHARED_LIBS=on -D CMAKE_INSTALL_PREFIX=$VIRTUAL_ENV ../cmake
cmake -D PKG_PYTHON=on -D BUILD_SHARED_LIBS=on -D CMAKE_INSTALL_PREFIX=$VIRTUAL_ENV ../cmake
make -j 4
# install into prefix

View File

@ -175,23 +175,27 @@ a logical 3d grid of processors. They operate by changing the cutting
planes (or lines) between processors in 3d (or 2d), to adjust the
volume (area in 2d) assigned to each processor, as in the following 2d
diagram where processor sub-domains are shown and particles are
colored by the processor that owns them. The leftmost diagram is the
default partitioning of the simulation box across processors (one
sub-box for each of 16 processors); the middle diagram is after a
"grid" method has been applied.
colored by the processor that owns them.
.. image:: JPG/balance_uniform_small.jpg
:target: JPG/balance_uniform.jpg
.. image:: JPG/balance_nonuniform_small.jpg
:target: JPG/balance_nonuniform.jpg
.. image:: JPG/balance_rcb_small.jpg
:target: JPG/balance_rcb.jpg
.. list-table::
The *rcb* style is a "tiling" method which does not produce a logical
3d grid of processors. Rather it tiles the simulation domain with
rectangular sub-boxes of varying size and shape in an irregular
fashion so as to have equal numbers of particles (or weight) in each
sub-box, as in the rightmost diagram above.
* - .. figure:: JPG/balance_uniform_small.jpg
:target: JPG/balance_uniform.jpg
- .. figure:: JPG/balance_nonuniform_small.jpg
:target: JPG/balance_nonuniform.jpg
- .. figure:: JPG/balance_rcb_small.jpg
:target: JPG/balance_rcb.jpg
The leftmost diagram is the default partitioning of the simulation box
across processors (one sub-box for each of 16 processors); the middle
diagram is after a "grid" method has been applied. The *rcb* style is a
"tiling" method which does not produce a logical 3d grid of processors.
Rather it tiles the simulation domain with rectangular sub-boxes of
varying size and shape in an irregular fashion so as to have equal
numbers of particles (or weight) in each sub-box, as in the rightmost
diagram above.
The "grid" methods can be used with either of the
:doc:`comm_style <comm_style>` command options, *brick* or *tiled*\ . The

View File

@ -154,28 +154,27 @@ of processors. It operates by changing the cutting planes (or lines)
between processors in 3d (or 2d), to adjust the volume (area in 2d)
assigned to each processor, as in the following 2d diagram where
processor sub-domains are shown and atoms are colored by the processor
that owns them. The leftmost diagram is the default partitioning of
the simulation box across processors (one sub-box for each of 16
processors); the middle diagram is after a "grid" method has been
applied.
that owns them.
.. |bal_uni| image:: JPG/balance_uniform_small.jpg
:target: JPG/balance_uniform.jpg
:width: 31%
.. |bal_non| image:: JPG/balance_nonuniform_small.jpg
:target: JPG/balance_nonuniform.jpg
:width: 31%
.. |bal_rcb| image:: JPG/balance_rcb_small.jpg
:target: JPG/balance_rcb.jpg
:width: 31%
.. list-table::
|bal_uni| |bal_non| |bal_rcb|
* - .. figure:: JPG/balance_uniform_small.jpg
:target: JPG/balance_uniform.jpg
The *rcb* style is a "tiling" method which does not produce a logical
3d grid of processors. Rather it tiles the simulation domain with
rectangular sub-boxes of varying size and shape in an irregular
fashion so as to have equal numbers of particles (or weight) in each
sub-box, as in the rightmost diagram above.
- .. figure:: JPG/balance_nonuniform_small.jpg
:target: JPG/balance_nonuniform.jpg
- .. figure:: JPG/balance_rcb_small.jpg
:target: JPG/balance_rcb.jpg
The leftmost diagram is the default partitioning of the simulation box
across processors (one sub-box for each of 16 processors); the middle
diagram is after a "grid" method has been applied. The *rcb* style is a
"tiling" method which does not produce a logical 3d grid of processors.
Rather it tiles the simulation domain with rectangular sub-boxes of
varying size and shape in an irregular fashion so as to have equal
numbers of particles (or weight) in each sub-box, as in the rightmost
diagram above.
The "grid" methods can be used with either of the
:doc:`comm_style <comm_style>` command options, *brick* or *tiled*\ . The

View File

@ -76,6 +76,7 @@ function for the same parameters.
.. image:: JPG/zeeman_langevin.jpg
:align: center
:width: 600
The temperature effects are accounted for by connecting the spin
:math:`i` to a thermal bath using a Langevin thermostat (see
@ -154,6 +155,11 @@ The *precession/spin* style is part of the SPIN package. This style
is only enabled if LAMMPS was built with this package, and if the
atom_style "spin" was declared. See the :doc:`Build package <Build_package>` doc page for more info.
The *precession/spin* style can only be declared once. If more than
one precession type has to be declared (for example combining an
anisotropy and a Zeeman interaction), they have to be chained in the
same command line (as shown in the examples above).
Related commands
""""""""""""""""

View File

@ -87,8 +87,7 @@ The choice of a norm can be modified for the min styles *cg*\ , *sd*\
the 2-norm (Euclidean length) of the global force vector:
.. math::
|| \vec{F} ||_{2} = \sqrt{\vec{F}_1+ \cdots + \vec{F}_N}
|| \vec{F} ||_{2} = \sqrt{\vec{F}_1^2+ \cdots + \vec{F}_N^2}
The *max* norm computes the length of the 3-vector force
for each atom (2-norm), and takes the maximum value of those across

View File

@ -492,6 +492,7 @@ cstdlib
cstring
cstyle
csvr
ctrl
Ctypes
ctypes
cuda
@ -584,6 +585,7 @@ dephasing
dequidt
Dequidt
der
dereference
derekt
Derjagin
Derjaguin
@ -2839,6 +2841,7 @@ Synechococcus
sys
sysdim
Syst
systemd
Sz
Tabbernor
tabinner

View File

@ -27,7 +27,8 @@ pair_coeff * * spin/dmi dmi 4.5 0.00005 1.0 1.0 1.0
neighbor 0.1 bin
neigh_modify every 10 check yes delay 20
fix 1 all precession/spin anisotropy 0.0000033 0.0 0.0 1.0
fix 1 all precession/spin zeeman 0.0 0.0 0.0 1.0 anisotropy 0.00033 0.0 0.0 1.0
fix_modify 1 energy yes
fix 2 all langevin/spin 0.0 0.1 21
fix 3 all nve/spin lattice frozen
@ -43,9 +44,8 @@ variable magnorm equal c_out_mag[4]
variable emag equal c_out_mag[5]
variable tmag equal c_out_mag[6]
#thermo_style custom step time v_magnorm v_emag temp etotal
thermo_style custom step time v_magnorm pe ke v_emag temp etotal
thermo 10
thermo_style custom step time v_magnorm pe v_emag temp etotal
thermo 50
compute outsp all property/atom spx spy spz sp fmx fmy fmz
dump 1 all custom 100 dump_bfo.lammpstrj type x y z c_outsp[1] c_outsp[2] c_outsp[3]

View File

@ -0,0 +1,129 @@
LAMMPS (19 Mar 2020)
OMP_NUM_THREADS environment is not set. Defaulting to 1 thread. (../comm.cpp:94)
using 1 OpenMP thread(s) per MPI task
# layer sc iron atoms (in the [001] plane) in bismuth oxide
units metal
atom_style spin
dimension 3
boundary p p f
# necessary for the serial algorithm (sametag)
atom_modify map array
lattice sc 3.96
Lattice spacing in x,y,z = 3.96 3.96 3.96
region box block 0.0 34.0 0.0 34.0 0.0 5.0
create_box 1 box
Created orthogonal box = (0 0 0) to (134.64 134.64 19.8)
1 by 1 by 1 MPI processor grid
create_atoms 1 box
Created 5780 atoms
create_atoms CPU = 0.00263691 secs
# setting mass, mag. moments, and interactions for bfo
mass 1 1.0
set group all spin/random 11 2.50
5780 settings made for spin/random
#pair_style hybrid/overlay spin/exchange 6.0 spin/magelec 4.5
pair_style hybrid/overlay spin/exchange 6.0 spin/magelec 4.5 spin/dmi 4.5
pair_coeff * * spin/exchange exchange 6.0 -0.01575 0.0 1.965
pair_coeff * * spin/magelec magelec 4.5 0.000109 1.0 1.0 1.0
pair_coeff * * spin/dmi dmi 4.5 0.00005 1.0 1.0 1.0
neighbor 0.1 bin
neigh_modify every 10 check yes delay 20
fix 1 all precession/spin zeeman 0.0 0.0 0.0 1.0 anisotropy 0.00033 0.0 0.0 1.0
fix_modify 1 energy yes
fix 2 all langevin/spin 0.0 0.1 21
fix 3 all nve/spin lattice frozen
timestep 0.0002
compute out_mag all spin
compute out_pe all pe
compute out_ke all ke
compute out_temp all temp
variable magz equal c_out_mag[3]
variable magnorm equal c_out_mag[4]
variable emag equal c_out_mag[5]
variable tmag equal c_out_mag[6]
thermo_style custom step time v_magnorm pe v_emag temp etotal
thermo 50
compute outsp all property/atom spx spy spz sp fmx fmy fmz
dump 1 all custom 100 dump_bfo.lammpstrj type x y z c_outsp[1] c_outsp[2] c_outsp[3]
run 500
Neighbor list info ...
update every 10 steps, delay 20 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 6.1
ghost atom cutoff = 6.1
binsize = 3.05, bins = 45 45 7
3 neighbor lists, perpetual/occasional/extra = 3 0 0
(1) pair spin/exchange, perpetual
attributes: full, newton on
pair build: full/bin/atomonly
stencil: full/bin/3d
bin: standard
(2) pair spin/magelec, perpetual, copy from (1)
attributes: full, newton on
pair build: copy
stencil: none
bin: none
(3) pair spin/dmi, perpetual, copy from (1)
attributes: full, newton on
pair build: copy
stencil: none
bin: none
Per MPI rank memory allocation (min/avg/max) = 8.154 | 8.154 | 8.154 Mbytes
Step Time v_magnorm PotEng v_emag Temp TotEng
0 0 0.010071723 -0.69133656 -0.69133656 0 -0.69133656
50 0.01 0.0098309931 -1.3200811 -1.3200811 0 -1.3200811
100 0.02 0.0095903925 -1.9446727 -1.9446727 0 -1.9446727
150 0.03 0.0093489139 -2.5653446 -2.5653446 0 -2.5653446
200 0.04 0.0091051407 -3.1824298 -3.1824298 0 -3.1824298
250 0.05 0.0088575394 -3.7962506 -3.7962506 0 -3.7962506
300 0.06 0.0086053184 -4.4070501 -4.4070501 0 -4.4070501
350 0.07 0.0083490959 -5.0149813 -5.0149813 0 -5.0149813
400 0.08 0.0080907742 -5.6201417 -5.6201417 0 -5.6201417
450 0.09 0.0078327124 -6.222622 -6.222622 0 -6.222622
500 0.1 0.0075768488 -6.8225497 -6.8225497 0 -6.8225497
Loop time of 12.9141 on 1 procs for 500 steps with 5780 atoms
Performance: 0.669 ns/day, 35.873 hours/ns, 38.717 timesteps/s
100.0% CPU use with 1 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 3.1844 | 3.1844 | 3.1844 | 0.0 | 24.66
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0.012542 | 0.012542 | 0.012542 | 0.0 | 0.10
Output | 0.048014 | 0.048014 | 0.048014 | 0.0 | 0.37
Modify | 9.6569 | 9.6569 | 9.6569 | 0.0 | 74.78
Other | | 0.01233 | | | 0.10
Nlocal: 5780 ave 5780 max 5780 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 1065 ave 1065 max 1065 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 0 ave 0 max 0 min
Histogram: 1 0 0 0 0 0 0 0 0 0
FullNghs: 92480 ave 92480 max 92480 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 92480
Ave neighs/atom = 16
Neighbor list builds = 0
Dangerous builds = 0
Please see the log.cite file for references relevant to this simulation
Total wall time: 0:00:12

View File

@ -0,0 +1,129 @@
LAMMPS (19 Mar 2020)
OMP_NUM_THREADS environment is not set. Defaulting to 1 thread. (../comm.cpp:94)
using 1 OpenMP thread(s) per MPI task
# layer sc iron atoms (in the [001] plane) in bismuth oxide
units metal
atom_style spin
dimension 3
boundary p p f
# necessary for the serial algorithm (sametag)
atom_modify map array
lattice sc 3.96
Lattice spacing in x,y,z = 3.96 3.96 3.96
region box block 0.0 34.0 0.0 34.0 0.0 5.0
create_box 1 box
Created orthogonal box = (0 0 0) to (134.64 134.64 19.8)
2 by 2 by 1 MPI processor grid
create_atoms 1 box
Created 5780 atoms
create_atoms CPU = 0.000847816 secs
# setting mass, mag. moments, and interactions for bfo
mass 1 1.0
set group all spin/random 11 2.50
5780 settings made for spin/random
#pair_style hybrid/overlay spin/exchange 6.0 spin/magelec 4.5
pair_style hybrid/overlay spin/exchange 6.0 spin/magelec 4.5 spin/dmi 4.5
pair_coeff * * spin/exchange exchange 6.0 -0.01575 0.0 1.965
pair_coeff * * spin/magelec magelec 4.5 0.000109 1.0 1.0 1.0
pair_coeff * * spin/dmi dmi 4.5 0.00005 1.0 1.0 1.0
neighbor 0.1 bin
neigh_modify every 10 check yes delay 20
fix 1 all precession/spin zeeman 0.0 0.0 0.0 1.0 anisotropy 0.00033 0.0 0.0 1.0
fix_modify 1 energy yes
fix 2 all langevin/spin 0.0 0.1 21
fix 3 all nve/spin lattice frozen
timestep 0.0002
compute out_mag all spin
compute out_pe all pe
compute out_ke all ke
compute out_temp all temp
variable magz equal c_out_mag[3]
variable magnorm equal c_out_mag[4]
variable emag equal c_out_mag[5]
variable tmag equal c_out_mag[6]
thermo_style custom step time v_magnorm pe v_emag temp etotal
thermo 50
compute outsp all property/atom spx spy spz sp fmx fmy fmz
dump 1 all custom 100 dump_bfo.lammpstrj type x y z c_outsp[1] c_outsp[2] c_outsp[3]
run 500
Neighbor list info ...
update every 10 steps, delay 20 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 6.1
ghost atom cutoff = 6.1
binsize = 3.05, bins = 45 45 7
3 neighbor lists, perpetual/occasional/extra = 3 0 0
(1) pair spin/exchange, perpetual
attributes: full, newton on
pair build: full/bin/atomonly
stencil: full/bin/3d
bin: standard
(2) pair spin/magelec, perpetual, copy from (1)
attributes: full, newton on
pair build: copy
stencil: none
bin: none
(3) pair spin/dmi, perpetual, copy from (1)
attributes: full, newton on
pair build: copy
stencil: none
bin: none
Per MPI rank memory allocation (min/avg/max) = 7.744 | 7.744 | 7.744 Mbytes
Step Time v_magnorm PotEng v_emag Temp TotEng
0 0 0.010071723 -0.69133656 -0.69133656 0 -0.69133656
50 0.01 0.009830993 -1.3200811 -1.3200811 0 -1.3200811
100 0.02 0.0095903924 -1.9446727 -1.9446727 0 -1.9446727
150 0.03 0.0093489138 -2.5653446 -2.5653446 0 -2.5653446
200 0.04 0.0091051405 -3.1824298 -3.1824298 0 -3.1824298
250 0.05 0.0088575392 -3.7962506 -3.7962506 0 -3.7962506
300 0.06 0.0086053183 -4.4070501 -4.4070501 0 -4.4070501
350 0.07 0.0083490958 -5.0149813 -5.0149813 0 -5.0149813
400 0.08 0.008090774 -5.6201417 -5.6201417 0 -5.6201417
450 0.09 0.0078327123 -6.222622 -6.222622 0 -6.222622
500 0.1 0.0075768487 -6.8225497 -6.8225497 0 -6.8225497
Loop time of 3.92182 on 4 procs for 500 steps with 5780 atoms
Performance: 2.203 ns/day, 10.894 hours/ns, 127.492 timesteps/s
99.9% CPU use with 4 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0.80837 | 0.82745 | 0.8485 | 1.6 | 21.10
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0.045699 | 0.067487 | 0.090503 | 6.1 | 1.72
Output | 0.01387 | 0.0139 | 0.01394 | 0.0 | 0.35
Modify | 3.0065 | 3.0105 | 3.0138 | 0.2 | 76.76
Other | | 0.002516 | | | 0.06
Nlocal: 1445 ave 1445 max 1445 min
Histogram: 4 0 0 0 0 0 0 0 0 0
Nghost: 555 ave 555 max 555 min
Histogram: 4 0 0 0 0 0 0 0 0 0
Neighs: 0 ave 0 max 0 min
Histogram: 4 0 0 0 0 0 0 0 0 0
FullNghs: 23120 ave 23120 max 23120 min
Histogram: 4 0 0 0 0 0 0 0 0 0
Total # of neighbors = 92480
Ave neighs/atom = 16
Neighbor list builds = 0
Dangerous builds = 0
Please see the log.cite file for references relevant to this simulation
Total wall time: 0:00:03

View File

@ -1,167 +0,0 @@
LAMMPS (30 Oct 2019)
# layer sc iron atoms (in the [001] plane) in bismuth oxide
units metal
atom_style spin
dimension 3
boundary p p f
# necessary for the serial algorithm (sametag)
atom_modify map array
lattice sc 3.96
Lattice spacing in x,y,z = 3.96 3.96 3.96
region box block 0.0 34.0 0.0 34.0 0.0 5.0
create_box 1 box
Created orthogonal box = (0 0 0) to (134.64 134.64 19.8)
1 by 1 by 1 MPI processor grid
create_atoms 1 box
Created 5780 atoms
create_atoms CPU = 0.00226784 secs
# setting mass, mag. moments, and interactions for bfo
mass 1 1.0
set group all spin/random 11 2.50
5780 settings made for spin/random
#pair_style hybrid/overlay spin/exchange 6.0 spin/magelec 4.5
pair_style hybrid/overlay spin/exchange 6.0 spin/magelec 4.5 spin/dmi 4.5
pair_coeff * * spin/exchange exchange 6.0 -0.01575 0.0 1.965
pair_coeff * * spin/magelec magelec 4.5 0.000109 1.0 1.0 1.0
pair_coeff * * spin/dmi dmi 4.5 0.00005 1.0 1.0 1.0
neighbor 0.1 bin
neigh_modify every 10 check yes delay 20
fix 1 all precession/spin anisotropy 0.0000033 0.0 0.0 1.0
fix 2 all langevin/spin 0.0 0.1 21
fix 3 all nve/spin lattice frozen
timestep 0.0002
compute out_mag all spin
compute out_pe all pe
compute out_ke all ke
compute out_temp all temp
variable magz equal c_out_mag[3]
variable magnorm equal c_out_mag[4]
variable emag equal c_out_mag[5]
variable tmag equal c_out_mag[6]
#thermo_style custom step time v_magnorm v_emag temp etotal
thermo_style custom step time v_magnorm pe ke v_emag temp etotal
thermo 10
compute outsp all property/atom spx spy spz sp fmx fmy fmz
dump 1 all custom 100 dump_bfo.lammpstrj type x y z c_outsp[1] c_outsp[2] c_outsp[3]
run 500
Neighbor list info ...
update every 10 steps, delay 20 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 6.1
ghost atom cutoff = 6.1
binsize = 3.05, bins = 45 45 7
3 neighbor lists, perpetual/occasional/extra = 3 0 0
(1) pair spin/exchange, perpetual
attributes: full, newton on
pair build: full/bin/atomonly
stencil: full/bin/3d
bin: standard
(2) pair spin/magelec, perpetual, copy from (1)
attributes: full, newton on
pair build: copy
stencil: none
bin: none
(3) pair spin/dmi, perpetual, copy from (1)
attributes: full, newton on
pair build: copy
stencil: none
bin: none
Per MPI rank memory allocation (min/avg/max) = 8.154 | 8.154 | 8.154 Mbytes
Step Time v_magnorm PotEng KinEng v_emag Temp TotEng
0 0 0.010071723 -0.059343109 0 -0.13132609 0 -0.059343109
10 0.002 0.01003044 -0.18537022 0 -0.38338861 0 -0.18537022
20 0.004 0.0099890716 -0.31121926 0 -0.63509581 0 -0.31121926
30 0.006 0.0099475919 -0.43689013 0 -0.88644739 0 -0.43689013
40 0.008 0.0099059782 -0.5623833 0 -1.1374442 0 -0.5623833
50 0.01 0.0098642085 -0.68769978 0 -1.388088 0 -0.68769978
60 0.012 0.0098222618 -0.81284106 0 -1.6383818 0 -0.81284106
70 0.014 0.0097801186 -0.93780907 0 -1.8883294 0 -0.93780907
80 0.016 0.0097377603 -1.0626062 0 -2.1379352 0 -1.0626062
90 0.018 0.0096951693 -1.187235 0 -2.3872045 0 -1.187235
100 0.02 0.0096523288 -1.3116986 0 -2.6361432 0 -1.3116986
110 0.022 0.0096092227 -1.4360002 0 -2.8847577 0 -1.4360002
120 0.024 0.009565836 -1.5601431 0 -3.1330547 0 -1.5601431
130 0.026 0.0095221542 -1.6841309 0 -3.3810411 0 -1.6841309
140 0.028 0.0094781635 -1.8079673 0 -3.6287241 0 -1.8079673
150 0.03 0.0094338509 -1.9316557 0 -3.8761109 0 -1.9316557
160 0.032 0.0093892044 -2.0551997 0 -4.1232085 0 -2.0551997
170 0.034 0.0093442126 -2.178603 0 -4.370024 0 -2.178603
180 0.036 0.0092988654 -2.3018687 0 -4.6165639 0 -2.3018687
190 0.038 0.0092531537 -2.4250002 0 -4.8628348 0 -2.4250002
200 0.04 0.0092070698 -2.5480003 0 -5.1088426 0 -2.5480003
210 0.042 0.0091606073 -2.670872 0 -5.3545929 0 -2.670872
220 0.044 0.0091137617 -2.7936178 0 -5.6000909 0 -2.7936178
230 0.046 0.0090665298 -2.9162399 0 -5.8453412 0 -2.9162399
240 0.048 0.0090189108 -3.0387405 0 -6.0903478 0 -3.0387405
250 0.05 0.0089709056 -3.1611214 0 -6.3351146 0 -3.1611214
260 0.052 0.0089225173 -3.2833841 0 -6.5796445 0 -3.2833841
270 0.054 0.0088737511 -3.4055299 0 -6.8239403 0 -3.4055299
280 0.056 0.0088246147 -3.52756 0 -7.0680043 0 -3.52756
290 0.058 0.0087751176 -3.6494754 0 -7.3118383 0 -3.6494754
300 0.06 0.008725272 -3.7712768 0 -7.5554438 0 -3.7712768
310 0.062 0.0086750916 -3.8929648 0 -7.7988222 0 -3.8929648
320 0.064 0.0086245927 -4.0145399 0 -8.0419744 0 -4.0145399
330 0.066 0.0085737928 -4.1360026 0 -8.2849013 0 -4.1360026
340 0.068 0.0085227116 -4.2573532 0 -8.5276035 0 -4.2573532
350 0.07 0.0084713698 -4.378592 0 -8.7700818 0 -4.378592
360 0.072 0.0084197895 -4.4997194 0 -9.0123367 0 -4.4997194
370 0.074 0.0083679936 -4.6207358 0 -9.2543688 0 -4.6207358
380 0.076 0.0083160058 -4.7416414 0 -9.496179 0 -4.7416414
390 0.078 0.0082638503 -4.8624367 0 -9.7377681 0 -4.8624367
400 0.08 0.0082115512 -4.9831222 0 -9.9791371 0 -4.9831222
410 0.082 0.0081591329 -5.1036986 0 -10.220287 0 -5.1036986
420 0.084 0.0081066195 -5.2241665 0 -10.46122 0 -5.2241665
430 0.086 0.0080540347 -5.3445267 0 -10.701936 0 -5.3445267
440 0.088 0.008001402 -5.4647802 0 -10.942439 0 -5.4647802
450 0.09 0.0079487439 -5.5849281 0 -11.18273 0 -5.5849281
460 0.092 0.0078960829 -5.7049716 0 -11.422811 0 -5.7049716
470 0.094 0.0078434404 -5.824912 0 -11.662686 0 -5.824912
480 0.096 0.0077908378 -5.9447508 0 -11.902357 0 -5.9447508
490 0.098 0.0077382955 -6.0644896 0 -12.141828 0 -6.0644896
500 0.1 0.0076858338 -6.1841301 0 -12.381101 0 -6.1841301
Loop time of 13.543 on 1 procs for 500 steps with 5780 atoms
Performance: 0.638 ns/day, 37.619 hours/ns, 36.919 timesteps/s
100.0% CPU use with 1 MPI tasks x no OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 3.8138 | 3.8138 | 3.8138 | 0.0 | 28.16
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0.011875 | 0.011875 | 0.011875 | 0.0 | 0.09
Output | 0.049726 | 0.049726 | 0.049726 | 0.0 | 0.37
Modify | 9.655 | 9.655 | 9.655 | 0.0 | 71.29
Other | | 0.01262 | | | 0.09
Nlocal: 5780 ave 5780 max 5780 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 1065 ave 1065 max 1065 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 0 ave 0 max 0 min
Histogram: 1 0 0 0 0 0 0 0 0 0
FullNghs: 92480 ave 92480 max 92480 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 92480
Ave neighs/atom = 16
Neighbor list builds = 0
Dangerous builds = 0
Please see the log.cite file for references relevant to this simulation
Total wall time: 0:00:13

View File

@ -1,167 +0,0 @@
LAMMPS (30 Oct 2019)
# layer sc iron atoms (in the [001] plane) in bismuth oxide
units metal
atom_style spin
dimension 3
boundary p p f
# necessary for the serial algorithm (sametag)
atom_modify map array
lattice sc 3.96
Lattice spacing in x,y,z = 3.96 3.96 3.96
region box block 0.0 34.0 0.0 34.0 0.0 5.0
create_box 1 box
Created orthogonal box = (0 0 0) to (134.64 134.64 19.8)
2 by 2 by 1 MPI processor grid
create_atoms 1 box
Created 5780 atoms
create_atoms CPU = 0.00149798 secs
# setting mass, mag. moments, and interactions for bfo
mass 1 1.0
set group all spin/random 11 2.50
5780 settings made for spin/random
#pair_style hybrid/overlay spin/exchange 6.0 spin/magelec 4.5
pair_style hybrid/overlay spin/exchange 6.0 spin/magelec 4.5 spin/dmi 4.5
pair_coeff * * spin/exchange exchange 6.0 -0.01575 0.0 1.965
pair_coeff * * spin/magelec magelec 4.5 0.000109 1.0 1.0 1.0
pair_coeff * * spin/dmi dmi 4.5 0.00005 1.0 1.0 1.0
neighbor 0.1 bin
neigh_modify every 10 check yes delay 20
fix 1 all precession/spin anisotropy 0.0000033 0.0 0.0 1.0
fix 2 all langevin/spin 0.0 0.1 21
fix 3 all nve/spin lattice frozen
timestep 0.0002
compute out_mag all spin
compute out_pe all pe
compute out_ke all ke
compute out_temp all temp
variable magz equal c_out_mag[3]
variable magnorm equal c_out_mag[4]
variable emag equal c_out_mag[5]
variable tmag equal c_out_mag[6]
#thermo_style custom step time v_magnorm v_emag temp etotal
thermo_style custom step time v_magnorm pe ke v_emag temp etotal
thermo 10
compute outsp all property/atom spx spy spz sp fmx fmy fmz
dump 1 all custom 100 dump_bfo.lammpstrj type x y z c_outsp[1] c_outsp[2] c_outsp[3]
run 500
Neighbor list info ...
update every 10 steps, delay 20 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 6.1
ghost atom cutoff = 6.1
binsize = 3.05, bins = 45 45 7
3 neighbor lists, perpetual/occasional/extra = 3 0 0
(1) pair spin/exchange, perpetual
attributes: full, newton on
pair build: full/bin/atomonly
stencil: full/bin/3d
bin: standard
(2) pair spin/magelec, perpetual, copy from (1)
attributes: full, newton on
pair build: copy
stencil: none
bin: none
(3) pair spin/dmi, perpetual, copy from (1)
attributes: full, newton on
pair build: copy
stencil: none
bin: none
Per MPI rank memory allocation (min/avg/max) = 7.744 | 7.744 | 7.744 Mbytes
Step Time v_magnorm PotEng KinEng v_emag Temp TotEng
0 0 0.010071723 -0.11868622 0 -0.12966919 0 -0.11868622
10 0.002 0.010030399 -0.37068593 0 -0.38171598 0 -0.37068593
20 0.004 0.0099889925 -0.6223216 0 -0.6334048 0 -0.6223216
30 0.006 0.0099474775 -0.87359359 0 -0.8847354 0 -0.87359359
40 0.008 0.0099058307 -1.1245034 0 -1.1357086 0 -1.1245034
50 0.01 0.0098640297 -1.3750538 0 -1.3863265 0 -1.3750538
60 0.012 0.0098220535 -1.6252482 0 -1.6365919 0 -1.6252482
70 0.014 0.0097798823 -1.8750914 0 -1.8865086 0 -1.8750914
80 0.016 0.0097374973 -2.1245886 0 -2.1360814 0 -2.1245886
90 0.018 0.0096948808 -2.3737458 0 -2.3853155 0 -2.3737458
100 0.02 0.0096520159 -2.6225698 0 -2.6342168 0 -2.6225698
110 0.022 0.0096088866 -2.8710677 0 -2.8827919 0 -2.8710677
120 0.024 0.0095654776 -3.1192469 0 -3.1310475 0 -3.1192469
130 0.026 0.0095217746 -3.367115 0 -3.3789906 0 -3.367115
140 0.028 0.0094777638 -3.61468 0 -3.6266285 0 -3.61468
150 0.03 0.0094334323 -3.8619496 0 -3.8739683 0 -3.8619496
160 0.032 0.0093887679 -4.1089316 0 -4.1210173 0 -4.1089316
170 0.034 0.0093437596 -4.3556335 0 -4.3677824 0 -4.3556335
180 0.036 0.0092983972 -4.6020625 0 -4.6142704 0 -4.6020625
190 0.038 0.0092526717 -4.8482255 0 -4.8604877 0 -4.8482255
200 0.04 0.0092065755 -5.0941291 0 -5.1064403 0 -5.0941291
210 0.042 0.0091601024 -5.3397792 0 -5.3521339 0 -5.3397792
220 0.044 0.0091132478 -5.5851813 0 -5.5975736 0 -5.5851813
230 0.046 0.0090660089 -5.8303404 0 -5.842764 0 -5.8303404
240 0.048 0.0090183847 -6.0752609 0 -6.0877092 0 -6.0752609
250 0.05 0.0089703764 -6.3199467 0 -6.3324129 0 -6.3199467
260 0.052 0.0089219873 -6.5644011 0 -6.5768782 0 -6.5644011
270 0.054 0.0088732228 -6.808627 0 -6.8211078 0 -6.808627
280 0.056 0.0088240906 -7.0526266 0 -7.0651038 0 -7.0526266
290 0.058 0.0087746006 -7.296402 0 -7.3088682 0 -7.296402
300 0.06 0.0087247648 -7.5399545 0 -7.5524024 0 -7.5399545
310 0.062 0.0086745976 -7.7832854 0 -7.7957077 0 -7.7832854
320 0.064 0.0086241149 -8.0263956 0 -8.038785 0 -8.0263956
330 0.066 0.008573335 -8.2692858 0 -8.281635 0 -8.2692858
340 0.068 0.0085222772 -8.5119564 0 -8.5242586 0 -8.5119564
350 0.07 0.0084709627 -8.7544078 0 -8.7666562 0 -8.7544078
360 0.072 0.0084194136 -8.9966403 0 -9.0088285 0 -8.9966403
370 0.074 0.008367653 -9.2386543 0 -9.2507761 0 -9.2386543
380 0.076 0.0083157046 -9.4804501 0 -9.4924997 0 -9.4804501
390 0.078 0.0082635925 -9.7220281 0 -9.7340001 0 -9.7220281
400 0.08 0.0082113412 -9.9633888 0 -9.9752784 0 -9.9633888
410 0.082 0.0081589747 -10.204533 0 -10.216336 0 -10.204533
420 0.084 0.0081065173 -10.445462 0 -10.457173 0 -10.445462
430 0.086 0.0080539925 -10.686176 0 -10.697793 0 -10.686176
440 0.088 0.0080014235 -10.926676 0 -10.938197 0 -10.926676
450 0.09 0.0079488329 -11.166966 0 -11.178387 0 -11.166966
460 0.092 0.0078962427 -11.407045 0 -11.418366 0 -11.407045
470 0.094 0.0078436743 -11.646917 0 -11.658136 0 -11.646917
480 0.096 0.0077911486 -11.886583 0 -11.8977 0 -11.886583
490 0.098 0.007738686 -12.126047 0 -12.137063 0 -12.126047
500 0.1 0.0076863062 -12.365311 0 -12.376226 0 -12.365311
Loop time of 3.94852 on 4 procs for 500 steps with 5780 atoms
Performance: 2.188 ns/day, 10.968 hours/ns, 126.630 timesteps/s
99.9% CPU use with 4 MPI tasks x no OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0.97416 | 0.98668 | 1.0022 | 1.0 | 24.99
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0.032618 | 0.04948 | 0.062614 | 5.0 | 1.25
Output | 0.014166 | 0.014229 | 0.014374 | 0.1 | 0.36
Modify | 2.8947 | 2.8957 | 2.8965 | 0.0 | 73.34
Other | | 0.002385 | | | 0.06
Nlocal: 1445 ave 1445 max 1445 min
Histogram: 4 0 0 0 0 0 0 0 0 0
Nghost: 555 ave 555 max 555 min
Histogram: 4 0 0 0 0 0 0 0 0 0
Neighs: 0 ave 0 max 0 min
Histogram: 4 0 0 0 0 0 0 0 0 0
FullNghs: 23120 ave 23120 max 23120 min
Histogram: 4 0 0 0 0 0 0 0 0 0
Total # of neighbors = 92480
Ave neighs/atom = 16
Neighbor list builds = 0
Dangerous builds = 0
Please see the log.cite file for references relevant to this simulation
Total wall time: 0:00:03

View File

@ -45,8 +45,6 @@ compute out_pe all pe
compute out_ke all ke
compute out_temp all temp
thermo_style custom f_1
variable magx equal c_out_mag[1]
variable magy equal c_out_mag[2]
variable magz equal c_out_mag[3]
@ -54,7 +52,7 @@ variable magnorm equal c_out_mag[4]
variable emag equal c_out_mag[5]
variable tmag equal c_out_mag[6]
thermo_style custom step time f_1 v_magx v_magy v_magnorm v_emag temp etotal
thermo_style custom step time v_magx v_magy v_magnorm pe v_emag temp etotal
thermo 50
# compute outsp all property/atom spx spy spz sp fmx fmy fmz

View File

@ -0,0 +1,144 @@
LAMMPS (19 Mar 2020)
OMP_NUM_THREADS environment is not set. Defaulting to 1 thread. (../comm.cpp:94)
using 1 OpenMP thread(s) per MPI task
# fcc cobalt in a 3d periodic box
clear
OMP_NUM_THREADS environment is not set. Defaulting to 1 thread. (../comm.cpp:94)
using 1 OpenMP thread(s) per MPI task
units metal
atom_style spin
dimension 3
boundary p p p
# necessary for the serial algorithm (sametag)
atom_modify map array
lattice fcc 3.54
Lattice spacing in x,y,z = 3.54 3.54 3.54
region box block 0.0 5.0 0.0 5.0 0.0 5.0
create_box 1 box
Created orthogonal box = (0 0 0) to (17.7 17.7 17.7)
1 by 1 by 1 MPI processor grid
create_atoms 1 box
Created 500 atoms
create_atoms CPU = 0.00057292 secs
# setting mass, mag. moments, and interactions for fcc cobalt
mass 1 58.93
#set group all spin/random 31 1.72
set group all spin 1.72 0.0 0.0 1.0
500 settings made for spin
velocity all create 100 4928459 rot yes dist gaussian
pair_style hybrid/overlay eam/alloy spin/exchange 4.0
pair_coeff * * eam/alloy Co_PurjaPun_2012.eam.alloy Co
pair_coeff * * spin/exchange exchange 4.0 0.0446928 0.003496 1.4885
neighbor 0.1 bin
neigh_modify every 10 check yes delay 20
fix 1 all precession/spin zeeman 1.0 0.0 0.0 1.0
fix_modify 1 energy yes
fix 2 all langevin/spin 0.0 0.0 21
fix 3 all nve/spin lattice moving
timestep 0.0001
# compute and output options
compute out_mag all spin
compute out_pe all pe
compute out_ke all ke
compute out_temp all temp
variable magx equal c_out_mag[1]
variable magy equal c_out_mag[2]
variable magz equal c_out_mag[3]
variable magnorm equal c_out_mag[4]
variable emag equal c_out_mag[5]
variable tmag equal c_out_mag[6]
thermo_style custom step time v_magx v_magy v_magnorm pe v_emag temp etotal
thermo 50
# compute outsp all property/atom spx spy spz sp fmx fmy fmz
# dump 1 all custom 100 dump_cobalt_fcc.lammpstrj type x y z c_outsp[1] c_outsp[2] c_outsp[3]
run 1000
Neighbor list info ...
update every 10 steps, delay 20 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 6.59954
ghost atom cutoff = 6.59954
binsize = 3.29977, bins = 6 6 6
2 neighbor lists, perpetual/occasional/extra = 2 0 0
(1) pair eam/alloy, perpetual, half/full from (2)
attributes: half, newton on
pair build: halffull/newton
stencil: none
bin: none
(2) pair spin/exchange, perpetual
attributes: full, newton on
pair build: full/bin/atomonly
stencil: full/bin/3d
bin: standard
Per MPI rank memory allocation (min/avg/max) = 5.718 | 5.718 | 5.718 Mbytes
Step Time v_magx v_magy v_magnorm PotEng v_emag Temp TotEng
0 0 0 0 1 -2285.0679 -94.095041 100.00543 -2278.6175
50 0.005 0 0 1 -2284.7512 -94.095025 95.095146 -2278.6175
100 0.01 0 0 1 -2283.8798 -94.094815 81.584879 -2278.6175
150 0.015 0 0 1 -2282.6701 -94.09401 62.831647 -2278.6175
200 0.02 0 0 1 -2281.4185 -94.092175 43.426664 -2278.6174
250 0.025 0 0 1 -2280.4165 -94.089084 27.892653 -2278.6174
300 0.03 0 0 1 -2279.8662 -94.084906 19.36049 -2278.6174
350 0.035 0 0 1 -2279.8238 -94.080244 18.702888 -2278.6174
400 0.04 0 0 1 -2280.1962 -94.076013 24.47725 -2278.6174
450 0.045 0 0 1 -2280.7898 -94.073181 33.678697 -2278.6175
500 0.05 0 0 1 -2281.3871 -94.072475 42.940446 -2278.6175
550 0.055 0 0 1 -2281.8215 -94.074138 49.675032 -2278.6175
600 0.06 0 0 1 -2282.0209 -94.077829 52.765336 -2278.6175
650 0.065 0 0 1 -2282.0136 -94.082703 52.652501 -2278.6175
700 0.07 0 0 1 -2281.8997 -94.087644 50.88749 -2278.6174
750 0.075 0 0 1 -2281.8013 -94.091583 49.361723 -2278.6174
800 0.08 0 0 1 -2281.8108 -94.093792 49.508108 -2278.6174
850 0.085 0 0 1 -2281.9558 -94.094064 51.756992 -2278.6174
900 0.09 0 0 1 -2282.1927 -94.092723 55.428834 -2278.6175
950 0.095 0 0 1 -2282.4277 -94.090469 59.072774 -2278.6175
1000 0.1 0 0 1 -2282.5578 -94.088121 61.090565 -2278.6175
Loop time of 4.4001 on 1 procs for 1000 steps with 500 atoms
Performance: 1.964 ns/day, 12.222 hours/ns, 227.268 timesteps/s
100.0% CPU use with 1 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 2.0815 | 2.0815 | 2.0815 | 0.0 | 47.31
Neigh | 0.011835 | 0.011835 | 0.011835 | 0.0 | 0.27
Comm | 0.033331 | 0.033331 | 0.033331 | 0.0 | 0.76
Output | 0.00045395 | 0.00045395 | 0.00045395 | 0.0 | 0.01
Modify | 2.2677 | 2.2677 | 2.2677 | 0.0 | 51.54
Other | | 0.005309 | | | 0.12
Nlocal: 500 ave 500 max 500 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 1956 ave 1956 max 1956 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 24088 ave 24088 max 24088 min
Histogram: 1 0 0 0 0 0 0 0 0 0
FullNghs: 48176 ave 48176 max 48176 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 48176
Ave neighs/atom = 96.352
Neighbor list builds = 6
Dangerous builds = 0
Please see the log.cite file for references relevant to this simulation
Total wall time: 0:00:04

View File

@ -0,0 +1,144 @@
LAMMPS (19 Mar 2020)
OMP_NUM_THREADS environment is not set. Defaulting to 1 thread. (../comm.cpp:94)
using 1 OpenMP thread(s) per MPI task
# fcc cobalt in a 3d periodic box
clear
OMP_NUM_THREADS environment is not set. Defaulting to 1 thread. (../comm.cpp:94)
using 1 OpenMP thread(s) per MPI task
units metal
atom_style spin
dimension 3
boundary p p p
# necessary for the serial algorithm (sametag)
atom_modify map array
lattice fcc 3.54
Lattice spacing in x,y,z = 3.54 3.54 3.54
region box block 0.0 5.0 0.0 5.0 0.0 5.0
create_box 1 box
Created orthogonal box = (0 0 0) to (17.7 17.7 17.7)
1 by 2 by 2 MPI processor grid
create_atoms 1 box
Created 500 atoms
create_atoms CPU = 0.000688791 secs
# setting mass, mag. moments, and interactions for fcc cobalt
mass 1 58.93
#set group all spin/random 31 1.72
set group all spin 1.72 0.0 0.0 1.0
500 settings made for spin
velocity all create 100 4928459 rot yes dist gaussian
pair_style hybrid/overlay eam/alloy spin/exchange 4.0
pair_coeff * * eam/alloy Co_PurjaPun_2012.eam.alloy Co
pair_coeff * * spin/exchange exchange 4.0 0.0446928 0.003496 1.4885
neighbor 0.1 bin
neigh_modify every 10 check yes delay 20
fix 1 all precession/spin zeeman 1.0 0.0 0.0 1.0
fix_modify 1 energy yes
fix 2 all langevin/spin 0.0 0.0 21
fix 3 all nve/spin lattice moving
timestep 0.0001
# compute and output options
compute out_mag all spin
compute out_pe all pe
compute out_ke all ke
compute out_temp all temp
variable magx equal c_out_mag[1]
variable magy equal c_out_mag[2]
variable magz equal c_out_mag[3]
variable magnorm equal c_out_mag[4]
variable emag equal c_out_mag[5]
variable tmag equal c_out_mag[6]
thermo_style custom step time v_magx v_magy v_magnorm pe v_emag temp etotal
thermo 50
# compute outsp all property/atom spx spy spz sp fmx fmy fmz
# dump 1 all custom 100 dump_cobalt_fcc.lammpstrj type x y z c_outsp[1] c_outsp[2] c_outsp[3]
run 1000
Neighbor list info ...
update every 10 steps, delay 20 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 6.59954
ghost atom cutoff = 6.59954
binsize = 3.29977, bins = 6 6 6
2 neighbor lists, perpetual/occasional/extra = 2 0 0
(1) pair eam/alloy, perpetual, half/full from (2)
attributes: half, newton on
pair build: halffull/newton
stencil: none
bin: none
(2) pair spin/exchange, perpetual
attributes: full, newton on
pair build: full/bin/atomonly
stencil: full/bin/3d
bin: standard
Per MPI rank memory allocation (min/avg/max) = 5.664 | 5.664 | 5.664 Mbytes
Step Time v_magx v_magy v_magnorm PotEng v_emag Temp TotEng
0 0 0 0 1 -2285.0679 -94.095041 100.00543 -2278.6175
50 0.005 0 0 1 -2284.7564 -94.094967 95.176149 -2278.6175
100 0.01 0 0 1 -2283.8977 -94.094606 81.863661 -2278.6175
150 0.015 0 0 1 -2282.7006 -94.09362 63.303924 -2278.6175
200 0.02 0 0 1 -2281.452 -94.091648 43.94572 -2278.6174
250 0.025 0 0 1 -2280.4374 -94.08853 28.216468 -2278.6174
300 0.03 0 0 1 -2279.859 -94.084466 19.248729 -2278.6174
350 0.035 0 0 1 -2279.78 -94.080048 18.024631 -2278.6174
400 0.04 0 0 1 -2280.1196 -94.076134 23.288369 -2278.6174
450 0.045 0 0 1 -2280.697 -94.073612 32.240137 -2278.6175
500 0.05 0 0 1 -2281.305 -94.073124 41.666374 -2278.6175
550 0.055 0 0 1 -2281.7792 -94.074853 49.019175 -2278.6175
600 0.06 0 0 1 -2282.0409 -94.078438 53.075365 -2278.6175
650 0.065 0 0 1 -2282.1031 -94.083056 54.040647 -2278.6175
700 0.07 0 0 1 -2282.0468 -94.087642 53.167271 -2278.6175
750 0.075 0 0 1 -2281.9765 -94.09119 52.076935 -2278.6174
800 0.08 0 0 1 -2281.9742 -94.093023 52.041607 -2278.6174
850 0.085 0 0 1 -2282.0669 -94.09296 53.478909 -2278.6174
900 0.09 0 0 1 -2282.2193 -94.091331 55.842481 -2278.6175
950 0.095 0 0 1 -2282.354 -94.088827 57.930866 -2278.6175
1000 0.1 0 0 1 -2282.3885 -94.086262 58.464579 -2278.6175
Loop time of 2.75 on 4 procs for 1000 steps with 500 atoms
Performance: 3.142 ns/day, 7.639 hours/ns, 363.637 timesteps/s
99.7% CPU use with 4 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0.57529 | 0.60652 | 0.63664 | 3.2 | 22.06
Neigh | 0.0026889 | 0.0029447 | 0.0030761 | 0.3 | 0.11
Comm | 0.10943 | 0.14471 | 0.17847 | 7.5 | 5.26
Output | 0.00045419 | 0.00050056 | 0.00060511 | 0.0 | 0.02
Modify | 1.9865 | 1.9926 | 1.998 | 0.3 | 72.46
Other | | 0.002738 | | | 0.10
Nlocal: 125 ave 132 max 116 min
Histogram: 1 0 0 0 0 1 1 0 0 1
Nghost: 1099 ave 1108 max 1092 min
Histogram: 1 0 0 1 1 0 0 0 0 1
Neighs: 6033 ave 6372 max 5495 min
Histogram: 1 0 0 0 0 0 0 2 0 1
FullNghs: 12066 ave 12964 max 10977 min
Histogram: 1 0 0 0 0 1 1 0 0 1
Total # of neighbors = 48264
Ave neighs/atom = 96.528
Neighbor list builds = 6
Dangerous builds = 0
Please see the log.cite file for references relevant to this simulation
Total wall time: 0:00:02
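
The spin-lattice setup exercised by the two fcc-cobalt logs above distills to the following skeleton; this is a sketch assembled from the commands echoed in the logs, not a file shipped in this commit:

units           metal
atom_style      spin
dimension       3
boundary        p p p
atom_modify     map array      # needed by the serial spin algorithm (sametag)
lattice         fcc 3.54
region          box block 0.0 5.0 0.0 5.0 0.0 5.0
create_box      1 box
create_atoms    1 box
mass            1 58.93
set             group all spin 1.72 0.0 0.0 1.0       # uniform 1.72 mu_B moments along z
pair_style      hybrid/overlay eam/alloy spin/exchange 4.0
pair_coeff      * * eam/alloy Co_PurjaPun_2012.eam.alloy Co
pair_coeff      * * spin/exchange exchange 4.0 0.0446928 0.003496 1.4885
fix             1 all precession/spin zeeman 1.0 0.0 0.0 1.0
fix_modify      1 energy yes                          # fold the precession energy into thermo output
fix             2 all langevin/spin 0.0 0.0 21
fix             3 all nve/spin lattice moving         # coupled spin + lattice integration
timestep        0.0001
run             1000

Overlaying the mechanical (eam/alloy) and magnetic (spin/exchange) interactions on the same atoms is also why the neighbor-list info above shows the half eam/alloy list being built from the full spin/exchange list.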


@ -1,142 +0,0 @@
LAMMPS (30 Oct 2019)
# fcc cobalt in a 3d periodic box
clear
units metal
atom_style spin
dimension 3
boundary p p p
# necessary for the serial algorithm (sametag)
atom_modify map array
lattice fcc 3.54
Lattice spacing in x,y,z = 3.54 3.54 3.54
region box block 0.0 5.0 0.0 5.0 0.0 5.0
create_box 1 box
Created orthogonal box = (0 0 0) to (17.7 17.7 17.7)
1 by 1 by 1 MPI processor grid
create_atoms 1 box
Created 500 atoms
create_atoms CPU = 0.000470161 secs
# setting mass, mag. moments, and interactions for fcc cobalt
mass 1 58.93
#set group all spin/random 31 1.72
set group all spin 1.72 0.0 0.0 1.0
500 settings made for spin
velocity all create 100 4928459 rot yes dist gaussian
pair_style hybrid/overlay eam/alloy spin/exchange 4.0
pair_coeff * * eam/alloy Co_PurjaPun_2012.eam.alloy Co
pair_coeff * * spin/exchange exchange 4.0 0.0446928 0.003496 1.4885
neighbor 0.1 bin
neigh_modify every 10 check yes delay 20
fix 1 all precession/spin zeeman 1.0 0.0 0.0 1.0
fix_modify 1 energy yes
fix 2 all langevin/spin 0.0 0.0 21
fix 3 all nve/spin lattice moving
timestep 0.0001
# compute and output options
compute out_mag all spin
compute out_pe all pe
compute out_ke all ke
compute out_temp all temp
thermo_style custom f_1
variable magx equal c_out_mag[1]
variable magy equal c_out_mag[2]
variable magz equal c_out_mag[3]
variable magnorm equal c_out_mag[4]
variable emag equal c_out_mag[5]
variable tmag equal c_out_mag[6]
thermo_style custom step time f_1 v_magx v_magy v_magnorm v_emag temp etotal
thermo 50
# compute outsp all property/atom spx spy spz sp fmx fmy fmz
# dump 1 all custom 100 dump_cobalt_fcc.lammpstrj type x y z c_outsp[1] c_outsp[2] c_outsp[3]
run 1000
Neighbor list info ...
update every 10 steps, delay 20 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 6.59954
ghost atom cutoff = 6.59954
binsize = 3.29977, bins = 6 6 6
2 neighbor lists, perpetual/occasional/extra = 2 0 0
(1) pair eam/alloy, perpetual, half/full from (2)
attributes: half, newton on
pair build: halffull/newton
stencil: none
bin: none
(2) pair spin/exchange, perpetual
attributes: full, newton on
pair build: full/bin/atomonly
stencil: full/bin/3d
bin: standard
Per MPI rank memory allocation (min/avg/max) = 5.718 | 5.718 | 5.718 Mbytes
Step Time f_1 v_magx v_magy v_magnorm v_emag Temp TotEng
0 0 -0.099570972 0 0 1 -188.09051 100.00543 -2278.6175
50 0.005 -0.099570972 0 0 1 -188.09048 95.094679 -2278.6175
100 0.01 -0.099570972 0 0 1 -188.09007 81.578321 -2278.6177
150 0.015 -0.099570972 0 0 1 -188.08848 62.802727 -2278.6185
200 0.02 -0.099570972 0 0 1 -188.08487 43.35108 -2278.6203
250 0.025 -0.099570972 0 0 1 -188.07877 27.749821 -2278.6233
300 0.03 -0.099570972 0 0 1 -188.07054 19.149389 -2278.6274
350 0.035 -0.099570972 0 0 1 -188.06135 18.453387 -2278.632
400 0.04 -0.099570972 0 0 1 -188.053 24.249423 -2278.6362
450 0.045 -0.099570972 0 0 1 -188.04742 33.548008 -2278.639
500 0.05 -0.099570972 0 0 1 -188.04604 42.973172 -2278.6397
550 0.055 -0.099570972 0 0 1 -188.04935 49.902539 -2278.638
600 0.06 -0.099570972 0 0 1 -188.0567 53.166772 -2278.6344
650 0.065 -0.099570972 0 0 1 -188.06642 53.153416 -2278.6295
700 0.07 -0.099570972 0 0 1 -188.07628 51.377187 -2278.6246
750 0.075 -0.099570972 0 0 1 -188.08415 49.725449 -2278.6206
800 0.08 -0.099570972 0 0 1 -188.08857 49.663576 -2278.6184
850 0.085 -0.099570972 0 0 1 -188.0891 51.681567 -2278.6182
900 0.09 -0.099570972 0 0 1 -188.08636 55.166554 -2278.6195
950 0.095 -0.099570972 0 0 1 -188.08174 58.718232 -2278.6218
1000 0.1 -0.099570972 0 0 1 -188.0769 60.75567 -2278.6243
Loop time of 4.6196 on 1 procs for 1000 steps with 500 atoms
Performance: 1.870 ns/day, 12.832 hours/ns, 216.469 timesteps/s
99.9% CPU use with 1 MPI tasks x no OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 2.3116 | 2.3116 | 2.3116 | 0.0 | 50.04
Neigh | 0.011227 | 0.011227 | 0.011227 | 0.0 | 0.24
Comm | 0.032837 | 0.032837 | 0.032837 | 0.0 | 0.71
Output | 0.00039411 | 0.00039411 | 0.00039411 | 0.0 | 0.01
Modify | 2.2584 | 2.2584 | 2.2584 | 0.0 | 48.89
Other | | 0.005152 | | | 0.11
Nlocal: 500 ave 500 max 500 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 1956 ave 1956 max 1956 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 24065 ave 24065 max 24065 min
Histogram: 1 0 0 0 0 0 0 0 0 0
FullNghs: 48130 ave 48130 max 48130 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 48130
Ave neighs/atom = 96.26
Neighbor list builds = 6
Dangerous builds = 0
Please see the log.cite file for references relevant to this simulation
Total wall time: 0:00:04


@ -1,142 +0,0 @@
LAMMPS (30 Oct 2019)
# fcc cobalt in a 3d periodic box
clear
units metal
atom_style spin
dimension 3
boundary p p p
# necessary for the serial algorithm (sametag)
atom_modify map array
lattice fcc 3.54
Lattice spacing in x,y,z = 3.54 3.54 3.54
region box block 0.0 5.0 0.0 5.0 0.0 5.0
create_box 1 box
Created orthogonal box = (0 0 0) to (17.7 17.7 17.7)
1 by 2 by 2 MPI processor grid
create_atoms 1 box
Created 500 atoms
create_atoms CPU = 0.000808001 secs
# setting mass, mag. moments, and interactions for fcc cobalt
mass 1 58.93
#set group all spin/random 31 1.72
set group all spin 1.72 0.0 0.0 1.0
500 settings made for spin
velocity all create 100 4928459 rot yes dist gaussian
pair_style hybrid/overlay eam/alloy spin/exchange 4.0
pair_coeff * * eam/alloy Co_PurjaPun_2012.eam.alloy Co
pair_coeff * * spin/exchange exchange 4.0 0.0446928 0.003496 1.4885
neighbor 0.1 bin
neigh_modify every 10 check yes delay 20
fix 1 all precession/spin zeeman 1.0 0.0 0.0 1.0
fix_modify 1 energy yes
fix 2 all langevin/spin 0.0 0.0 21
fix 3 all nve/spin lattice moving
timestep 0.0001
# compute and output options
compute out_mag all spin
compute out_pe all pe
compute out_ke all ke
compute out_temp all temp
thermo_style custom f_1
variable magx equal c_out_mag[1]
variable magy equal c_out_mag[2]
variable magz equal c_out_mag[3]
variable magnorm equal c_out_mag[4]
variable emag equal c_out_mag[5]
variable tmag equal c_out_mag[6]
thermo_style custom step time f_1 v_magx v_magy v_magnorm v_emag temp etotal
thermo 50
# compute outsp all property/atom spx spy spz sp fmx fmy fmz
# dump 1 all custom 100 dump_cobalt_fcc.lammpstrj type x y z c_outsp[1] c_outsp[2] c_outsp[3]
run 1000
Neighbor list info ...
update every 10 steps, delay 20 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 6.59954
ghost atom cutoff = 6.59954
binsize = 3.29977, bins = 6 6 6
2 neighbor lists, perpetual/occasional/extra = 2 0 0
(1) pair eam/alloy, perpetual, half/full from (2)
attributes: half, newton on
pair build: halffull/newton
stencil: none
bin: none
(2) pair spin/exchange, perpetual
attributes: full, newton on
pair build: full/bin/atomonly
stencil: full/bin/3d
bin: standard
Per MPI rank memory allocation (min/avg/max) = 5.664 | 5.664 | 5.664 Mbytes
Step Time f_1 v_magx v_magy v_magnorm v_emag Temp TotEng
0 0 -0.099570972 0 0 1 -188.09051 100.00543 -2372.6129
50 0.005 -0.099570972 0 0 1 -188.09036 95.174807 -2372.6129
100 0.01 -0.099570972 0 0 1 -188.08965 81.854304 -2372.6129
150 0.015 -0.099570972 0 0 1 -188.0877 63.270938 -2372.6129
200 0.02 -0.099570972 0 0 1 -188.08381 43.867262 -2372.6129
250 0.025 -0.099570972 0 0 1 -188.07767 28.075261 -2372.6129
300 0.03 -0.099570972 0 0 1 -188.06966 19.046222 -2372.6129
350 0.035 -0.099570972 0 0 1 -188.06096 17.79071 -2372.6129
400 0.04 -0.099570972 0 0 1 -188.05326 23.079994 -2372.6129
450 0.045 -0.099570972 0 0 1 -188.04831 32.127316 -2372.6129
500 0.05 -0.099570972 0 0 1 -188.04737 41.709644 -2372.6129
550 0.055 -0.099570972 0 0 1 -188.05082 49.246292 -2372.6129
600 0.06 -0.099570972 0 0 1 -188.05795 53.465535 -2372.6129
650 0.065 -0.099570972 0 0 1 -188.06713 54.522857 -2372.6129
700 0.07 -0.099570972 0 0 1 -188.07626 53.635521 -2372.6129
750 0.075 -0.099570972 0 0 1 -188.08332 52.419678 -2372.6129
800 0.08 -0.099570972 0 0 1 -188.08696 52.176558 -2372.6129
850 0.085 -0.099570972 0 0 1 -188.0868 53.380592 -2372.6129
900 0.09 -0.099570972 0 0 1 -188.08348 55.551378 -2372.6129
950 0.095 -0.099570972 0 0 1 -188.07838 57.540047 -2372.6129
1000 0.1 -0.099570972 0 0 1 -188.07314 58.088674 -2372.6129
Loop time of 2.54753 on 4 procs for 1000 steps with 500 atoms
Performance: 3.392 ns/day, 7.076 hours/ns, 392.538 timesteps/s
100.0% CPU use with 4 MPI tasks x no OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0.62017 | 0.6485 | 0.66275 | 2.1 | 25.46
Neigh | 0.0027115 | 0.0029724 | 0.0030868 | 0.3 | 0.12
Comm | 0.095047 | 0.1102 | 0.13819 | 5.0 | 4.33
Output | 0.00039029 | 0.00042999 | 0.00049996 | 0.0 | 0.02
Modify | 1.7801 | 1.7834 | 1.7852 | 0.1 | 70.01
Other | | 0.002028 | | | 0.08
Nlocal: 125 ave 133 max 116 min
Histogram: 1 0 0 0 0 2 0 0 0 1
Nghost: 1099 ave 1108 max 1091 min
Histogram: 1 0 0 0 2 0 0 0 0 1
Neighs: 6032.5 ave 6417 max 5489 min
Histogram: 1 0 0 0 0 0 1 1 0 1
FullNghs: 12065 ave 13062 max 10970 min
Histogram: 1 0 0 0 0 2 0 0 0 1
Total # of neighbors = 48260
Ave neighs/atom = 96.52
Neighbor list builds = 6
Dangerous builds = 0
Please see the log.cite file for references relevant to this simulation
Total wall time: 0:00:02


@ -32,10 +32,9 @@ pair_coeff * * spin/exchange exchange 4.0 -0.3593 1.135028015e-05 1.064568567
neighbor 0.1 bin
neigh_modify every 10 check yes delay 20
#fix 1 all precession/spin zeeman 1.0 0.0 0.0 1.0
fix 1 all precession/spin anisotropy 0.01 0.0 0.0 1.0
#fix 2 all langevin/spin 0.0 0.0 21
fix 2 all langevin/spin 0.0 0.1 21
fix_modify 1 energy yes
fix 2 all langevin/spin 0.0 0.0 21
fix 3 all nve/spin lattice moving
timestep 0.0001
@ -51,8 +50,8 @@ variable magnorm equal c_out_mag[4]
variable emag equal c_out_mag[5]
variable tmag equal c_out_mag[6]
thermo_style custom step time v_magnorm v_emag temp press etotal
thermo 10
thermo_style custom step time v_magnorm pe v_emag temp press etotal
thermo 50
compute outsp all property/atom spx spy spz sp fmx fmy fmz
dump 1 all custom 100 dump_cobalt_hcp.lammpstrj type x y z c_outsp[1] c_outsp[2] c_outsp[3]
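
The net effect of this hunk on the hcp-cobalt input: the commented-out zeeman and damped-thermostat variants are dropped, the transverse damping of the spin thermostat returns to zero (0.1 to 0.0), the anisotropy fix now contributes its energy to thermo output via fix_modify, and thermo printing moves from every 10 to every 50 steps with the potential energy added as a column. The block after the change, as echoed in the new logs below, reads:

fix          1 all precession/spin anisotropy 0.01 0.0 0.0 1.0
fix_modify   1 energy yes
fix          2 all langevin/spin 0.0 0.0 21
fix          3 all nve/spin lattice moving
thermo_style custom step time v_magnorm pe v_emag temp press etotal
thermo       50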


@ -0,0 +1,142 @@
LAMMPS (19 Mar 2020)
OMP_NUM_THREADS environment is not set. Defaulting to 1 thread. (../comm.cpp:94)
using 1 OpenMP thread(s) per MPI task
# hcp cobalt in a 3d periodic box
clear
OMP_NUM_THREADS environment is not set. Defaulting to 1 thread. (../comm.cpp:94)
using 1 OpenMP thread(s) per MPI task
units metal
atom_style spin
dimension 3
boundary p p p
# necessary for the serial algorithm (sametag)
atom_modify map array
lattice hcp 2.5071
Lattice spacing in x,y,z = 2.5071 4.34242 4.09408
region box block 0.0 5.0 0.0 5.0 0.0 5.0
create_box 1 box
Created orthogonal box = (0 0 0) to (12.5355 21.7121 20.4704)
1 by 1 by 1 MPI processor grid
create_atoms 1 box
Created 500 atoms
create_atoms CPU = 0.00121403 secs
# setting mass, mag. moments, and interactions for hcp cobalt
mass 1 58.93
set group all spin/random 31 1.72
500 settings made for spin/random
#set group all spin 1.72 0.0 0.0 1.0
velocity all create 100 4928459 rot yes dist gaussian
#pair_style hybrid/overlay eam/alloy spin/exchange 4.0 spin/neel 4.0
pair_style hybrid/overlay eam/alloy spin/exchange 4.0
pair_coeff * * eam/alloy Co_PurjaPun_2012.eam.alloy Co
pair_coeff * * spin/exchange exchange 4.0 -0.3593 1.135028015e-05 1.064568567
#pair_coeff * * spin/neel neel 4.0 0.0048 0.234 1.168 2.6905 0.705 0.652
neighbor 0.1 bin
neigh_modify every 10 check yes delay 20
fix 1 all precession/spin anisotropy 0.01 0.0 0.0 1.0
fix_modify 1 energy yes
fix 2 all langevin/spin 0.0 0.0 21
fix 3 all nve/spin lattice moving
timestep 0.0001
compute out_mag all spin
compute out_pe all pe
compute out_ke all ke
compute out_temp all temp
variable magz equal c_out_mag[3]
variable magnorm equal c_out_mag[4]
variable emag equal c_out_mag[5]
variable tmag equal c_out_mag[6]
thermo_style custom step time v_magnorm pe v_emag temp press etotal
thermo 50
compute outsp all property/atom spx spy spz sp fmx fmy fmz
dump 1 all custom 100 dump_cobalt_hcp.lammpstrj type x y z c_outsp[1] c_outsp[2] c_outsp[3]
run 1000
Neighbor list info ...
update every 10 steps, delay 20 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 6.59954
ghost atom cutoff = 6.59954
binsize = 3.29977, bins = 4 7 7
2 neighbor lists, perpetual/occasional/extra = 2 0 0
(1) pair eam/alloy, perpetual, half/full from (2)
attributes: half, newton on
pair build: halffull/newton
stencil: none
bin: none
(2) pair spin/exchange, perpetual
attributes: full, newton on
pair build: full/bin/atomonly
stencil: full/bin/3d
bin: standard
Per MPI rank memory allocation (min/avg/max) = 7.902 | 7.902 | 7.902 Mbytes
Step Time v_magnorm PotEng v_emag Temp Press TotEng
0 0 0.076558814 -2197.5536 -2.5536882 100.00543 -552.75983 -2191.1032
50 0.005 0.079437931 -2197.3113 -2.6177795 96.18776 -337.75504 -2191.1071
100 0.01 0.079575823 -2196.5768 -2.7375927 84.740309 571.91195 -2191.1109
150 0.015 0.078526145 -2195.4996 -2.8719243 67.984081 1845.185 -2191.1146
200 0.02 0.077001318 -2194.3502 -3.019723 50.107518 3008.9709 -2191.1182
250 0.025 0.077628454 -2193.403 -3.1832392 35.364524 4018.0217 -2191.122
300 0.03 0.077407462 -2192.8618 -3.3557644 26.910583 4535.9542 -2191.126
350 0.035 0.078090775 -2192.8052 -3.5305639 25.971561 4733.0322 -2191.13
400 0.04 0.078594494 -2193.135 -3.6772939 31.026665 4309.2088 -2191.1338
450 0.045 0.079898162 -2193.6965 -3.8044501 39.672566 3591.9593 -2191.1376
500 0.05 0.079885039 -2194.3293 -3.9435795 49.423774 2698.4519 -2191.1414
550 0.055 0.077196547 -2194.8554 -4.0868278 57.523322 1934.2444 -2191.1451
600 0.06 0.075430904 -2195.1797 -4.2355252 62.494025 1597.2543 -2191.1488
650 0.065 0.076510964 -2195.2854 -4.3752086 64.080496 1656.2312 -2191.1522
700 0.07 0.07649426 -2195.2723 -4.5226349 63.825926 1521.7541 -2191.1555
750 0.075 0.076254777 -2195.2746 -4.6937954 63.804162 1505.1323 -2191.1592
800 0.08 0.074211447 -2195.3567 -4.8567561 65.022623 1203.5409 -2191.1627
850 0.085 0.072034236 -2195.5531 -5.0007443 68.003461 900.03381 -2191.1668
900 0.09 0.071097702 -2195.8563 -5.1391578 72.641879 548.08834 -2191.1709
950 0.095 0.072642434 -2196.2007 -5.2853353 77.926596 194.45928 -2191.1743
1000 0.1 0.07306537 -2196.46 -5.4169261 81.891897 -404.70871 -2191.1779
Loop time of 4.78824 on 1 procs for 1000 steps with 500 atoms
Performance: 1.804 ns/day, 13.301 hours/ns, 208.845 timesteps/s
100.0% CPU use with 1 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 2.3284 | 2.3284 | 2.3284 | 0.0 | 48.63
Neigh | 0.01273 | 0.01273 | 0.01273 | 0.0 | 0.27
Comm | 0.042432 | 0.042432 | 0.042432 | 0.0 | 0.89
Output | 0.0083201 | 0.0083201 | 0.0083201 | 0.0 | 0.17
Modify | 2.3895 | 2.3895 | 2.3895 | 0.0 | 49.90
Other | | 0.006823 | | | 0.14
Nlocal: 500 ave 500 max 500 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 2444 ave 2444 max 2444 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 27036 ave 27036 max 27036 min
Histogram: 1 0 0 0 0 0 0 0 0 0
FullNghs: 54072 ave 54072 max 54072 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 54072
Ave neighs/atom = 108.144
Neighbor list builds = 6
Dangerous builds = 0
Please see the log.cite file for references relevant to this simulation
Total wall time: 0:00:04
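
Unlike the fcc runs, the hcp benchmarks start from randomized spin directions, use a uniaxial anisotropy in place of the Zeeman term, and write per-atom spin data to a trajectory. The relevant lines, taken from the script echoed above:

set     group all spin/random 31 1.72      # random directions, seed 31, moment norm 1.72
fix     1 all precession/spin anisotropy 0.01 0.0 0.0 1.0
compute outsp all property/atom spx spy spz sp fmx fmy fmz
dump    1 all custom 100 dump_cobalt_hcp.lammpstrj type x y z c_outsp[1] c_outsp[2] c_outsp[3]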


@ -0,0 +1,142 @@
LAMMPS (19 Mar 2020)
OMP_NUM_THREADS environment is not set. Defaulting to 1 thread. (../comm.cpp:94)
using 1 OpenMP thread(s) per MPI task
# hcp cobalt in a 3d periodic box
clear
OMP_NUM_THREADS environment is not set. Defaulting to 1 thread. (../comm.cpp:94)
using 1 OpenMP thread(s) per MPI task
units metal
atom_style spin
dimension 3
boundary p p p
# necessary for the serial algorithm (sametag)
atom_modify map array
lattice hcp 2.5071
Lattice spacing in x,y,z = 2.5071 4.34242 4.09408
region box block 0.0 5.0 0.0 5.0 0.0 5.0
create_box 1 box
Created orthogonal box = (0 0 0) to (12.5355 21.7121 20.4704)
1 by 2 by 2 MPI processor grid
create_atoms 1 box
Created 500 atoms
create_atoms CPU = 0.000710964 secs
# setting mass, mag. moments, and interactions for hcp cobalt
mass 1 58.93
set group all spin/random 31 1.72
500 settings made for spin/random
#set group all spin 1.72 0.0 0.0 1.0
velocity all create 100 4928459 rot yes dist gaussian
#pair_style hybrid/overlay eam/alloy spin/exchange 4.0 spin/neel 4.0
pair_style hybrid/overlay eam/alloy spin/exchange 4.0
pair_coeff * * eam/alloy Co_PurjaPun_2012.eam.alloy Co
pair_coeff * * spin/exchange exchange 4.0 -0.3593 1.135028015e-05 1.064568567
#pair_coeff * * spin/neel neel 4.0 0.0048 0.234 1.168 2.6905 0.705 0.652
neighbor 0.1 bin
neigh_modify every 10 check yes delay 20
fix 1 all precession/spin anisotropy 0.01 0.0 0.0 1.0
fix_modify 1 energy yes
fix 2 all langevin/spin 0.0 0.0 21
fix 3 all nve/spin lattice moving
timestep 0.0001
compute out_mag all spin
compute out_pe all pe
compute out_ke all ke
compute out_temp all temp
variable magz equal c_out_mag[3]
variable magnorm equal c_out_mag[4]
variable emag equal c_out_mag[5]
variable tmag equal c_out_mag[6]
thermo_style custom step time v_magnorm pe v_emag temp press etotal
thermo 50
compute outsp all property/atom spx spy spz sp fmx fmy fmz
dump 1 all custom 100 dump_cobalt_hcp.lammpstrj type x y z c_outsp[1] c_outsp[2] c_outsp[3]
run 1000
Neighbor list info ...
update every 10 steps, delay 20 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 6.59954
ghost atom cutoff = 6.59954
binsize = 3.29977, bins = 4 7 7
2 neighbor lists, perpetual/occasional/extra = 2 0 0
(1) pair eam/alloy, perpetual, half/full from (2)
attributes: half, newton on
pair build: halffull/newton
stencil: none
bin: none
(2) pair spin/exchange, perpetual
attributes: full, newton on
pair build: full/bin/atomonly
stencil: full/bin/3d
bin: standard
Per MPI rank memory allocation (min/avg/max) = 7.814 | 7.814 | 7.815 Mbytes
Step Time v_magnorm PotEng v_emag Temp Press TotEng
0 0 0.076558814 -2197.5536 -2.5536882 100.00543 -552.75983 -2191.1032
50 0.005 0.079452711 -2197.3115 -2.6266704 96.190558 -328.47868 -2191.1071
100 0.01 0.079678568 -2196.5823 -2.7759025 84.826338 585.21827 -2191.1109
150 0.015 0.078665787 -2195.5034 -2.9426881 68.043637 1872.5465 -2191.1146
200 0.02 0.076875906 -2194.3466 -3.1163511 50.052941 2986.3962 -2191.1181
250 0.025 0.076865073 -2193.3857 -3.2792912 35.095608 4003.4846 -2191.122
300 0.03 0.07675751 -2192.8611 -3.4452905 26.902302 4561.1429 -2191.1259
350 0.035 0.077351833 -2192.8579 -3.6193072 26.792986 4523.2641 -2191.1297
400 0.04 0.077672952 -2193.275 -3.7845654 33.199609 4108.3226 -2191.1336
450 0.045 0.077553541 -2193.9028 -3.9247064 42.874729 3267.3626 -2191.1373
500 0.05 0.076992612 -2194.5433 -4.0593738 52.743363 2317.5276 -2191.1413
550 0.055 0.074971927 -2195.0364 -4.1961092 60.332059 1620.5766 -2191.145
600 0.06 0.072652113 -2195.3154 -4.3458839 64.600641 1265.4418 -2191.1486
650 0.065 0.071405665 -2195.392 -4.5049778 65.734457 1221.4637 -2191.1521
700 0.07 0.072030336 -2195.337 -4.6535106 64.831697 1224.9583 -2191.1553
750 0.075 0.072468553 -2195.2702 -4.7829549 63.746912 1220.2392 -2191.1585
800 0.08 0.071546019 -2195.3271 -4.9161885 64.581676 1134.3858 -2191.1616
850 0.085 0.071414723 -2195.5618 -5.0652271 68.168922 864.52044 -2191.1648
900 0.09 0.073428472 -2195.9219 -5.219789 73.702937 556.14868 -2191.168
950 0.095 0.0745891 -2196.3065 -5.3782699 79.616238 -64.458151 -2191.1711
1000 0.1 0.074027925 -2196.6049 -5.5446493 84.189424 -640.80166 -2191.1746
Loop time of 2.86028 on 4 procs for 1000 steps with 500 atoms
Performance: 3.021 ns/day, 7.945 hours/ns, 349.616 timesteps/s
100.0% CPU use with 4 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0.6266 | 0.66621 | 0.68577 | 3.0 | 23.29
Neigh | 0.0030921 | 0.0031813 | 0.0032392 | 0.1 | 0.11
Comm | 0.1184 | 0.14023 | 0.18062 | 6.6 | 4.90
Output | 0.0031779 | 0.0032207 | 0.0033176 | 0.1 | 0.11
Modify | 2.0432 | 2.0445 | 2.0467 | 0.1 | 71.48
Other | | 0.002987 | | | 0.10
Nlocal: 125 ave 128 max 121 min
Histogram: 1 0 1 0 0 0 0 0 0 2
Nghost: 1324 ave 1331 max 1318 min
Histogram: 2 0 0 0 0 0 0 0 1 1
Neighs: 6756.5 ave 6978 max 6543 min
Histogram: 1 1 0 0 0 0 0 0 1 1
FullNghs: 13513 ave 13915 max 13042 min
Histogram: 1 1 0 0 0 0 0 0 0 2
Total # of neighbors = 54052
Ave neighs/atom = 108.104
Neighbor list builds = 6
Dangerous builds = 0
Please see the log.cite file for references relevant to this simulation
Total wall time: 0:00:02


@ -1,219 +0,0 @@
LAMMPS (30 Oct 2019)
# hcp cobalt in a 3d periodic box
clear
units metal
atom_style spin
dimension 3
boundary p p p
# necessary for the serial algorithm (sametag)
atom_modify map array
lattice hcp 2.5071
Lattice spacing in x,y,z = 2.5071 4.34242 4.09408
region box block 0.0 5.0 0.0 5.0 0.0 5.0
create_box 1 box
Created orthogonal box = (0 0 0) to (12.5355 21.7121 20.4704)
1 by 1 by 1 MPI processor grid
create_atoms 1 box
Created 500 atoms
create_atoms CPU = 0.00105 secs
# setting mass, mag. moments, and interactions for hcp cobalt
mass 1 58.93
set group all spin/random 31 1.72
500 settings made for spin/random
#set group all spin 1.72 0.0 0.0 1.0
velocity all create 100 4928459 rot yes dist gaussian
#pair_style hybrid/overlay eam/alloy spin/exchange 4.0 spin/neel 4.0
pair_style hybrid/overlay eam/alloy spin/exchange 4.0
pair_coeff * * eam/alloy Co_PurjaPun_2012.eam.alloy Co
pair_coeff * * spin/exchange exchange 4.0 -0.3593 1.135028015e-05 1.064568567
#pair_coeff * * spin/neel neel 4.0 0.0048 0.234 1.168 2.6905 0.705 0.652
neighbor 0.1 bin
neigh_modify every 10 check yes delay 20
#fix 1 all precession/spin zeeman 1.0 0.0 0.0 1.0
fix 1 all precession/spin anisotropy 0.01 0.0 0.0 1.0
#fix 2 all langevin/spin 0.0 0.0 21
fix 2 all langevin/spin 0.0 0.1 21
fix 3 all nve/spin lattice moving
timestep 0.0001
compute out_mag all spin
compute out_pe all pe
compute out_ke all ke
compute out_temp all temp
variable magz equal c_out_mag[3]
variable magnorm equal c_out_mag[4]
variable emag equal c_out_mag[5]
variable tmag equal c_out_mag[6]
thermo_style custom step time v_magnorm v_emag temp press etotal
thermo 10
compute outsp all property/atom spx spy spz sp fmx fmy fmz
dump 1 all custom 100 dump_cobalt_hcp.lammpstrj type x y z c_outsp[1] c_outsp[2] c_outsp[3]
run 1000
Neighbor list info ...
update every 10 steps, delay 20 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 6.59954
ghost atom cutoff = 6.59954
binsize = 3.29977, bins = 4 7 7
2 neighbor lists, perpetual/occasional/extra = 2 0 0
(1) pair eam/alloy, perpetual, half/full from (2)
attributes: half, newton on
pair build: halffull/newton
stencil: none
bin: none
(2) pair spin/exchange, perpetual
attributes: full, newton on
pair build: full/bin/atomonly
stencil: full/bin/3d
bin: standard
Per MPI rank memory allocation (min/avg/max) = 7.902 | 7.902 | 7.902 Mbytes
Step Time v_magnorm v_emag Temp Press TotEng
0 0 0.076558814 -5.1073764 100.00543 -552.75983 -2189.4486
10 0.001 0.074494403 -6.2746901 100.01038 -1571.7966 -2190.0317
20 0.002 0.072366265 -7.4280779 99.885587 -2535.9845 -2190.5874
30 0.003 0.070127018 -8.5667999 99.611653 -3445.9872 -2191.1169
40 0.004 0.067755946 -9.6899272 99.164813 -4302.5715 -2191.6215
50 0.005 0.065261592 -10.79648 98.520535 -5107.2841 -2192.103
60 0.006 0.062676613 -11.885341 97.657148 -5862.7198 -2192.5638
70 0.007 0.060046709 -12.955115 96.558718 -6572.0571 -2193.0064
80 0.008 0.057417313 -14.004096 95.216748 -7238.1396 -2193.4327
90 0.009 0.054822275 -15.030416 93.630634 -7862.5226 -2193.8437
100 0.01 0.052277835 -16.032345 91.80711 -8445.2646 -2194.2391
110 0.011 0.049783153 -17.008652 89.759163 -8985.5937 -2194.6181
120 0.012 0.047326373 -17.958895 87.504922 -9483.1141 -2194.98
130 0.013 0.044893289 -18.883574 85.066818 -9938.8838 -2195.3247
140 0.014 0.042474822 -19.784052 82.471014 -10355.911 -2195.6533
150 0.015 0.040070404 -20.662271 79.746901 -10739.081 -2195.9677
160 0.016 0.037686856 -21.520294 76.926428 -11094.793 -2196.2709
170 0.017 0.035334961 -22.359822 74.043181 -11430.247 -2196.5659
180 0.018 0.033026799 -23.181822 71.131269 -11752.268 -2196.8556
190 0.019 0.030775544 -23.986406 68.224204 -12065.774 -2197.1412
200 0.02 0.028597121 -24.773013 65.353995 -12372.712 -2197.4226
210 0.021 0.026511775 -25.540835 62.55053 -12672.055 -2197.6975
220 0.022 0.02454383 -26.289327 59.841288 -12961.112 -2197.9631
230 0.023 0.02271918 -27.018625 57.251361 -13237.544 -2198.2165
240 0.024 0.021061271 -27.729714 54.80373 -13501.028 -2198.4564
250 0.025 0.019587072 -28.42449 52.519717 -13754.325 -2198.6833
260 0.026 0.018304494 -29.105398 50.419388 -14002.718 -2198.8994
270 0.027 0.017211977 -29.775134 48.521812 -14253.089 -2199.1079
280 0.028 0.016300002 -30.436204 46.845075 -14512.437 -2199.3119
290 0.029 0.015553519 -31.090499 45.405985 -14786.53 -2199.5143
300 0.03 0.014954102 -31.739026 44.219544 -15079.165 -2199.7168
310 0.031 0.014481189 -32.381585 43.298175 -15391.531 -2199.9198
320 0.032 0.014112494 -33.016984 42.650874 -15722.828 -2200.1226
330 0.033 0.013824206 -33.643289 42.282535 -16070.874 -2200.324
340 0.034 0.013591568 -34.258323 42.19365 -16433.065 -2200.5226
350 0.035 0.013390035 -34.860184 42.380506 -16807.186 -2200.7174
360 0.036 0.01319679 -35.447655 42.835832 -17191.816 -2200.9077
370 0.037 0.012992271 -36.020512 43.549656 -17586.676 -2201.0935
380 0.038 0.012761486 -36.579332 44.510078 -17991.857 -2201.2754
390 0.039 0.012494918 -37.125414 45.703757 -18407.738 -2201.4538
400 0.04 0.0121888 -37.660321 47.115967 -18834.276 -2201.6292
410 0.041 0.011844474 -38.185489 48.730291 -19270.674 -2201.8019
420 0.042 0.011466715 -38.70192 50.528119 -19715.276 -2201.9716
430 0.043 0.011061388 -39.21005 52.488204 -20165.66 -2202.1377
440 0.044 0.010633241 -39.709778 54.586528 -20618.997 -2202.2998
450 0.045 0.010184696 -40.200724 56.79654 -21072.538 -2202.4571
460 0.046 0.0097161044 -40.682449 59.089699 -21523.873 -2202.6094
470 0.047 0.0092271788 -41.154614 61.436133 -21970.922 -2202.7565
480 0.048 0.0087187266 -41.617256 63.805414 -22412.32 -2202.8989
490 0.049 0.0081937768 -42.070708 66.167399 -22847.061 -2203.037
500 0.05 0.0076576327 -42.51563 68.493235 -23274.619 -2203.172
510 0.051 0.0071170477 -42.952841 70.756444 -23694.559 -2203.3046
520 0.052 0.006579078 -43.383338 72.933996 -24106.717 -2203.4358
530 0.053 0.006050144 -43.807962 75.007131 -24510.338 -2203.5662
540 0.054 0.0055354475 -44.227552 76.961803 -24904.495 -2203.6957
550 0.055 0.0050386503 -44.64268 78.788647 -25287.341 -2203.8241
560 0.056 0.0045617699 -45.053996 80.4825 -25657.11 -2203.9504
570 0.057 0.0041054334 -45.461923 82.041527 -26011.443 -2204.0737
580 0.058 0.003669689 -45.866895 83.466142 -26348.265 -2204.1931
590 0.059 0.0032553824 -46.269219 84.757926 -26665.834 -2204.3077
600 0.06 0.0028655752 -46.669125 85.918711 -26963.24 -2204.4173
610 0.061 0.0025060765 -47.066641 86.95 -27240.331 -2204.5218
620 0.062 0.0021839971 -47.461566 87.852838 -27497.728 -2204.6218
630 0.063 0.0019039581 -47.853462 88.628142 -27736.503 -2204.7177
640 0.064 0.0016633855 -48.241747 89.277364 -27957.91 -2204.81
650 0.065 0.0014502904 -48.625803 89.803307 -28163.11 -2204.899
660 0.066 0.0012463786 -49.005026 90.210807 -28352.881 -2204.9847
670 0.067 0.0010345087 -49.378935 90.507107 -28527.721 -2205.0668
680 0.068 0.00080788134 -49.747325 90.701795 -28688.395 -2205.1453
690 0.069 0.000586442 -50.110227 90.80636 -28836.094 -2205.22
700 0.07 0.00046855102 -50.467799 90.833539 -28972.361 -2205.2911
710 0.071 0.00061091693 -50.82044 90.796649 -29099.44 -2205.3592
720 0.072 0.00094960177 -51.168606 90.709122 -29219.676 -2205.4249
730 0.073 0.0013742455 -51.512913 90.584346 -29335.643 -2205.4887
740 0.074 0.0018397629 -51.853957 90.435783 -29449.521 -2205.5511
750 0.075 0.0023216474 -52.192407 90.277231 -29563.316 -2205.6124
760 0.076 0.0028000512 -52.528883 90.123061 -29678.726 -2205.6729
770 0.077 0.0032569295 -52.863859 89.98824 -29797.079 -2205.7329
780 0.078 0.0036765431 -53.197843 89.888047 -29919.964 -2205.7925
790 0.079 0.0040467094 -53.530921 89.837568 -30048.271 -2205.8521
800 0.08 0.0043597837 -53.862938 89.850978 -30182.622 -2205.9119
810 0.081 0.0046129296 -54.193489 89.940884 -30323.293 -2205.9718
820 0.082 0.0048076151 -54.522077 90.117797 -30470.468 -2206.0321
830 0.083 0.004948533 -54.84813 90.389814 -30624.056 -2206.0926
840 0.084 0.0050423324 -55.171024 90.762454 -30783.658 -2206.1532
850 0.085 0.0050965581 -55.490357 91.238681 -30949.141 -2206.2139
860 0.086 0.0051190641 -55.805904 91.818973 -31120.5 -2206.2745
870 0.087 0.0051180301 -56.117429 92.501449 -31297.412 -2206.3349
880 0.088 0.0051024116 -56.424751 93.281992 -31479.436 -2206.3949
890 0.089 0.005082454 -56.727832 94.154367 -31666.293 -2206.4544
900 0.09 0.0050697645 -57.026442 95.110386 -31857.043 -2206.513
910 0.091 0.0050765431 -57.320291 96.140056 -32050.436 -2206.5703
920 0.092 0.0051139309 -57.609075 97.231838 -32245.079 -2206.6257
930 0.093 0.0051899535 -57.89236 98.372982 -32439.141 -2206.6788
940 0.094 0.0053078572 -58.169742 99.54995 -32630.727 -2206.7288
950 0.095 0.0054654923 -58.44083 100.74893 -32817.882 -2206.7752
960 0.096 0.0056558757 -58.705483 101.95638 -32999.116 -2206.8176
970 0.097 0.0058685513 -58.963698 103.15953 -33173.159 -2206.8557
980 0.098 0.0060912487 -59.215624 104.34681 -33338.961 -2206.8893
990 0.099 0.0063114886 -59.461806 105.50819 -33496.345 -2206.9188
1000 0.1 0.0065179843 -59.702883 106.63524 -33645.259 -2206.9444
Loop time of 5.20295 on 1 procs for 1000 steps with 500 atoms
Performance: 1.661 ns/day, 14.453 hours/ns, 192.199 timesteps/s
100.0% CPU use with 1 MPI tasks x no OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 2.6241 | 2.6241 | 2.6241 | 0.0 | 50.43
Neigh | 0.01424 | 0.01424 | 0.01424 | 0.0 | 0.27
Comm | 0.041207 | 0.041207 | 0.041207 | 0.0 | 0.79
Output | 0.0090086 | 0.0090086 | 0.0090086 | 0.0 | 0.17
Modify | 2.5084 | 2.5084 | 2.5084 | 0.0 | 48.21
Other | | 0.006008 | | | 0.12
Nlocal: 500 ave 500 max 500 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 2442 ave 2442 max 2442 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 27581 ave 27581 max 27581 min
Histogram: 1 0 0 0 0 0 0 0 0 0
FullNghs: 55162 ave 55162 max 55162 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 55162
Ave neighs/atom = 110.324
Neighbor list builds = 7
Dangerous builds = 0
Please see the log.cite file for references relevant to this simulation
Total wall time: 0:00:05


@ -1,219 +0,0 @@
LAMMPS (30 Oct 2019)
# hcp cobalt in a 3d periodic box
clear
units metal
atom_style spin
dimension 3
boundary p p p
# necessary for the serial algorithm (sametag)
atom_modify map array
lattice hcp 2.5071
Lattice spacing in x,y,z = 2.5071 4.34242 4.09408
region box block 0.0 5.0 0.0 5.0 0.0 5.0
create_box 1 box
Created orthogonal box = (0 0 0) to (12.5355 21.7121 20.4704)
1 by 2 by 2 MPI processor grid
create_atoms 1 box
Created 500 atoms
create_atoms CPU = 0.00101995 secs
# setting mass, mag. moments, and interactions for hcp cobalt
mass 1 58.93
set group all spin/random 31 1.72
500 settings made for spin/random
#set group all spin 1.72 0.0 0.0 1.0
velocity all create 100 4928459 rot yes dist gaussian
#pair_style hybrid/overlay eam/alloy spin/exchange 4.0 spin/neel 4.0
pair_style hybrid/overlay eam/alloy spin/exchange 4.0
pair_coeff * * eam/alloy Co_PurjaPun_2012.eam.alloy Co
pair_coeff * * spin/exchange exchange 4.0 -0.3593 1.135028015e-05 1.064568567
#pair_coeff * * spin/neel neel 4.0 0.0048 0.234 1.168 2.6905 0.705 0.652
neighbor 0.1 bin
neigh_modify every 10 check yes delay 20
#fix 1 all precession/spin zeeman 1.0 0.0 0.0 1.0
fix 1 all precession/spin anisotropy 0.01 0.0 0.0 1.0
#fix 2 all langevin/spin 0.0 0.0 21
fix 2 all langevin/spin 0.0 0.1 21
fix 3 all nve/spin lattice moving
timestep 0.0001
compute out_mag all spin
compute out_pe all pe
compute out_ke all ke
compute out_temp all temp
variable magz equal c_out_mag[3]
variable magnorm equal c_out_mag[4]
variable emag equal c_out_mag[5]
variable tmag equal c_out_mag[6]
thermo_style custom step time v_magnorm v_emag temp press etotal
thermo 10
compute outsp all property/atom spx spy spz sp fmx fmy fmz
dump 1 all custom 100 dump_cobalt_hcp.lammpstrj type x y z c_outsp[1] c_outsp[2] c_outsp[3]
run 1000
Neighbor list info ...
update every 10 steps, delay 20 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 6.59954
ghost atom cutoff = 6.59954
binsize = 3.29977, bins = 4 7 7
2 neighbor lists, perpetual/occasional/extra = 2 0 0
(1) pair eam/alloy, perpetual, half/full from (2)
attributes: half, newton on
pair build: halffull/newton
stencil: none
bin: none
(2) pair spin/exchange, perpetual
attributes: full, newton on
pair build: full/bin/atomonly
stencil: full/bin/3d
bin: standard
Per MPI rank memory allocation (min/avg/max) = 7.814 | 7.814 | 7.815 Mbytes
Step Time v_magnorm v_emag Temp Press TotEng
0 0 0.076558814 -5.1073764 100.00543 -552.75983 -2190.3478
10 0.001 0.074494512 -6.2728301 99.980769 -1570.0726 -2191.5261
20 0.002 0.072367013 -7.4259977 99.847801 -2531.5119 -2192.6655
30 0.003 0.070129365 -8.566306 99.586282 -3438.1309 -2193.7672
40 0.004 0.067761178 -9.6929189 99.171132 -4291.017 -2194.8323
50 0.005 0.065270916 -10.8048 98.575397 -5091.9111 -2195.8628
60 0.006 0.062690557 -11.900573 97.773618 -5843.4528 -2196.8612
70 0.007 0.060064592 -12.978381 96.745047 -6548.726 -2197.8306
80 0.008 0.05743694 -14.035923 95.476292 -7210.2954 -2198.773
90 0.009 0.054839883 -15.07074 93.963026 -7829.4252 -2199.689
100 0.01 0.052288504 -16.08066 92.210482 -8405.9983 -2200.5773
110 0.011 0.049782155 -17.064251 90.232741 -8939.3051 -2201.4357
120 0.012 0.047311759 -18.021135 88.051042 -9429.1353 -2202.2626
130 0.013 0.044869196 -18.952065 85.691573 -9876.5628 -2203.0575
140 0.014 0.042453961 -19.858739 83.18315 -10284.249 -2203.8215
150 0.015 0.040074171 -20.743348 80.555177 -10656.417 -2204.5569
160 0.016 0.037742459 -21.608 77.836156 -10998.818 -2205.2677
170 0.017 0.035470168 -22.454209 75.052994 -11318.525 -2205.9587
180 0.018 0.033263447 -23.282658 72.231211 -11623.118 -2206.6354
190 0.019 0.031122821 -24.093311 69.395936 -11919.248 -2207.3023
200 0.02 0.029045634 -24.88579 66.573223 -12211.306 -2207.9613
210 0.021 0.027029857 -25.659817 63.791041 -12500.812 -2208.6115
220 0.022 0.025077742 -26.415541 61.079413 -12787.018 -2209.2498
230 0.023 0.023198048 -27.153652 58.469604 -13068.277 -2209.8722
240 0.024 0.02140599 -27.875313 55.992687 -13343.621 -2210.4756
250 0.025 0.019720922 -28.581973 53.678031 -13613.86 -2211.0588
260 0.026 0.018162738 -29.275283 51.552191 -13882.15 -2211.6232
270 0.027 0.016748514 -29.956802 49.638467 -14153.137 -2212.1718
280 0.028 0.01549075 -30.628043 47.957071 -14432.246 -2212.7087
290 0.029 0.014397611 -31.290177 46.525552 -14724.005 -2213.2371
300 0.03 0.013474315 -31.943984 45.359085 -15031.315 -2213.759
310 0.031 0.012723957 -32.589853 44.47023 -15355.595 -2214.275
320 0.032 0.012146358 -33.227585 43.868153 -15696.845 -2214.7851
330 0.033 0.011734827 -33.856656 43.557623 -16054.887 -2215.289
340 0.034 0.011472508 -34.476313 43.538346 -16429.77 -2215.7871
350 0.035 0.011330772 -35.085716 43.805034 -16821.627 -2216.2802
360 0.036 0.011271169 -35.684147 44.348312 -17230.21 -2216.7687
370 0.037 0.01125027 -36.271215 45.156046 -17654.485 -2217.2524
380 0.038 0.011225354 -36.847053 46.214576 -18092.623 -2217.7301
390 0.039 0.011159026 -37.412284 47.509345 -18542.156 -2218.2003
400 0.04 0.011022073 -37.967916 49.024843 -19000.554 -2218.6614
410 0.041 0.01079477 -38.515123 50.744046 -19465.713 -2219.1128
420 0.042 0.010467095 -39.054921 52.647653 -19935.873 -2219.5544
430 0.043 0.010038219 -39.588034 54.713405 -20409.666 -2219.9869
440 0.044 0.0095155267 -40.114703 56.915658 -20885.556 -2220.4109
450 0.045 0.0089134996 -40.634722 59.225397 -21361.621 -2220.8268
460 0.046 0.0082528918 -41.147681 61.610799 -21835.762 -2221.2347
470 0.047 0.0075606723 -41.653088 64.038349 -22305.687 -2221.6343
480 0.048 0.0068707613 -42.150486 66.474377 -22768.948 -2222.0253
490 0.049 0.0062249854 -42.639704 68.886721 -23223.418 -2222.4076
500 0.05 0.0056723593 -43.120772 71.24617 -23667.077 -2222.7814
510 0.051 0.00526312 -43.59404 73.527392 -24098.459 -2223.147
520 0.052 0.0050342241 -44.059917 75.709206 -24516.163 -2223.5051
530 0.053 0.0049906301 -44.518898 77.774314 -24919.192 -2223.8564
540 0.054 0.0050976586 -44.971364 79.708763 -25306.611 -2224.2014
550 0.055 0.0052941974 -45.417577 81.501347 -25677.67 -2224.5405
560 0.056 0.0055157717 -45.857628 83.143173 -26031.673 -2224.8736
570 0.057 0.0057113414 -46.291426 84.627457 -26367.904 -2225.2003
580 0.058 0.0058493207 -46.718709 85.949497 -26685.6 -2225.52
590 0.059 0.0059162201 -47.139052 87.10679 -26984.124 -2225.8316
600 0.06 0.0059118584 -47.551892 88.099176 -27263.145 -2226.1347
610 0.061 0.005843747 -47.956571 88.928929 -27522.773 -2226.4287
620 0.062 0.0057222223 -48.352422 89.600763 -27763.549 -2226.7139
630 0.063 0.0055570967 -48.738876 90.12173 -27986.321 -2226.9905
640 0.064 0.0053558993 -49.115723 90.501081 -28192.238 -2227.2593
650 0.065 0.0051233209 -49.483122 90.750056 -28382.3 -2227.5205
660 0.066 0.0048614512 -49.841791 90.881635 -28557.623 -2227.7746
670 0.067 0.0045706003 -50.192974 90.910245 -28719.422 -2228.0219
680 0.068 0.0042506564 -50.538196 90.851397 -28868.809 -2228.2627
690 0.069 0.0039028575 -50.879364 90.721317 -29007.619 -2228.4973
700 0.07 0.0035319814 -51.218193 90.536521 -29137.623 -2228.7265
710 0.071 0.0031491486 -51.556251 90.313501 -29261.193 -2228.9511
720 0.072 0.0027758205 -51.894643 90.068503 -29380.924 -2229.1724
730 0.073 0.002449449 -52.233987 89.817462 -29499.606 -2229.3917
740 0.074 0.0022276613 -52.574465 89.57612 -29620.196 -2229.6103
750 0.075 0.0021767124 -52.915641 89.360246 -29744.882 -2229.829
760 0.076 0.0023310362 -53.256843 89.185838 -29875.573 -2230.0485
770 0.077 0.0026637349 -53.597197 89.069228 -30013.477 -2230.2685
780 0.078 0.0031129938 -53.93565 89.026943 -30158.812 -2230.4882
790 0.079 0.0036204667 -54.271339 89.075322 -30311.602 -2230.7066
800 0.08 0.0041448552 -54.603455 89.229912 -30471.244 -2230.9226
810 0.081 0.0046613106 -54.931421 89.504766 -30636.938 -2231.1352
820 0.082 0.0051580947 -55.255056 89.911726 -30808.087 -2231.3434
830 0.083 0.0056329652 -55.574491 90.459766 -30984.153 -2231.5469
840 0.084 0.0060893356 -55.890024 91.154456 -31164.372 -2231.7452
850 0.085 0.0065324419 -56.202052 91.997528 -31347.792 -2231.9379
860 0.086 0.0069661977 -56.511206 92.986622 -31533.977 -2232.1249
870 0.087 0.0073913051 -56.817814 94.115192 -31721.92 -2232.306
880 0.088 0.0078048547 -57.122061 95.372548 -31910.795 -2232.4809
890 0.089 0.008201165 -57.423984 96.744135 -32100.108 -2232.65
900 0.09 0.0085732702 -57.723377 98.212046 -32289.532 -2232.8136
910 0.091 0.0089144724 -58.019938 99.755667 -32479.154 -2232.9728
920 0.092 0.0092194916 -58.313266 101.35254 -32669.227 -2233.1285
930 0.093 0.0094849872 -58.602956 102.97932 -32860.091 -2233.2822
940 0.094 0.0097093572 -58.888668 104.61271 -33051.981 -2233.4348
950 0.095 0.0098920175 -59.169925 106.23045 -33244.279 -2233.5871
960 0.096 0.01003244 -59.44662 107.81212 -33436.562 -2233.7396
970 0.097 0.010129313 -59.718668 109.33976 -33627.714 -2233.8925
980 0.098 0.010180127 -59.986126 110.79823 -33816.218 -2234.0455
990 0.099 0.010181304 -60.24929 112.17528 -34000.522 -2234.1984
1000 0.1 0.01012881 -60.508632 113.46137 -34179.052 -2234.3508
Loop time of 2.93788 on 4 procs for 1000 steps with 500 atoms
Performance: 2.941 ns/day, 8.161 hours/ns, 340.381 timesteps/s
100.0% CPU use with 4 MPI tasks x no OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0.72349 | 0.73783 | 0.7554 | 1.3 | 25.11
Neigh | 0.00353 | 0.0036981 | 0.0038559 | 0.2 | 0.13
Comm | 0.12285 | 0.14476 | 0.16041 | 3.6 | 4.93
Output | 0.0046515 | 0.0047909 | 0.0050418 | 0.2 | 0.16
Modify | 2.0407 | 2.0439 | 2.0482 | 0.2 | 69.57
Other | | 0.00288 | | | 0.10
Nlocal: 125 ave 136 max 119 min
Histogram: 1 1 1 0 0 0 0 0 0 1
Nghost: 1324 ave 1331 max 1310 min
Histogram: 1 0 0 0 0 0 0 0 2 1
Neighs: 6897.25 ave 7552 max 6604 min
Histogram: 2 1 0 0 0 0 0 0 0 1
FullNghs: 13794.5 ave 15117 max 13164 min
Histogram: 2 0 1 0 0 0 0 0 0 1
Total # of neighbors = 55178
Ave neighs/atom = 110.356
Neighbor list builds = 7
Dangerous builds = 0
Please see the log.cite file for references relevant to this simulation
Total wall time: 0:00:02


@ -1,7 +1,11 @@
LAMMPS (30 Oct 2019)
LAMMPS (19 Mar 2020)
OMP_NUM_THREADS environment is not set. Defaulting to 1 thread. (../comm.cpp:94)
using 1 OpenMP thread(s) per MPI task
# bcc iron in a 3d periodic box
clear
OMP_NUM_THREADS environment is not set. Defaulting to 1 thread. (../comm.cpp:94)
using 1 OpenMP thread(s) per MPI task
units metal
atom_style spin
@ -19,7 +23,7 @@ Created orthogonal box = (0 0 0) to (34.398 34.398 34.398)
1 by 1 by 1 MPI processor grid
create_atoms 1 box
Created 3456 atoms
create_atoms CPU = 0.000741005 secs
create_atoms CPU = 0.00106382 secs
# setting mass, mag. moments, and interactions for bcc iron
@ -88,23 +92,23 @@ Neighbor list info ...
bin: none
Per MPI rank memory allocation (min/avg/max) = 13.4 | 13.4 | 13.4 Mbytes
Step Time v_magx v_magy v_magz v_magnorm v_tmag v_emag PotEng TotEng
0 0 -1 0 0 1 1.0737264e-35 -768.37511 -15175.868 -15131.207
50 0.005 -1 -2.7722752e-10 -2.1828666e-10 1 6.8846921e-09 -768.35793 -15174.244 -15131.215
100 0.01 -1 -2.0983066e-09 -1.7330951e-09 1 1.0038885e-08 -768.30868 -15169.656 -15131.24
Loop time of 7.86359 on 1 procs for 100 steps with 3456 atoms
0 0 -1 0 0 1 1.0737264e-35 -384.18755 -15175.868 -15131.207
50 0.005 -1 -2.7725069e-10 -2.182903e-10 1 6.8851185e-09 -384.17896 -15174.244 -15131.207
100 0.01 -1 -2.0990209e-09 -1.7332235e-09 1 1.0040825e-08 -384.15433 -15169.655 -15131.207
Loop time of 7.47017 on 1 procs for 100 steps with 3456 atoms
Performance: 0.110 ns/day, 218.433 hours/ns, 12.717 timesteps/s
100.0% CPU use with 1 MPI tasks x no OpenMP threads
Performance: 0.116 ns/day, 207.505 hours/ns, 13.387 timesteps/s
99.9% CPU use with 1 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 3.6134 | 3.6134 | 3.6134 | 0.0 | 45.95
Pair | 3.1998 | 3.1998 | 3.1998 | 0.0 | 42.83
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0.014062 | 0.014062 | 0.014062 | 0.0 | 0.18
Output | 0.006057 | 0.006057 | 0.006057 | 0.0 | 0.08
Modify | 4.226 | 4.226 | 4.226 | 0.0 | 53.74
Other | | 0.004064 | | | 0.05
Comm | 0.015094 | 0.015094 | 0.015094 | 0.0 | 0.20
Output | 0.006531 | 0.006531 | 0.006531 | 0.0 | 0.09
Modify | 4.2443 | 4.2443 | 4.2443 | 0.0 | 56.82
Other | | 0.004467 | | | 0.06
Nlocal: 3456 ave 3456 max 3456 min
Histogram: 1 0 0 0 0 0 0 0 0 0


@ -1,7 +1,11 @@
LAMMPS (30 Oct 2019)
LAMMPS (19 Mar 2020)
OMP_NUM_THREADS environment is not set. Defaulting to 1 thread. (../comm.cpp:94)
using 1 OpenMP thread(s) per MPI task
# bcc iron in a 3d periodic box
clear
OMP_NUM_THREADS environment is not set. Defaulting to 1 thread. (../comm.cpp:94)
using 1 OpenMP thread(s) per MPI task
units metal
atom_style spin
@ -19,7 +23,7 @@ Created orthogonal box = (0 0 0) to (34.398 34.398 34.398)
1 by 2 by 2 MPI processor grid
create_atoms 1 box
Created 3456 atoms
create_atoms CPU = 0.00090003 secs
create_atoms CPU = 0.00132084 secs
# setting mass, mag. moments, and interactions for bcc iron
@ -88,23 +92,23 @@ Neighbor list info ...
bin: none
Per MPI rank memory allocation (min/avg/max) = 9.217 | 9.217 | 9.217 Mbytes
Step Time v_magx v_magy v_magz v_magnorm v_tmag v_emag PotEng TotEng
0 0 -1 0 0 1 1.0737264e-35 -768.37511 -15560.055 -15515.394
50 0.005 -1 9.6204015e-11 -3.3767807e-10 1 6.6905249e-09 -768.35767 -15558.438 -15515.394
100 0.01 -1 7.8881609e-10 -2.7017321e-09 1 9.8111281e-09 -768.30769 -15553.868 -15515.394
Loop time of 2.29116 on 4 procs for 100 steps with 3456 atoms
0 0 -1 0 0 1 1.0737264e-35 -384.18755 -15175.868 -15131.207
50 0.005 -1 9.6205501e-11 -3.3769045e-10 1 6.6909444e-09 -384.17884 -15174.259 -15131.207
100 0.01 -1 7.8887025e-10 -2.7021386e-09 1 9.8130686e-09 -384.15383 -15169.712 -15131.207
Loop time of 2.27865 on 4 procs for 100 steps with 3456 atoms
Performance: 0.377 ns/day, 63.643 hours/ns, 43.646 timesteps/s
99.9% CPU use with 4 MPI tasks x no OpenMP threads
Performance: 0.379 ns/day, 63.296 hours/ns, 43.886 timesteps/s
99.5% CPU use with 4 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0.92259 | 0.92963 | 0.93393 | 0.4 | 40.57
Pair | 0.80959 | 0.8184 | 0.82996 | 0.9 | 35.92
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0.02284 | 0.027597 | 0.035185 | 2.8 | 1.20
Output | 0.0018489 | 0.0018544 | 0.0018642 | 0.0 | 0.08
Modify | 1.3296 | 1.3303 | 1.3308 | 0.0 | 58.06
Other | | 0.001818 | | | 0.08
Comm | 0.035061 | 0.046857 | 0.055443 | 3.9 | 2.06
Output | 0.0018592 | 0.0018642 | 0.0018783 | 0.0 | 0.08
Modify | 1.4085 | 1.4095 | 1.41 | 0.1 | 61.86
Other | | 0.002023 | | | 0.09
Nlocal: 864 ave 864 max 864 min
Histogram: 4 0 0 0 0 0 0 0 0 0
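
Two things stand out in these bcc-iron diffs. First, the reported magnetic energy v_emag halves in the new logs (-768.37511 to -384.18755 at step 0) while the serial PotEng and TotEng are unchanged. Second, the 1-proc and 4-proc runs now report identical energies, whereas the old 4-proc PotEng was offset from the serial value by 384.19, exactly the amount by which v_emag changed; both observations are consistent with the update removing a double count of the spin exchange energy. Note also that the new serial log conserves TotEng to all printed digits where the old one drifted. The thermo columns themselves are wired up as in the cobalt scripts above, roughly:

compute  out_mag all spin
variable magx    equal c_out_mag[1]
variable magy    equal c_out_mag[2]
variable magz    equal c_out_mag[3]
variable magnorm equal c_out_mag[4]
variable emag    equal c_out_mag[5]
variable tmag    equal c_out_mag[6]
thermo_style custom step time v_magx v_magy v_magz v_magnorm v_tmag v_emag pe etotal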


@ -1,7 +1,11 @@
LAMMPS (30 Oct 2019)
LAMMPS (19 Mar 2020)
OMP_NUM_THREADS environment is not set. Defaulting to 1 thread. (../comm.cpp:94)
using 1 OpenMP thread(s) per MPI task
# bcc iron in a 3d periodic box
clear
OMP_NUM_THREADS environment is not set. Defaulting to 1 thread. (../comm.cpp:94)
using 1 OpenMP thread(s) per MPI task
units metal
atom_style spin
@ -19,7 +23,7 @@ Created orthogonal box = (0 0 0) to (34.398 34.398 34.398)
1 by 1 by 1 MPI processor grid
create_atoms 1 box
Created 3456 atoms
create_atoms CPU = 0.00166988 secs
create_atoms CPU = 0.00187302 secs
# setting mass, mag. moments, and interactions for bcc iron
@ -67,10 +71,10 @@ dump 1 all custom 100 dump_iron.lammpstrj type x y z c_outsp[1] c_outsp[2] c_o
run 100
EwaldDipoleSpin initialization ...
using 12-bit tables for long-range coulomb (../kspace.cpp:323)
using 12-bit tables for long-range coulomb (../kspace.cpp:332)
G vector (1/distance) = 0.324623
estimated absolute RMS force accuracy = 9.55526e-84
estimated relative force accuracy = 6.63576e-85
estimated absolute RMS force accuracy = 1.69788e-59
estimated relative force accuracy = 1.17911e-60
KSpace vectors: actual max1d max3d = 2084 10 4630
kxmax kymax kzmax = 10 10 10
Neighbor list info ...
@ -97,24 +101,24 @@ Neighbor list info ...
bin: none
Per MPI rank memory allocation (min/avg/max) = 30.07 | 30.07 | 30.07 Mbytes
Step Time v_magx v_magy v_magz v_magnorm v_tmag v_emag PotEng TotEng
0 0 -1 0 0 1 2.5872886e-37 -767.88567 -15175.39 -15130.729
50 0.005 -1 4.3660916e-09 -2.1918692e-09 1 5.3480999e-10 -767.86847 -15173.766 -15130.738
100 0.01 -1 9.9854966e-09 -4.2823677e-09 1 2.3267629e-09 -767.81917 -15169.178 -15130.762
Loop time of 24.9345 on 1 procs for 100 steps with 3456 atoms
0 0 -1 0 0 1 2.5872886e-37 -383.94283 -15175.635 -15130.974
50 0.005 -1 4.3660908e-09 -2.1918693e-09 1 5.3484784e-10 -383.93423 -15174.011 -15130.974
100 0.01 -1 9.9854743e-09 -4.282369e-09 1 2.3273467e-09 -383.90957 -15169.421 -15130.974
Loop time of 24.8682 on 1 procs for 100 steps with 3456 atoms
Performance: 0.035 ns/day, 692.624 hours/ns, 4.011 timesteps/s
100.0% CPU use with 1 MPI tasks x no OpenMP threads
Performance: 0.035 ns/day, 690.783 hours/ns, 4.021 timesteps/s
100.0% CPU use with 1 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 4.8022 | 4.8022 | 4.8022 | 0.0 | 19.26
Kspace | 10.337 | 10.337 | 10.337 | 0.0 | 41.46
Pair | 4.6097 | 4.6097 | 4.6097 | 0.0 | 18.54
Kspace | 10.303 | 10.303 | 10.303 | 0.0 | 41.43
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0.013856 | 0.013856 | 0.013856 | 0.0 | 0.06
Output | 0.007138 | 0.007138 | 0.007138 | 0.0 | 0.03
Modify | 9.7705 | 9.7705 | 9.7705 | 0.0 | 39.18
Other | | 0.004077 | | | 0.02
Comm | 0.015258 | 0.015258 | 0.015258 | 0.0 | 0.06
Output | 0.006583 | 0.006583 | 0.006583 | 0.0 | 0.03
Modify | 9.9285 | 9.9285 | 9.9285 | 0.0 | 39.92
Other | | 0.004751 | | | 0.02
Nlocal: 3456 ave 3456 max 3456 min
Histogram: 1 0 0 0 0 0 0 0 0 0
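
These two logs exercise the long-range dipole-spin solver (EwaldDipoleSpin); the changed kspace.cpp line number and the shifted RMS-accuracy estimates are side effects of the version bump rather than of the example itself. The driving input lies outside the hunks shown here, but an initialization like this is produced by a long-range spin-dipole pair style plus its matching kspace style, along the lines of the LAMMPS SPIN examples (the cutoff and accuracy values below are illustrative placeholders, not values from this commit):

pair_style   spin/dipole/long 8.0
kspace_style ewald/dipole/spin 1.0e-4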


@ -1,7 +1,11 @@
LAMMPS (30 Oct 2019)
LAMMPS (19 Mar 2020)
OMP_NUM_THREADS environment is not set. Defaulting to 1 thread. (../comm.cpp:94)
using 1 OpenMP thread(s) per MPI task
# bcc iron in a 3d periodic box
clear
OMP_NUM_THREADS environment is not set. Defaulting to 1 thread. (../comm.cpp:94)
using 1 OpenMP thread(s) per MPI task
units metal
atom_style spin
@ -19,7 +23,7 @@ Created orthogonal box = (0 0 0) to (34.398 34.398 34.398)
1 by 2 by 2 MPI processor grid
create_atoms 1 box
Created 3456 atoms
create_atoms CPU = 0.00088191 secs
create_atoms CPU = 0.000773907 secs
# setting mass, mag. moments, and interactions for bcc iron
@ -67,10 +71,10 @@ dump 1 all custom 100 dump_iron.lammpstrj type x y z c_outsp[1] c_outsp[2] c_o
run 100
EwaldDipoleSpin initialization ...
using 12-bit tables for long-range coulomb (../kspace.cpp:323)
using 12-bit tables for long-range coulomb (../kspace.cpp:332)
G vector (1/distance) = 0.324623
estimated absolute RMS force accuracy = 9.29828e-84
estimated relative force accuracy = 6.4573e-85
estimated absolute RMS force accuracy = 2.94041e-64
estimated relative force accuracy = 2.042e-65
KSpace vectors: actual max1d max3d = 2084 10 4630
kxmax kymax kzmax = 10 10 10
Neighbor list info ...
@ -97,24 +101,24 @@ Neighbor list info ...
bin: none
Per MPI rank memory allocation (min/avg/max) = 25.89 | 25.89 | 25.89 Mbytes
Step Time v_magx v_magy v_magz v_magnorm v_tmag v_emag PotEng TotEng
0 0 -1 0 0 1 3.5107565e-37 -767.88567 -15559.577 -15514.916
50 0.005 -1 4.3196063e-09 -2.1966927e-09 1 5.1719577e-10 -767.86822 -15557.96 -15514.916
100 0.01 -1 9.7636593e-09 -4.3236953e-09 1 2.2443181e-09 -767.81819 -15553.39 -15514.916
Loop time of 6.80139 on 4 procs for 100 steps with 3456 atoms
0 0 -1 0 0 1 3.5107565e-37 -383.94283 -15175.635 -15130.974
50 0.005 -1 4.3196054e-09 -2.1966927e-09 1 5.1723249e-10 -383.93411 -15174.026 -15130.974
100 0.01 -1 9.7636345e-09 -4.3236965e-09 1 2.2448849e-09 -383.90908 -15169.479 -15130.974
Loop time of 7.03264 on 4 procs for 100 steps with 3456 atoms
Performance: 0.127 ns/day, 188.927 hours/ns, 14.703 timesteps/s
100.0% CPU use with 4 MPI tasks x no OpenMP threads
Performance: 0.123 ns/day, 195.351 hours/ns, 14.219 timesteps/s
99.9% CPU use with 4 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 1.248 | 1.2649 | 1.2816 | 1.1 | 18.60
Kspace | 2.523 | 2.5743 | 2.6505 | 3.0 | 37.85
Pair | 1.1743 | 1.207 | 1.2416 | 2.2 | 17.16
Kspace | 2.6173 | 2.6542 | 2.7273 | 2.7 | 37.74
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0.029461 | 0.087268 | 0.13754 | 13.0 | 1.28
Output | 0.0018618 | 0.001869 | 0.0018811 | 0.0 | 0.03
Modify | 2.8692 | 2.8709 | 2.8741 | 0.1 | 42.21
Other | | 0.002119 | | | 0.03
Comm | 0.042837 | 0.11362 | 0.1882 | 16.9 | 1.62
Output | 0.0018778 | 0.0018882 | 0.0019088 | 0.0 | 0.03
Modify | 3.0484 | 3.0535 | 3.0606 | 0.3 | 43.42
Other | | 0.002387 | | | 0.03
Nlocal: 864 ave 864 max 864 min
Histogram: 4 0 0 0 0 0 0 0 0 0
@ -132,4 +136,4 @@ Dangerous builds = 0
Please see the log.cite file for references relevant to this simulation
Total wall time: 0:00:06
Total wall time: 0:00:07


@ -1,7 +1,11 @@
LAMMPS (30 Oct 2019)
LAMMPS (19 Mar 2020)
OMP_NUM_THREADS environment is not set. Defaulting to 1 thread. (../comm.cpp:94)
using 1 OpenMP thread(s) per MPI task
# bcc iron in a 3d periodic box
clear
OMP_NUM_THREADS environment is not set. Defaulting to 1 thread. (../comm.cpp:94)
using 1 OpenMP thread(s) per MPI task
units metal
atom_style spin
@ -19,7 +23,7 @@ Created orthogonal box = (0 0 0) to (34.398 34.398 34.398)
1 by 1 by 1 MPI processor grid
create_atoms 1 box
Created 3456 atoms
create_atoms CPU = 0.00166583 secs
create_atoms CPU = 0.00192595 secs
# setting mass, mag. moments, and interactions for bcc iron
@ -99,24 +103,24 @@ Neighbor list info ...
bin: none
Per MPI rank memory allocation (min/avg/max) = 16.27 | 16.27 | 16.27 Mbytes
Step Time v_magx v_magy v_magz v_magnorm v_tmag v_emag PotEng TotEng
0 0 -1 0 0 1 3.7996771e-37 -767.89759 -15175.402 -15130.741
50 0.005 -1 3.6585337e-09 -1.9445403e-09 1 5.1405121e-10 -767.88039 -15173.779 -15130.75
100 0.01 -1 7.3585728e-09 -3.8640878e-09 1 2.0194927e-09 -767.83109 -15169.191 -15130.774
Loop time of 15.3615 on 1 procs for 100 steps with 3456 atoms
0 0 -1 0 0 1 3.7996771e-37 -383.94879 -15175.641 -15130.98
50 0.005 -1 3.6585337e-09 -1.9445403e-09 1 5.1408909e-10 -383.94019 -15174.017 -15130.98
100 0.01 -1 7.3585736e-09 -3.8640869e-09 1 2.0200831e-09 -383.91553 -15169.428 -15130.98
Loop time of 15.1465 on 1 procs for 100 steps with 3456 atoms
Performance: 0.056 ns/day, 426.709 hours/ns, 6.510 timesteps/s
99.9% CPU use with 1 MPI tasks x no OpenMP threads
Performance: 0.057 ns/day, 420.737 hours/ns, 6.602 timesteps/s
100.0% CPU use with 1 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 4.8418 | 4.8418 | 4.8418 | 0.0 | 31.52
Kspace | 0.66626 | 0.66626 | 0.66626 | 0.0 | 4.34
Pair | 4.6008 | 4.6008 | 4.6008 | 0.0 | 30.38
Kspace | 0.65995 | 0.65995 | 0.65995 | 0.0 | 4.36
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0.014248 | 0.014248 | 0.014248 | 0.0 | 0.09
Output | 0.0064788 | 0.0064788 | 0.0064788 | 0.0 | 0.04
Modify | 9.8279 | 9.8279 | 9.8279 | 0.0 | 63.98
Other | | 0.00478 | | | 0.03
Comm | 0.01495 | 0.01495 | 0.01495 | 0.0 | 0.10
Output | 0.0065951 | 0.0065951 | 0.0065951 | 0.0 | 0.04
Modify | 9.8589 | 9.8589 | 9.8589 | 0.0 | 65.09
Other | | 0.005332 | | | 0.04
Nlocal: 3456 ave 3456 max 3456 min
Histogram: 1 0 0 0 0 0 0 0 0 0


@ -1,7 +1,11 @@
LAMMPS (30 Oct 2019)
LAMMPS (19 Mar 2020)
OMP_NUM_THREADS environment is not set. Defaulting to 1 thread. (../comm.cpp:94)
using 1 OpenMP thread(s) per MPI task
# bcc iron in a 3d periodic box
clear
OMP_NUM_THREADS environment is not set. Defaulting to 1 thread. (../comm.cpp:94)
using 1 OpenMP thread(s) per MPI task
units metal
atom_style spin
@ -19,7 +23,7 @@ Created orthogonal box = (0 0 0) to (34.398 34.398 34.398)
1 by 2 by 2 MPI processor grid
create_atoms 1 box
Created 3456 atoms
create_atoms CPU = 0.00123286 secs
create_atoms CPU = 0.0007658 secs
# setting mass, mag. moments, and interactions for bcc iron
@ -99,24 +103,24 @@ Neighbor list info ...
bin: none
Per MPI rank memory allocation (min/avg/max) = 10.42 | 10.42 | 10.42 Mbytes
Step Time v_magx v_magy v_magz v_magnorm v_tmag v_emag PotEng TotEng
0 0 -1 0 0 1 2.3173191e-37 -767.89759 -15559.59 -15514.929
50 0.005 -1 3.6593054e-09 -1.9379563e-09 1 4.9747018e-10 -767.88014 -15557.972 -15514.929
100 0.01 -1 7.3731919e-09 -3.8151563e-09 1 1.9544299e-09 -767.8301 -15553.402 -15514.929
Loop time of 4.4084 on 4 procs for 100 steps with 3456 atoms
0 0 -1 0 0 1 2.3173191e-37 -383.94879 -15175.641 -15130.98
50 0.005 -1 3.6593053e-09 -1.9379563e-09 1 4.9750695e-10 -383.94007 -15174.032 -15130.98
100 0.01 -1 7.3731899e-09 -3.8151552e-09 1 1.9550037e-09 -383.91504 -15169.485 -15130.98
Loop time of 4.3717 on 4 procs for 100 steps with 3456 atoms
Performance: 0.196 ns/day, 122.455 hours/ns, 22.684 timesteps/s
100.0% CPU use with 4 MPI tasks x no OpenMP threads
Performance: 0.198 ns/day, 121.436 hours/ns, 22.874 timesteps/s
100.0% CPU use with 4 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 1.2326 | 1.2513 | 1.2693 | 1.3 | 28.38
Kspace | 0.22823 | 0.24585 | 0.26385 | 2.8 | 5.58
Pair | 1.1624 | 1.1869 | 1.2125 | 1.8 | 27.15
Kspace | 0.24468 | 0.26758 | 0.29157 | 3.6 | 6.12
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0.025352 | 0.028409 | 0.032299 | 1.6 | 0.64
Output | 0.001868 | 0.0018761 | 0.0018861 | 0.0 | 0.04
Modify | 2.8753 | 2.8788 | 2.8818 | 0.1 | 65.30
Other | | 0.002175 | | | 0.05
Comm | 0.027149 | 0.030758 | 0.033902 | 1.7 | 0.70
Output | 0.0030079 | 0.0030248 | 0.0030622 | 0.0 | 0.07
Modify | 2.8782 | 2.8806 | 2.8837 | 0.1 | 65.89
Other | | 0.002793 | | | 0.06
Nlocal: 864 ave 864 max 864 min
Histogram: 4 0 0 0 0 0 0 0 0 0

View File

@ -31,6 +31,7 @@ neighbor 0.1 bin
neigh_modify every 10 check yes delay 20
fix 1 all precession/spin zeeman 0.0 0.0 0.0 1.0
fix_modify 1 energy yes
fix 2 all langevin/spin 0.0 0.0 21
fix 3 all nve/spin lattice moving

View File

@ -0,0 +1,141 @@
LAMMPS (19 Mar 2020)
OMP_NUM_THREADS environment is not set. Defaulting to 1 thread. (../comm.cpp:94)
using 1 OpenMP thread(s) per MPI task
# bcc iron in a 3d periodic box
clear
OMP_NUM_THREADS environment is not set. Defaulting to 1 thread. (../comm.cpp:94)
using 1 OpenMP thread(s) per MPI task
units metal
atom_style spin
dimension 3
boundary p p p
# necessary for the serial algorithm (sametag)
atom_modify map array
lattice bcc 2.8665
Lattice spacing in x,y,z = 2.8665 2.8665 2.8665
region box block 0.0 5.0 0.0 5.0 0.0 5.0
create_box 1 box
Created orthogonal box = (0 0 0) to (14.3325 14.3325 14.3325)
1 by 1 by 1 MPI processor grid
create_atoms 1 box
Created 250 atoms
create_atoms CPU = 0.00103498 secs
# setting mass, mag. moments, and interactions for bcc iron
mass 1 55.845
set group all spin/random 31 2.2
250 settings made for spin/random
# set group all spin 2.2 0.0 0.0 1.0
velocity all create 100 4928459 rot yes dist gaussian
pair_style hybrid/overlay eam/alloy spin/exchange 3.5
pair_coeff * * eam/alloy Fe_Mishin2006.eam.alloy Fe
pair_coeff * * spin/exchange exchange 3.4 0.02726 0.2171 1.841
neighbor 0.1 bin
neigh_modify every 10 check yes delay 20
fix 1 all precession/spin zeeman 0.0 0.0 0.0 1.0
fix_modify 1 energy yes
fix 2 all langevin/spin 0.0 0.0 21
fix 3 all nve/spin lattice moving
timestep 0.0001
# compute and output options
compute out_mag all spin
compute out_pe all pe
compute out_ke all ke
compute out_temp all temp
variable magz equal c_out_mag[3]
variable magnorm equal c_out_mag[4]
variable emag equal c_out_mag[5]
variable tmag equal c_out_mag[6]
thermo_style custom step time v_magnorm v_tmag temp v_emag ke pe press etotal
thermo 50
compute outsp all property/atom spx spy spz sp fmx fmy fmz
dump 1 all custom 100 dump_iron.lammpstrj type x y z c_outsp[1] c_outsp[2] c_outsp[3]
run 1000
Neighbor list info ...
update every 10 steps, delay 20 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 5.77337
ghost atom cutoff = 5.77337
binsize = 2.88668, bins = 5 5 5
2 neighbor lists, perpetual/occasional/extra = 2 0 0
(1) pair eam/alloy, perpetual, half/full from (2)
attributes: half, newton on
pair build: halffull/newton
stencil: none
bin: none
(2) pair spin/exchange, perpetual
attributes: full, newton on
pair build: full/bin/atomonly
stencil: full/bin/3d
bin: standard
Per MPI rank memory allocation (min/avg/max) = 7.82 | 7.82 | 7.82 Mbytes
Step Time v_magnorm v_tmag Temp v_emag KinEng PotEng Press TotEng
0 0 0.076456975 4554.5462 100.00358 -0.42895634 3.2186929 -1070.429 394.43342 -1067.2103
50 0.005 0.076456974 4671.3033 96.520068 -0.43128393 3.1065733 -1070.3169 707.2166 -1067.2103
100 0.01 0.076456983 4793.2674 86.525198 -0.43550567 2.7848806 -1069.9952 1456.8628 -1067.2103
150 0.015 0.076456973 4894.1924 71.664875 -0.44035101 2.3065896 -1069.5169 2510.4447 -1067.2103
200 0.02 0.076456944 4859.9379 54.610496 -0.44591875 1.7576812 -1068.968 3686.7124 -1067.2103
250 0.025 0.076456953 4636.3427 38.560198 -0.45258831 1.2410899 -1068.4514 4757.3215 -1067.2103
300 0.03 0.076457027 4333.5734 26.459387 -0.45961266 0.85161592 -1068.0619 5505.1963 -1067.2103
350 0.035 0.076457102 4124.1453 20.205123 -0.46569388 0.65031758 -1067.8606 5768.9329 -1067.2103
400 0.04 0.076457116 4064.6843 20.142986 -0.47038245 0.64831763 -1067.8586 5521.4162 -1067.2103
450 0.045 0.076457072 4076.2576 25.084719 -0.4741092 0.80737114 -1068.0176 4890.8714 -1067.2103
500 0.05 0.076457001 4105.5788 32.863873 -0.47765283 1.0577493 -1068.268 4093.2603 -1067.2103
550 0.055 0.076456962 4109.0613 41.126745 -0.48188168 1.3236962 -1068.534 3337.5111 -1067.2103
600 0.06 0.076456996 3993.3891 47.996188 -0.48790867 1.5447946 -1068.7551 2767.4353 -1067.2103
650 0.065 0.076457077 3776.2342 52.386407 -0.49621451 1.6860972 -1068.8964 2458.9058 -1067.2103
700 0.07 0.076457137 3586.7963 54.031337 -0.50582777 1.7390405 -1068.9493 2397.5333 -1067.2103
750 0.075 0.076457135 3535.1994 53.389736 -0.51542639 1.7183901 -1068.9287 2514.1889 -1067.2103
800 0.08 0.076457118 3585.6546 51.428399 -0.52423597 1.6552629 -1068.8655 2732.6669 -1067.2103
850 0.085 0.076457118 3634.4891 49.293001 -0.53165471 1.5865335 -1068.7968 2977.6259 -1067.2103
900 0.09 0.076457126 3660.3333 47.999274 -0.53780479 1.5448939 -1068.7552 3176.4112 -1067.2103
950 0.095 0.076457129 3671.3259 48.180409 -0.54376787 1.5507239 -1068.761 3283.2242 -1067.2103
1000 0.1 0.076457139 3628.6038 49.917341 -0.55029097 1.6066284 -1068.8169 3283.2361 -1067.2103
Loop time of 1.66624 on 1 procs for 1000 steps with 250 atoms
Performance: 5.185 ns/day, 4.628 hours/ns, 600.152 timesteps/s
99.7% CPU use with 1 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0.73884 | 0.73884 | 0.73884 | 0.0 | 44.34
Neigh | 0.0045731 | 0.0045731 | 0.0045731 | 0.0 | 0.27
Comm | 0.023681 | 0.023681 | 0.023681 | 0.0 | 1.42
Output | 0.0042441 | 0.0042441 | 0.0042441 | 0.0 | 0.25
Modify | 0.89131 | 0.89131 | 0.89131 | 0.0 | 53.49
Other | | 0.003589 | | | 0.22
Nlocal: 250 ave 250 max 250 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 1407 ave 1407 max 1407 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 7868 ave 7868 max 7868 min
Histogram: 1 0 0 0 0 0 0 0 0 0
FullNghs: 15736 ave 15736 max 15736 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 15736
Ave neighs/atom = 62.944
Neighbor list builds = 6
Dangerous builds = 0
Please see the log.cite file for references relevant to this simulation
Total wall time: 0:00:01

View File

@ -0,0 +1,141 @@
LAMMPS (19 Mar 2020)
OMP_NUM_THREADS environment is not set. Defaulting to 1 thread. (../comm.cpp:94)
using 1 OpenMP thread(s) per MPI task
# bcc iron in a 3d periodic box
clear
OMP_NUM_THREADS environment is not set. Defaulting to 1 thread. (../comm.cpp:94)
using 1 OpenMP thread(s) per MPI task
units metal
atom_style spin
dimension 3
boundary p p p
# necessary for the serial algorithm (sametag)
atom_modify map array
lattice bcc 2.8665
Lattice spacing in x,y,z = 2.8665 2.8665 2.8665
region box block 0.0 5.0 0.0 5.0 0.0 5.0
create_box 1 box
Created orthogonal box = (0 0 0) to (14.3325 14.3325 14.3325)
1 by 2 by 2 MPI processor grid
create_atoms 1 box
Created 250 atoms
create_atoms CPU = 0.000688791 secs
# setting mass, mag. moments, and interactions for bcc iron
mass 1 55.845
set group all spin/random 31 2.2
250 settings made for spin/random
# set group all spin 2.2 0.0 0.0 1.0
velocity all create 100 4928459 rot yes dist gaussian
pair_style hybrid/overlay eam/alloy spin/exchange 3.5
pair_coeff * * eam/alloy Fe_Mishin2006.eam.alloy Fe
pair_coeff * * spin/exchange exchange 3.4 0.02726 0.2171 1.841
neighbor 0.1 bin
neigh_modify every 10 check yes delay 20
fix 1 all precession/spin zeeman 0.0 0.0 0.0 1.0
fix_modify 1 energy yes
fix 2 all langevin/spin 0.0 0.0 21
fix 3 all nve/spin lattice moving
timestep 0.0001
# compute and output options
compute out_mag all spin
compute out_pe all pe
compute out_ke all ke
compute out_temp all temp
variable magz equal c_out_mag[3]
variable magnorm equal c_out_mag[4]
variable emag equal c_out_mag[5]
variable tmag equal c_out_mag[6]
thermo_style custom step time v_magnorm v_tmag temp v_emag ke pe press etotal
thermo 50
compute outsp all property/atom spx spy spz sp fmx fmy fmz
dump 1 all custom 100 dump_iron.lammpstrj type x y z c_outsp[1] c_outsp[2] c_outsp[3]
run 1000
Neighbor list info ...
update every 10 steps, delay 20 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 5.77337
ghost atom cutoff = 5.77337
binsize = 2.88668, bins = 5 5 5
2 neighbor lists, perpetual/occasional/extra = 2 0 0
(1) pair eam/alloy, perpetual, half/full from (2)
attributes: half, newton on
pair build: halffull/newton
stencil: none
bin: none
(2) pair spin/exchange, perpetual
attributes: full, newton on
pair build: full/bin/atomonly
stencil: full/bin/3d
bin: standard
Per MPI rank memory allocation (min/avg/max) = 7.766 | 7.766 | 7.766 Mbytes
Step Time v_magnorm v_tmag Temp v_emag KinEng PotEng Press TotEng
0 0 0.076456975 4554.5462 100.00358 -0.42895634 3.2186929 -1070.429 394.43342 -1067.2103
50 0.005 0.076456995 4714.366 96.279315 -0.42705836 3.0988245 -1070.3091 714.16563 -1067.2103
100 0.01 0.076457028 4844.7708 86.007787 -0.43034795 2.7682274 -1069.9785 1479.8537 -1067.2103
150 0.015 0.076457073 4938.5943 70.888778 -0.43554708 2.2816103 -1069.4919 2538.6386 -1067.2103
200 0.02 0.076457107 4910.2627 53.612031 -0.44069391 1.7255448 -1068.9358 3702.0713 -1067.2103
250 0.025 0.07645713 4705.3075 37.374184 -0.44525189 1.2029171 -1068.4132 4749.8323 -1067.2103
300 0.03 0.076457162 4418.4389 25.117093 -0.44873668 0.80841314 -1068.0187 5473.2266 -1067.2103
350 0.035 0.07645722 4233.0963 18.792985 -0.45135838 0.60486682 -1067.8151 5710.4039 -1067.2103
400 0.04 0.07645726 4204.002 18.876267 -0.4546575 0.60754729 -1067.8178 5437.712 -1067.2103
450 0.045 0.076457242 4221.8277 24.290539 -0.45989731 0.78180986 -1067.9921 4788.1384 -1067.2103
500 0.05 0.076457208 4226.337 32.849365 -0.46668115 1.0572823 -1068.2676 3961.2587 -1067.2103
550 0.055 0.076457211 4195.1414 42.028524 -0.47379263 1.3527207 -1068.563 3159.4718 -1067.2103
600 0.06 0.076457222 4043.6401 49.722783 -0.48155951 1.6003664 -1068.8106 2554.4057 -1067.2103
650 0.065 0.07645721 3784.8692 54.624328 -0.49066003 1.7581264 -1068.9684 2266.0816 -1067.2103
700 0.07 0.076457187 3576.7472 56.274292 -0.49932577 1.8112318 -1069.0215 2298.3908 -1067.2103
750 0.075 0.07645717 3531.6724 55.083486 -0.50591093 1.7729047 -1068.9832 2557.6667 -1067.2103
800 0.08 0.076457176 3593.0894 52.172747 -0.51103604 1.6792204 -1068.8895 2933.0411 -1067.2103
850 0.085 0.076457206 3688.4988 48.957423 -0.51573858 1.5757327 -1068.786 3313.8291 -1067.2103
900 0.09 0.076457231 3788.943 46.719714 -0.52043742 1.5037103 -1068.714 3600.8734 -1067.2103
950 0.095 0.076457251 3854.0552 46.272425 -0.52460019 1.4893139 -1068.6996 3718.2987 -1067.2103
1000 0.1 0.076457302 3859.0984 47.806309 -0.52719778 1.5386831 -1068.749 3641.2287 -1067.2103
Loop time of 1.55258 on 4 procs for 1000 steps with 250 atoms
Performance: 5.565 ns/day, 4.313 hours/ns, 644.089 timesteps/s
99.8% CPU use with 4 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0.2109 | 0.21798 | 0.22568 | 1.1 | 14.04
Neigh | 0.0011308 | 0.0011812 | 0.0012279 | 0.1 | 0.08
Comm | 0.074407 | 0.082247 | 0.090297 | 2.0 | 5.30
Output | 0.0019011 | 0.0019355 | 0.0020187 | 0.1 | 0.12
Modify | 1.2468 | 1.2477 | 1.249 | 0.1 | 80.36
Other | | 0.001532 | | | 0.10
Nlocal: 62.5 ave 66 max 60 min
Histogram: 1 1 0 0 0 1 0 0 0 1
Nghost: 846.25 ave 861 max 831 min
Histogram: 1 0 1 0 0 0 0 1 0 1
Neighs: 1962.25 ave 2091 max 1866 min
Histogram: 1 0 0 2 0 0 0 0 0 1
FullNghs: 3924.5 ave 4138 max 3771 min
Histogram: 1 1 0 0 0 1 0 0 0 1
Total # of neighbors = 15698
Ave neighs/atom = 62.792
Neighbor list builds = 6
Dangerous builds = 0
Please see the log.cite file for references relevant to this simulation
Total wall time: 0:00:01

View File

@ -1,7 +1,11 @@
LAMMPS (30 Oct 2019)
LAMMPS (19 Mar 2020)
OMP_NUM_THREADS environment is not set. Defaulting to 1 thread. (../comm.cpp:94)
using 1 OpenMP thread(s) per MPI task
# bcc iron in a 3d periodic box
clear
OMP_NUM_THREADS environment is not set. Defaulting to 1 thread. (../comm.cpp:94)
using 1 OpenMP thread(s) per MPI task
units metal
atom_style spin
@ -19,7 +23,7 @@ Created orthogonal box = (0 0 0) to (14.3325 14.3325 14.3325)
1 by 1 by 1 MPI processor grid
create_atoms 1 box
Created 250 atoms
create_atoms CPU = 0.00101709 secs
create_atoms CPU = 0.000530005 secs
# setting mass, mag. moments, and interactions for bcc iron
@ -81,53 +85,53 @@ Neighbor list info ...
bin: standard
Per MPI rank memory allocation (min/avg/max) = 7.82 | 7.82 | 7.82 Mbytes
Step Time v_magx v_magy v_magz v_magnorm v_tmag v_emag PotEng TotEng
0 0 -1 0 0 1 0 -55.58269 -1097.7914 -1094.5727
50 0.005 -1 0 0 1 0 -55.581417 -1097.6764 -1094.5733
100 0.01 -1 0 0 1 0 -55.577759 -1097.35 -1094.5751
150 0.015 -1 0 0 1 0 -55.57219 -1096.8677 -1094.5779
200 0.02 -1 0 0 1 0 -55.565438 -1096.3163 -1094.5813
250 0.025 -1 0 0 1 0 -55.558379 -1095.7987 -1094.5848
300 0.03 -1 0 0 1 0 -55.551886 -1095.4103 -1094.5881
350 0.035 -1 0 0 1 0 -55.546675 -1095.2124 -1094.5907
400 0.04 -1 0 0 1 0 -55.543187 -1095.2153 -1094.5924
450 0.045 -1 0 0 1 0 -55.54154 -1095.379 -1094.5932
500 0.05 -1 0 0 1 0 -55.541574 -1095.633 -1094.5932
550 0.055 -1 0 0 1 0 -55.542941 -1095.9006 -1094.5925
600 0.06 -1 0 0 1 0 -55.545209 -1096.1205 -1094.5914
650 0.065 -1 0 0 1 0 -55.547951 -1096.2575 -1094.59
700 0.07 -1 0 0 1 0 -55.550801 -1096.3044 -1094.5886
750 0.075 -1 0 0 1 0 -55.553483 -1096.2778 -1094.5873
800 0.08 -1 0 0 1 0 -55.555816 -1096.2098 -1094.5861
850 0.085 -1 0 0 1 0 -55.557706 -1096.1372 -1094.5852
900 0.09 -1 0 0 1 0 -55.55913 -1096.0919 -1094.5844
950 0.095 -1 0 0 1 0 -55.560111 -1096.0925 -1094.584
1000 0.1 -1 0 0 1 0 -55.560705 -1096.1411 -1094.5837
Loop time of 1.74825 on 1 procs for 1000 steps with 250 atoms
0 0 -1 0 0 1 0 -27.791345 -1097.7914 -1094.5727
50 0.005 -1 0 0 1 0 -27.790708 -1097.6764 -1094.5727
100 0.01 -1 0 0 1 0 -27.788879 -1097.3499 -1094.5727
150 0.015 -1 0 0 1 0 -27.78609 -1096.8672 -1094.5727
200 0.02 -1 0 0 1 0 -27.782705 -1096.3147 -1094.5727
250 0.025 -1 0 0 1 0 -27.779157 -1095.7952 -1094.5727
300 0.03 -1 0 0 1 0 -27.775883 -1095.4038 -1094.5727
350 0.035 -1 0 0 1 0 -27.773241 -1095.2023 -1094.5727
400 0.04 -1 0 0 1 0 -27.771451 -1095.201 -1094.5727
450 0.045 -1 0 0 1 0 -27.770578 -1095.3608 -1094.5727
500 0.05 -1 0 0 1 0 -27.770546 -1095.6113 -1094.5727
550 0.055 -1 0 0 1 0 -27.771185 -1095.8764 -1094.5727
600 0.06 -1 0 0 1 0 -27.772282 -1096.0948 -1094.5727
650 0.065 -1 0 0 1 0 -27.773629 -1096.2313 -1094.5727
700 0.07 -1 0 0 1 0 -27.775042 -1096.2787 -1094.5727
750 0.075 -1 0 0 1 0 -27.776384 -1096.2534 -1094.5727
800 0.08 -1 0 0 1 0 -27.777564 -1096.1872 -1094.5727
850 0.085 -1 0 0 1 0 -27.778533 -1096.117 -1094.5727
900 0.09 -1 0 0 1 0 -27.779276 -1096.0741 -1094.5727
950 0.095 -1 0 0 1 0 -27.779802 -1096.0771 -1094.5727
1000 0.1 -1 0 0 1 0 -27.780134 -1096.1278 -1094.5727
Loop time of 1.70062 on 1 procs for 1000 steps with 250 atoms
Performance: 4.942 ns/day, 4.856 hours/ns, 571.999 timesteps/s
100.0% CPU use with 1 MPI tasks x no OpenMP threads
Performance: 5.080 ns/day, 4.724 hours/ns, 588.019 timesteps/s
100.0% CPU use with 1 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0.80384 | 0.80384 | 0.80384 | 0.0 | 45.98
Neigh | 0.004528 | 0.004528 | 0.004528 | 0.0 | 0.26
Comm | 0.022954 | 0.022954 | 0.022954 | 0.0 | 1.31
Output | 0.0034568 | 0.0034568 | 0.0034568 | 0.0 | 0.20
Modify | 0.91007 | 0.91007 | 0.91007 | 0.0 | 52.06
Other | | 0.003404 | | | 0.19
Pair | 0.72617 | 0.72617 | 0.72617 | 0.0 | 42.70
Neigh | 0.0045958 | 0.0045958 | 0.0045958 | 0.0 | 0.27
Comm | 0.023132 | 0.023132 | 0.023132 | 0.0 | 1.36
Output | 0.0035856 | 0.0035856 | 0.0035856 | 0.0 | 0.21
Modify | 0.93966 | 0.93966 | 0.93966 | 0.0 | 55.25
Other | | 0.003483 | | | 0.20
Nlocal: 250 ave 250 max 250 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 1415 ave 1415 max 1415 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 7873 ave 7873 max 7873 min
Neighs: 7872 ave 7872 max 7872 min
Histogram: 1 0 0 0 0 0 0 0 0 0
FullNghs: 15746 ave 15746 max 15746 min
FullNghs: 15744 ave 15744 max 15744 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 15746
Ave neighs/atom = 62.984
Total # of neighbors = 15744
Ave neighs/atom = 62.976
Neighbor list builds = 6
Dangerous builds = 0
# min_style spin

View File

@ -1,7 +1,11 @@
LAMMPS (30 Oct 2019)
LAMMPS (19 Mar 2020)
OMP_NUM_THREADS environment is not set. Defaulting to 1 thread. (../comm.cpp:94)
using 1 OpenMP thread(s) per MPI task
# bcc iron in a 3d periodic box
clear
OMP_NUM_THREADS environment is not set. Defaulting to 1 thread. (../comm.cpp:94)
using 1 OpenMP thread(s) per MPI task
units metal
atom_style spin
@ -19,7 +23,7 @@ Created orthogonal box = (0 0 0) to (14.3325 14.3325 14.3325)
1 by 2 by 2 MPI processor grid
create_atoms 1 box
Created 250 atoms
create_atoms CPU = 0.000651121 secs
create_atoms CPU = 0.00071311 secs
# setting mass, mag. moments, and interactions for bcc iron
@ -81,53 +85,53 @@ Neighbor list info ...
bin: standard
Per MPI rank memory allocation (min/avg/max) = 7.766 | 7.766 | 7.766 Mbytes
Step Time v_magx v_magy v_magz v_magnorm v_tmag v_emag PotEng TotEng
0 0 -1 0 0 1 0 -55.58269 -1125.5827 -1122.364
50 0.005 -1 0 0 1 0 -55.581457 -1125.4635 -1122.364
100 0.01 -1 0 0 1 0 -55.577922 -1125.1262 -1122.364
150 0.015 -1 0 0 1 0 -55.572562 -1124.6305 -1122.364
200 0.02 -1 0 0 1 0 -55.566098 -1124.067 -1122.364
250 0.025 -1 0 0 1 0 -55.559384 -1123.5412 -1122.364
300 0.03 -1 0 0 1 0 -55.553261 -1123.1491 -1122.364
350 0.035 -1 0 0 1 0 -55.548413 -1122.9526 -1122.364
400 0.04 -1 0 0 1 0 -55.545248 -1122.9623 -1122.364
450 0.045 -1 0 0 1 0 -55.54387 -1123.1395 -1122.364
500 0.05 -1 0 0 1 0 -55.544101 -1123.4126 -1122.364
550 0.055 -1 0 0 1 0 -55.54558 -1123.7021 -1122.364
600 0.06 -1 0 0 1 0 -55.547857 -1123.9414 -1122.364
650 0.065 -1 0 0 1 0 -55.550495 -1124.0897 -1122.364
700 0.07 -1 0 0 1 0 -55.553127 -1124.136 -1122.364
750 0.075 -1 0 0 1 0 -55.555497 -1124.0961 -1122.364
800 0.08 -1 0 0 1 0 -55.557466 -1124.0053 -1122.364
850 0.085 -1 0 0 1 0 -55.559001 -1123.9069 -1122.364
900 0.09 -1 0 0 1 0 -55.560147 -1123.8404 -1122.364
950 0.095 -1 0 0 1 0 -55.560992 -1123.8312 -1122.364
1000 0.1 -1 0 0 1 0 -55.561635 -1123.8853 -1122.364
Loop time of 1.5074 on 4 procs for 1000 steps with 250 atoms
0 0 -1 0 0 1 0 -27.791345 -1097.7914 -1094.5727
50 0.005 -1 0 0 1 0 -27.790728 -1097.6727 -1094.5727
100 0.01 -1 0 0 1 0 -27.78896 -1097.3371 -1094.5727
150 0.015 -1 0 0 1 0 -27.786276 -1096.8437 -1094.5727
200 0.02 -1 0 0 1 0 -27.783035 -1096.2824 -1094.5727
250 0.025 -1 0 0 1 0 -27.779661 -1095.758 -1094.5727
300 0.03 -1 0 0 1 0 -27.776574 -1095.3661 -1094.5727
350 0.035 -1 0 0 1 0 -27.774114 -1095.1684 -1094.5727
400 0.04 -1 0 0 1 0 -27.772489 -1095.1758 -1094.5727
450 0.045 -1 0 0 1 0 -27.771753 -1095.3498 -1094.5727
500 0.05 -1 0 0 1 0 -27.771823 -1095.6196 -1094.5727
550 0.055 -1 0 0 1 0 -27.772521 -1095.9061 -1094.5727
600 0.06 -1 0 0 1 0 -27.773627 -1096.1431 -1094.5727
650 0.065 -1 0 0 1 0 -27.774925 -1096.2899 -1094.5727
700 0.07 -1 0 0 1 0 -27.776234 -1096.3356 -1094.5727
750 0.075 -1 0 0 1 0 -27.777423 -1096.2961 -1094.5727
800 0.08 -1 0 0 1 0 -27.778424 -1096.2063 -1094.5727
850 0.085 -1 0 0 1 0 -27.779215 -1096.1093 -1094.5727
900 0.09 -1 0 0 1 0 -27.779817 -1096.0443 -1094.5727
950 0.095 -1 0 0 1 0 -27.780271 -1096.0363 -1094.5727
1000 0.1 -1 0 0 1 0 -27.780622 -1096.0914 -1094.5727
Loop time of 1.54663 on 4 procs for 1000 steps with 250 atoms
Performance: 5.732 ns/day, 4.187 hours/ns, 663.393 timesteps/s
99.9% CPU use with 4 MPI tasks x no OpenMP threads
Performance: 5.586 ns/day, 4.296 hours/ns, 646.566 timesteps/s
99.9% CPU use with 4 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0.22156 | 0.23223 | 0.24219 | 1.5 | 15.41
Neigh | 0.0011292 | 0.0011852 | 0.0012362 | 0.1 | 0.08
Comm | 0.067507 | 0.076341 | 0.087237 | 2.6 | 5.06
Output | 0.0015073 | 0.0015442 | 0.0015914 | 0.1 | 0.10
Modify | 1.1934 | 1.1947 | 1.1955 | 0.1 | 79.25
Other | | 0.001434 | | | 0.10
Pair | 0.2086 | 0.21716 | 0.22473 | 1.2 | 14.04
Neigh | 0.0011575 | 0.0011975 | 0.001235 | 0.1 | 0.08
Comm | 0.070766 | 0.080324 | 0.088558 | 2.3 | 5.19
Output | 0.0016837 | 0.0017157 | 0.0017838 | 0.1 | 0.11
Modify | 1.2424 | 1.2446 | 1.2467 | 0.1 | 80.47
Other | | 0.001639 | | | 0.11
Nlocal: 62.5 ave 66 max 60 min
Histogram: 1 1 0 0 0 1 0 0 0 1
Nghost: 848.25 ave 861 max 834 min
Histogram: 1 0 0 0 1 0 1 0 0 1
Neighs: 1962.75 ave 2087 max 1870 min
Neighs: 1962.25 ave 2085 max 1870 min
Histogram: 1 1 0 0 0 0 1 0 0 1
FullNghs: 3925.5 ave 4138 max 3776 min
FullNghs: 3924.5 ave 4136 max 3776 min
Histogram: 1 1 0 0 0 1 0 0 0 1
Total # of neighbors = 15702
Ave neighs/atom = 62.808
Total # of neighbors = 15698
Ave neighs/atom = 62.792
Neighbor list builds = 6
Dangerous builds = 0
# min_style spin

View File

@ -1,136 +0,0 @@
LAMMPS (30 Oct 2019)
# bcc iron in a 3d periodic box
clear
units metal
atom_style spin
dimension 3
boundary p p p
# necessary for the serial algorithm (sametag)
atom_modify map array
lattice bcc 2.8665
Lattice spacing in x,y,z = 2.8665 2.8665 2.8665
region box block 0.0 5.0 0.0 5.0 0.0 5.0
create_box 1 box
Created orthogonal box = (0 0 0) to (14.3325 14.3325 14.3325)
1 by 1 by 1 MPI processor grid
create_atoms 1 box
Created 250 atoms
create_atoms CPU = 0.000422955 secs
# setting mass, mag. moments, and interactions for bcc iron
mass 1 55.845
set group all spin/random 31 2.2
250 settings made for spin/random
# set group all spin 2.2 0.0 0.0 1.0
velocity all create 100 4928459 rot yes dist gaussian
pair_style hybrid/overlay eam/alloy spin/exchange 3.5
pair_coeff * * eam/alloy Fe_Mishin2006.eam.alloy Fe
pair_coeff * * spin/exchange exchange 3.4 0.02726 0.2171 1.841
neighbor 0.1 bin
neigh_modify every 10 check yes delay 20
fix 1 all precession/spin zeeman 0.0 0.0 0.0 1.0
fix 2 all langevin/spin 0.0 0.0 21
fix 3 all nve/spin lattice moving
timestep 0.0001
# compute and output options
compute out_mag all spin
compute out_pe all pe
compute out_ke all ke
compute out_temp all temp
variable magz equal c_out_mag[3]
variable magnorm equal c_out_mag[4]
variable emag equal c_out_mag[5]
variable tmag equal c_out_mag[6]
thermo_style custom step time v_magnorm v_tmag temp v_emag ke pe press etotal
thermo 50
compute outsp all property/atom spx spy spz sp fmx fmy fmz
dump 1 all custom 100 dump_iron.lammpstrj type x y z c_outsp[1] c_outsp[2] c_outsp[3]
run 1000
Neighbor list info ...
update every 10 steps, delay 20 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 5.77337
ghost atom cutoff = 5.77337
binsize = 2.88668, bins = 5 5 5
2 neighbor lists, perpetual/occasional/extra = 2 0 0
(1) pair eam/alloy, perpetual, half/full from (2)
attributes: half, newton on
pair build: halffull/newton
stencil: none
bin: none
(2) pair spin/exchange, perpetual
attributes: full, newton on
pair build: full/bin/atomonly
stencil: full/bin/3d
bin: standard
Per MPI rank memory allocation (min/avg/max) = 7.82 | 7.82 | 7.82 Mbytes
Step Time v_magnorm v_tmag Temp v_emag KinEng PotEng Press TotEng
0 0 0.076456975 4554.5462 100.00358 -0.85791269 3.2186929 -1070.429 394.43342 -1067.2103
50 0.005 0.076456974 4658.383 96.663685 -0.86504718 3.1111957 -1070.3179 709.50826 -1067.2067
100 0.01 0.076456983 4744.1872 86.965803 -0.88035771 2.7990619 -1069.9981 1466.6938 -1067.1991
150 0.015 0.076456973 4794.5283 72.421197 -0.8996913 2.3309324 -1069.5203 2534.3867 -1067.1894
200 0.02 0.076456944 4707.6548 55.633188 -0.921682 1.7905973 -1068.969 3732.183 -1067.1784
250 0.025 0.076456953 4439.4697 39.802206 -0.94649004 1.2810649 -1068.447 4831.5559 -1067.166
300 0.03 0.076457027 4101.6694 27.882295 -0.97253854 0.8974133 -1068.0504 5612.0928 -1067.153
350 0.035 0.076457103 3860.1545 21.776538 -0.99708692 0.70089477 -1067.8416 5906.3057 -1067.1407
400 0.04 0.076457117 3765.5341 21.857102 -1.0190244 0.70348778 -1067.8332 5682.0053 -1067.1297
450 0.045 0.076457072 3739.9037 26.959407 -1.0389343 0.86770942 -1067.9875 5066.5077 -1067.1198
500 0.05 0.076457001 3730.8342 34.92521 -1.0582008 1.124095 -1068.2342 4279.2424 -1067.1101
550 0.055 0.076456962 3698.0556 43.405912 -1.0785156 1.397053 -1068.497 3533.4153 -1067.1
600 0.06 0.076456997 3560.947 50.544844 -1.102048 1.626825 -1068.715 2975.8479 -1067.0882
650 0.065 0.076457079 3341.7402 55.261218 -1.1296588 1.7786252 -1068.853 2683.3023 -1067.0744
700 0.07 0.076457136 3156.8448 57.25083 -1.1595102 1.8426624 -1068.9021 2640.5967 -1067.0595
750 0.075 0.076457132 3099.5181 56.934336 -1.1893875 1.8324758 -1068.877 2778.3261 -1067.0445
800 0.08 0.076457116 3132.9985 55.266343 -1.2181223 1.7787901 -1068.809 3020.1175 -1067.0302
850 0.085 0.076457116 3163.2943 53.376453 -1.2443326 1.7179626 -1068.735 3287.9042 -1067.0171
900 0.09 0.076457121 3168.063 52.279557 -1.2676425 1.6826581 -1068.6881 3504.7334 -1067.0054
950 0.095 0.076457122 3144.2102 52.667743 -1.2902335 1.6951522 -1068.6893 3622.1382 -1066.9941
1000 0.1 0.076457135 3061.0811 54.684094 -1.314147 1.76005 -1068.7422 3625.2935 -1066.9822
Loop time of 1.6779 on 1 procs for 1000 steps with 250 atoms
Performance: 5.149 ns/day, 4.661 hours/ns, 595.982 timesteps/s
100.0% CPU use with 1 MPI tasks x no OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0.78285 | 0.78285 | 0.78285 | 0.0 | 46.66
Neigh | 0.004487 | 0.004487 | 0.004487 | 0.0 | 0.27
Comm | 0.022926 | 0.022926 | 0.022926 | 0.0 | 1.37
Output | 0.003927 | 0.003927 | 0.003927 | 0.0 | 0.23
Modify | 0.86033 | 0.86033 | 0.86033 | 0.0 | 51.27
Other | | 0.003381 | | | 0.20
Nlocal: 250 ave 250 max 250 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 1399 ave 1399 max 1399 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 7855 ave 7855 max 7855 min
Histogram: 1 0 0 0 0 0 0 0 0 0
FullNghs: 15710 ave 15710 max 15710 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 15710
Ave neighs/atom = 62.84
Neighbor list builds = 6
Dangerous builds = 0
Please see the log.cite file for references relevant to this simulation
Total wall time: 0:00:01

View File

@ -1,136 +0,0 @@
LAMMPS (30 Oct 2019)
# bcc iron in a 3d periodic box
clear
units metal
atom_style spin
dimension 3
boundary p p p
# necessary for the serial algorithm (sametag)
atom_modify map array
lattice bcc 2.8665
Lattice spacing in x,y,z = 2.8665 2.8665 2.8665
region box block 0.0 5.0 0.0 5.0 0.0 5.0
create_box 1 box
Created orthogonal box = (0 0 0) to (14.3325 14.3325 14.3325)
1 by 2 by 2 MPI processor grid
create_atoms 1 box
Created 250 atoms
create_atoms CPU = 0.000705957 secs
# setting mass, mag. moments, and interactions for bcc iron
mass 1 55.845
set group all spin/random 31 2.2
250 settings made for spin/random
# set group all spin 2.2 0.0 0.0 1.0
velocity all create 100 4928459 rot yes dist gaussian
pair_style hybrid/overlay eam/alloy spin/exchange 3.5
pair_coeff * * eam/alloy Fe_Mishin2006.eam.alloy Fe
pair_coeff * * spin/exchange exchange 3.4 0.02726 0.2171 1.841
neighbor 0.1 bin
neigh_modify every 10 check yes delay 20
fix 1 all precession/spin zeeman 0.0 0.0 0.0 1.0
fix 2 all langevin/spin 0.0 0.0 21
fix 3 all nve/spin lattice moving
timestep 0.0001
# compute and output options
compute out_mag all spin
compute out_pe all pe
compute out_ke all ke
compute out_temp all temp
variable magz equal c_out_mag[3]
variable magnorm equal c_out_mag[4]
variable emag equal c_out_mag[5]
variable tmag equal c_out_mag[6]
thermo_style custom step time v_magnorm v_tmag temp v_emag ke pe press etotal
thermo 50
compute outsp all property/atom spx spy spz sp fmx fmy fmz
dump 1 all custom 100 dump_iron.lammpstrj type x y z c_outsp[1] c_outsp[2] c_outsp[3]
run 1000
Neighbor list info ...
update every 10 steps, delay 20 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 5.77337
ghost atom cutoff = 5.77337
binsize = 2.88668, bins = 5 5 5
2 neighbor lists, perpetual/occasional/extra = 2 0 0
(1) pair eam/alloy, perpetual, half/full from (2)
attributes: half, newton on
pair build: halffull/newton
stencil: none
bin: none
(2) pair spin/exchange, perpetual
attributes: full, newton on
pair build: full/bin/atomonly
stencil: full/bin/3d
bin: standard
Per MPI rank memory allocation (min/avg/max) = 7.766 | 7.766 | 7.766 Mbytes
Step Time v_magnorm v_tmag Temp v_emag KinEng PotEng Press TotEng
0 0 0.076456975 4554.5462 100.00358 -0.85791269 3.2186929 -1070.8579 394.43342 -1067.6392
50 0.005 0.076456995 4701.2004 96.298333 -0.85659448 3.0994366 -1070.7387 714.37866 -1067.6392
100 0.01 0.076457028 4794.5923 86.330828 -0.87003341 2.7786247 -1070.4179 1484.2951 -1067.6392
150 0.015 0.076457074 4836.9634 71.603402 -0.89006992 2.3046111 -1069.9438 2551.9258 -1067.6392
200 0.02 0.076457106 4754.5574 54.648817 -0.91124541 1.7589146 -1069.3981 3731.1494 -1067.6392
250 0.025 0.076457128 4502.135 38.599515 -0.93187522 1.2423553 -1068.8816 4804.619 -1067.6392
300 0.03 0.076457157 4176.7186 26.383018 -0.95082226 0.8491579 -1068.4884 5563.3287 -1067.6392
350 0.035 0.076457207 3955.5658 20.01039 -0.96826468 0.64404992 -1068.2833 5839.6479 -1067.6392
400 0.04 0.076457243 3887.9746 20.097682 -0.98706373 0.64685949 -1068.2861 5601.1255 -1067.6392
450 0.045 0.076457231 3868.5613 25.687511 -1.0095684 0.82677249 -1068.466 4974.0031 -1067.6392
500 0.05 0.076457204 3838.4905 34.604697 -1.0349855 1.113779 -1068.753 4157.1837 -1067.6392
550 0.055 0.076457196 3775.1404 44.251809 -1.0609123 1.4242788 -1069.0635 3357.1 -1067.6392
600 0.06 0.076457188 3604.8828 52.475202 -1.0880854 1.6889551 -1069.3282 2752.0424 -1067.6392
650 0.065 0.07645718 3345.5894 57.926479 -1.1179657 1.8644087 -1069.5036 2467.7403 -1067.6392
700 0.07 0.076457185 3138.2001 60.030548 -1.1469999 1.9321298 -1069.5714 2510.1752 -1067.6392
750 0.075 0.07645719 3074.9626 59.122504 -1.1721939 1.9029037 -1069.5421 2788.7489 -1067.6392
800 0.08 0.076457195 3103.5294 56.349146 -1.1949365 1.813641 -1069.4529 3192.5158 -1067.6392
850 0.085 0.076457199 3164.2317 53.154464 -1.2164642 1.7108177 -1069.35 3602.931 -1067.6392
900 0.09 0.076457199 3228.1358 50.837416 -1.2366018 1.6362417 -1069.2755 3917.0758 -1067.6392
950 0.095 0.076457222 3247.5532 50.234549 -1.2539657 1.6168379 -1069.2561 4059.9275 -1067.6392
1000 0.1 0.076457266 3208.3875 51.592727 -1.2671834 1.6605519 -1069.2998 4001.4995 -1067.6392
Loop time of 1.47769 on 4 procs for 1000 steps with 250 atoms
Performance: 5.847 ns/day, 4.105 hours/ns, 676.731 timesteps/s
100.0% CPU use with 4 MPI tasks x no OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0.21791 | 0.22724 | 0.23568 | 1.4 | 15.38
Neigh | 0.001137 | 0.0011771 | 0.0012221 | 0.1 | 0.08
Comm | 0.066727 | 0.074288 | 0.083826 | 2.3 | 5.03
Output | 0.0017431 | 0.0017657 | 0.0018256 | 0.1 | 0.12
Modify | 1.1707 | 1.1718 | 1.1725 | 0.1 | 79.30
Other | | 0.001427 | | | 0.10
Nlocal: 62.5 ave 66 max 60 min
Histogram: 1 0 0 2 0 0 0 0 0 1
Nghost: 844 ave 857 max 829 min
Histogram: 1 0 0 1 0 0 0 0 1 1
Neighs: 1962.5 ave 2096 max 1855 min
Histogram: 1 0 1 0 0 1 0 0 0 1
FullNghs: 3925 ave 4139 max 3766 min
Histogram: 1 0 0 2 0 0 0 0 0 1
Total # of neighbors = 15700
Ave neighs/atom = 62.8
Neighbor list builds = 6
Dangerous builds = 0
Please see the log.cite file for references relevant to this simulation
Total wall time: 0:00:01

View File

@ -31,6 +31,7 @@ neighbor 0.1 bin
neigh_modify every 10 check yes delay 20
fix 1 all precession/spin zeeman 0.0 0.0 0.0 1.0
fix_modify 1 energy yes
fix 2 all langevin/spin 0.0 0.0 21
fix 3 all nve/spin lattice moving
@ -48,7 +49,7 @@ variable magnorm equal c_out_mag[4]
variable emag equal c_out_mag[5]
variable tmag equal c_out_mag[6]
thermo_style custom step time v_magnorm v_emag temp v_tmag etotal
thermo_style custom step time v_magnorm pe v_emag temp v_tmag etotal
thermo 50
compute outsp all property/atom spx spy spz sp fmx fmy fmz

View File

@ -50,7 +50,7 @@ variable magnorm equal c_out_mag[4]
variable emag equal c_out_mag[5]
variable tmag equal c_out_mag[6]
thermo_style custom step time v_magnorm v_emag temp v_tmag etotal
thermo_style custom step time v_magnorm pe v_emag temp v_tmag etotal
thermo 50
compute outsp all property/atom spx spy spz sp fmx fmy fmz

Some files were not shown because too many files have changed in this diff