make the legacy fortran wrapper work (again)

Axel Kohlmeyer 2020-10-09 07:52:36 -04:00
parent ca3d10fa39
commit 5457accb3d
No known key found for this signature in database
GPG Key ID: D9B44E93BF0C375A
5 changed files with 148 additions and 49 deletions

View File

@@ -1,7 +1,7 @@
-libfwrapper.c is a C file that wraps the LAMMPS library API
-in src/library.h so that it can be called from Fortran.
+libfwrapper.c is a C file that wraps a few functions of the LAMMPS
+library API in src/library.h so that it can be called from Fortran.
-See the couple/simple/simple.f90 program for an example of a Fortran
+See the couple/simple/simple_f77.f90 program for an example of a Fortran
 code that does this.
 See the README file in that dir for instructions on how to build a
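For context (not part of the diff): every wrapper in libfwrapper.c follows the same pattern, a C function whose name carries a trailing underscore to match common Fortran name mangling, arguments passed by reference, and the LAMMPS instance handed back and forth as an INTEGER (kind=8) that the C side casts to the opaque handle. The sketch below illustrates that pattern with a hypothetical wrapper around the C library call lammps_version() from src/library.h; the wrapper name lammps_version_ is only an example and is not added by this commit.

/* Illustrative sketch of the libfwrapper.c pattern (hypothetical wrapper,
   not part of this commit).  The Fortran caller keeps the LAMMPS instance
   in an INTEGER (kind=8); the wrapper casts it back to the opaque pointer. */
#include <stdint.h>
#include "library.h"            /* LAMMPS C library API */

void lammps_version_(int64_t *ptr, int *version)
{
  void *obj = (void *) *ptr;    /* recover the LAMMPS handle */
  *version = lammps_version(obj);
}

From Fortran this would be used as CALL lammps_version(ptr, version) with ptr declared INTEGER (kind=8).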

View File

@@ -29,14 +29,7 @@
 void lammps_open_(MPI_Fint *comm, int64_t *ptr)
 {
-  void *obj;
-  MPI_Comm ccomm;
-  /* convert MPI communicator from fortran to c */
-  ccomm = MPI_Comm_f2c(*comm);
-  lammps_open(0,NULL,ccomm,&obj);
-  *ptr = (int64_t) obj;
+  *ptr = (int64_t) lammps_open_fortran(0,NULL,*comm);
 }
 /* no-MPI version of the wrapper from above. */
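Aside (not part of the diff): the hunk above works as a one-liner because lammps_open_fortran() performs the Fortran-to-C communicator conversion internally. A rough sketch of the equivalent logic, reconstructed from the deleted lines above and not taken from the actual library source (the helper name open_from_fortran_comm is hypothetical):

/* Sketch of what lammps_open_fortran() now hides from the wrapper;
   reconstructed from the removed code, helper name is hypothetical. */
#include <mpi.h>
#include "library.h"

static void *open_from_fortran_comm(MPI_Fint f_comm)
{
  void *obj;
  MPI_Comm ccomm = MPI_Comm_f2c(f_comm);  /* convert Fortran handle to C communicator */
  lammps_open(0, NULL, ccomm, &obj);      /* create the LAMMPS instance */
  return obj;
}

The wrapper itself no longer has to do the conversion on its own.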
@@ -107,30 +100,3 @@ void lammps_get_natoms_(int64_t *ptr, MPI_Fint *natoms)
   *natoms = lammps_get_natoms(obj);
 }
-/* wrapper to copy coordinates from lammps to fortran */
-/* NOTE: this is now out-of-date, needs to be updated to lammps_gather_atoms()
-void lammps_get_coords_(int64_t *ptr, double *coords)
-{
-  void *obj;
-  obj = (void *) *ptr;
-  lammps_get_coords(obj,coords);
-}
-*/
-/* wrapper to copy coordinates from fortran to lammps */
-/* NOTE: this is now out-of-date, needs to be updated to lammps_scatter_atoms()
-void lammps_put_coords_(int64_t *ptr, double *coords)
-{
-  void *obj;
-  obj = (void *) *ptr;
-  lammps_put_coords(obj,coords);
-}
-*/
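The two wrappers deleted above had been commented out because the calls they relied on are out of date; the NOTE in the removed code points at lammps_gather_atoms() and lammps_scatter_atoms() as the replacements. A possible updated pair (hypothetical, untested, not part of this commit) could look like this:

/* Hypothetical updated wrappers following the NOTE in the deleted code:
   use lammps_gather_atoms()/lammps_scatter_atoms() for per-atom coordinates.
   Not part of this commit. */
#include <stdint.h>
#include "library.h"

void lammps_gather_coords_(int64_t *ptr, double *coords)
{
  void *obj = (void *) *ptr;
  /* "x" = per-atom coordinates, type 1 = double, count 3 = x/y/z per atom */
  lammps_gather_atoms(obj, "x", 1, 3, coords);
}

void lammps_scatter_coords_(int64_t *ptr, double *coords)
{
  void *obj = (void *) *ptr;
  lammps_scatter_atoms(obj, "x", 1, 3, coords);
}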

View File

@@ -7,18 +7,21 @@ code to perform a coupled calculation.
 simple.cpp is the C++ driver
 simple.c is the C driver
-simple.f90 is the Fortran driver
+simple.f90 is the Fortran driver using the new Fortran module
+simple_f77.f90 is the Fortran driver using the legacy Fortran wrapper
-The 3 codes do the same thing, so you can compare them to see how to
+The 4 codes do the same thing, so you can compare them to see how to
 drive LAMMPS from each language. See python/example/simple.py
-to do something similar from Python. The Fortran driver requires a
-Fortran module that uses the Fortran 03 ISO_C_BINDING module to
+to do something similar from Python. The new Fortran driver requires
+a Fortran module that uses the Fortran 03 ISO_C_BINDING module to
 interface the LAMMPS C library functions to Fortran.
 First build LAMMPS as a library (see examples/COUPLE/README), e.g.
 make mode=shlib mpi
 or via CMake through settings -DBUILD_SHARED_LIBS=on
 You can then build any of the driver codes with compile lines like
 these, which include paths to the LAMMPS library interface, and
 linking with FFTW (only needed if you built LAMMPS as a library with
@@ -27,22 +30,27 @@ its PPPM solver).
 This builds the C++ driver with the LAMMPS library using the mpicxx
 (C++) compiler:
-mpicxx -I/home/sjplimp/lammps/src -c simple.cpp
-mpicxx -L/home/sjplimp/lammps/src simple.o -llammps -o simpleCC
+mpicxx -I${HOME}/lammps/src -c simple.cpp
+mpicxx -L${HOME}/lammps/src simple.o -llammps -o simpleCC
 This builds the C driver with the LAMMPS library using the mpicc (C)
 compiler:
-mpicc -I/home/sjplimp/lammps/src -c simple.c
-mpicc -L/home/sjplimp/lammps/src simple.o -llammps -o simpleC
+mpicc -I${HOME}/lammps/src -c simple.c
+mpicc -L${HOME}/lammps/src simple.o -llammps -o simpleC
 This builds the Fortran module and driver with the LAMMPS library
 using the mpifort (Fortran) compilers, using the Fortran module from
 the fortran directory:
-mpifort -L/home/sjplimp/lammps/src ../../../fortran/lammps.f90 simple.f90 -llammps -o simpleF
+mpifort -L${HOME}/lammps/src ../../../fortran/lammps.f90 simple.f90 -llammps -o simpleF
-You then run simpleCC, simpleC, or simpleF on a parallel machine
+This builds the legacy Fortran wrapper and driver with the LAMMPS library
+using the mpifort (Fortran) MPI compiler wrapper (assuming GNU gfortran):
+mpifort -std=legacy -L${HOME}/lammps/src ../fortran/libfwrapper.c simple_f77.f90 -llammps -o simpleF77
+You then run simpleCC, simpleC, simpleF, or simpleF77 on a parallel machine
 on some number of processors Q with 2 arguments:
 % mpirun -np Q simpleCC P in.lj

View File

@@ -29,7 +29,7 @@ PROGRAM f_driver
REAL (kind=8), ALLOCATABLE :: x(:)
REAL (kind=8), PARAMETER :: epsilon=0.1
CHARACTER (len=64) :: arg
CHARACTER (len=1024) :: line

View File

@@ -0,0 +1,125 @@
! LAMMPS - Large-scale Atomic/Molecular Massively Parallel Simulator
! www.cs.sandia.gov/~sjplimp/lammps.html
! Steve Plimpton, sjplimp@sandia.gov, Sandia National Laboratories
!
! Copyright (2003) Sandia Corporation. Under the terms of Contract
! DE-AC04-94AL85000 with Sandia Corporation, the U.S. Government retains
! certain rights in this software. This software is distributed under
! the GNU General Public License.
!
! See the README file in the top-level LAMMPS directory.

! f_driver = simple example of how an umbrella program
!            can invoke LAMMPS as a library on some subset of procs
! Syntax: simpleF P in.lammps
!         P = # of procs to run LAMMPS on
!             must be <= # of procs the driver code itself runs on
!         in.lammps = LAMMPS input script
! See README for compilation instructions

PROGRAM f_driver
  IMPLICIT NONE
  INCLUDE 'mpif.h'

  INTEGER, PARAMETER :: fp=20
  INTEGER :: n, narg, ierr, me, nprocs, natoms
  INTEGER :: lammps, nprocs_lammps, comm_lammps
  INTEGER (kind=8) :: ptr
  REAL (kind=8), ALLOCATABLE :: x(:)
  REAL (kind=8), PARAMETER :: epsilon=0.1
  CHARACTER (len=64) :: arg
  CHARACTER (len=1024) :: line

  ! setup MPI and various communicators
  ! driver runs on all procs in MPI_COMM_WORLD
  ! comm_lammps only has 1st P procs (could be all or any subset)

  CALL mpi_init(ierr)

  narg = command_argument_count()
  IF (narg /= 2) THEN
     PRINT *, 'Syntax: simpleF P in.lammps'
     CALL mpi_abort(MPI_COMM_WORLD,1,ierr)
  END IF

  CALL mpi_comm_rank(MPI_COMM_WORLD,me,ierr)
  CALL mpi_comm_size(MPI_COMM_WORLD,nprocs,ierr)

  CALL get_command_argument(1,arg)
  READ (arg,'(I10)') nprocs_lammps

  IF (nprocs_lammps > nprocs) THEN
     IF (me == 0) THEN
        PRINT *, 'ERROR: LAMMPS cannot use more procs than available'
        CALL mpi_abort(MPI_COMM_WORLD,2,ierr)
     END IF
  END IF

  lammps = 0
  IF (me < nprocs_lammps) THEN
     lammps = 1
  ELSE
     lammps = MPI_UNDEFINED
  END IF
  CALL mpi_comm_split(MPI_COMM_WORLD,lammps,0,comm_lammps,ierr)

  ! open LAMMPS input script on rank zero

  CALL get_command_argument(2,arg)
  OPEN(UNIT=fp, FILE=arg, ACTION='READ', STATUS='OLD', IOSTAT=ierr)
  IF (ierr /= 0) THEN
     PRINT *, 'ERROR: Could not open LAMMPS input script'
     CALL mpi_abort(MPI_COMM_WORLD,3,ierr)
  END IF

  ! run the input script thru LAMMPS one line at a time until end-of-file
  ! driver proc 0 reads a line, Bcasts it to all procs
  ! (could just send it to proc 0 of comm_lammps and let it Bcast)
  ! all LAMMPS procs call lammps_command() on the line

  IF (lammps == 1) CALL lammps_open(comm_lammps,ptr)

  n = 0
  DO
     IF (me == 0) THEN
        READ (UNIT=fp, FMT='(A)', IOSTAT=ierr) line
        n = 0
        IF (ierr == 0) THEN
           n = LEN(TRIM(line))
           IF (n == 0) THEN
              line = ' '
              n = 1
           END IF
        END IF
     END IF
     CALL mpi_bcast(n,1,MPI_INTEGER,0,MPI_COMM_WORLD,ierr)
     IF (n == 0) EXIT
     CALL mpi_bcast(line,n,MPI_CHARACTER,0,MPI_COMM_WORLD,ierr)
     IF (lammps == 1) CALL lammps_command(ptr,line,n)
  END DO
  CLOSE(UNIT=fp)

  ! run 10 more steps followed by a single step

  IF (lammps == 1) THEN
     CALL lammps_command(ptr,'run 10',6)
     CALL lammps_get_natoms(ptr,natoms)
     PRINT *, 'natoms=',natoms
     CALL lammps_command(ptr,'run 1',5)
  END IF

  ! free LAMMPS object

  IF (lammps == 1) CALL lammps_close(ptr)

  ! close down MPI

  CALL mpi_finalize(ierr)

END PROGRAM f_driver