git-svn-id: svn://svn.icms.temple.edu/lammps-ro/trunk@14326 f3b2605a-c512-4ea7-a41b-209d697bcdaa
parent 582ba55e4a
commit e000e4a04f
@@ -15,21 +15,14 @@ if len(argv) != 1:
  print "Syntax: demo.py"
  sys.exit()

me = 0
# uncomment if running in parallel via Pypar
#import pypar
#me = pypar.rank()
#nprocs = pypar.size()

from lammps import lammps

lmp = lammps()

# test out various library functions after running in.demo

lmp.file("in.demo")

if me == 0: print "\nPython output:"
print "\nPython output:"

natoms = lmp.extract_global("natoms",0)
mass = lmp.extract_atom("mass",2)

@@ -55,7 +48,3 @@ xc[0] = xc[0] + 1.0
lmp.scatter_atoms("x",1,3,xc)

print "Changed x[0][0] via scatter_atoms =",x[0][0]

# uncomment if running in parallel via Pypar
#print "Proc %d out of %d procs has" % (me,nprocs), lmp
#pypar.finalize()
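
The gather/scatter calls in this hunk follow the library interface's (name,type,count) convention: type 1 selects per-atom doubles and count 3 requests three values per atom. A minimal round-trip sketch in the same Python 2 style, assuming the lammps module is importable and an in.demo input script is on hand (not part of the diff):

# minimal gather/scatter round trip (Python 2, serial)
from lammps import lammps

lmp = lammps()
lmp.file("in.demo")

natoms = lmp.get_natoms()         # total number of atoms
x = lmp.gather_atoms("x",1,3)     # type 1 = double, 3 values per atom
x[0] += 1.0                       # shift x-coordinate of the first atom
lmp.scatter_atoms("x",1,3,x)      # push the modified coords back to LAMMPS
lmp.command("run 1")              # advance one step with the new coords
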
File diff suppressed because it is too large
@@ -2,9 +2,24 @@
# preceding line should have path for Python on your machine

# simple.py
# Purpose: mimic operation of couple/simple/simple.cpp via Python
# Syntax: simple.py in.lammps
#   in.lammps = LAMMPS input script
# Purpose: mimic operation of examples/COUPLE/simple/simple.cpp via Python

# Serial syntax: simple.py in.lammps
#   in.lammps = LAMMPS input script

# Parallel syntax: mpirun -np 4 simple.py in.lammps
#   in.lammps = LAMMPS input script

# if run in parallel with script as-is:
#   will invoke P instances of a one-processor run
#   both Python and LAMMPS will run on single processors
#   each will read same input file, write to same log.lammps file (bad)

# if run in parallel after uncommenting either Pypar or mpi4py sections below:
#   will invoke 1 instance of a P-processor run
#   both Python and LAMMPS will run in parallel
# see the split.py example for how to use multiple procs
#   to run multiple LAMMPS jobs, each on a subset of procs

import sys

@@ -19,11 +34,16 @@ infile = sys.argv[1]

me = 0

# uncomment if running in parallel via Pypar
# uncomment this if running in parallel via Pypar
#import pypar
#me = pypar.rank()
#nprocs = pypar.size()

# uncomment this if running in parallel via mpi4py
#from mpi4py import MPI
#me = MPI.COMM_WORLD.Get_rank()
#nprocs = MPI.COMM_WORLD.Get_size()

from lammps import lammps
lmp = lammps()

@@ -54,3 +74,7 @@ print "Force on 1 atom via extract_variable:",fx[0]
# uncomment if running in parallel via Pypar
#print "Proc %d out of %d procs has" % (me,nprocs), lmp
#pypar.finalize()

# uncomment if running in parallel via mpi4py
#print "Proc %d out of %d procs has" % (me,nprocs), lmp
#MPI.Finalize()
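
For reference, a sketch of simple.py with the mpi4py block enabled, in the same Python 2 style (an illustration of the intended usage, not part of the diff). Launched with something like mpirun -np 4 python simple.py in.lammps, all ranks join a single P-processor LAMMPS run, as the header comments above describe:

# simple.py with the mpi4py section uncommented (Python 2);
# assumes mpi4py and a LAMMPS shared library built with MPI support
import sys
from mpi4py import MPI
from lammps import lammps

me = MPI.COMM_WORLD.Get_rank()
nprocs = MPI.COMM_WORLD.Get_size()

lmp = lammps()                    # no comm argument: LAMMPS spans all ranks
lmp.file(sys.argv[1])

print "Proc %d out of %d procs has" % (me,nprocs), lmp
MPI.Finalize()
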
@@ -0,0 +1,79 @@
#!/usr/bin/env python -i
# preceding line should have path for Python on your machine

# split.py
# Purpose: similar to simple.py, but first the world communicator
#   is split in two halves and LAMMPS is run only on one partition
# Syntax: split.py in.lammps
#   in.lammps = LAMMPS input script

import sys

# parse command line

argv = sys.argv
if len(argv) != 2:
  print "Syntax: split.py in.lammps"
  sys.exit()

infile = sys.argv[1]

me = 0

# this example *only* works with mpi4py version 2.0.0 or later

from mpi4py import MPI
comm = MPI.COMM_WORLD
me = comm.Get_rank()
nprocs = comm.Get_size()

# create two subcommunicators

if me < nprocs // 2: color = 0
else: color = 1

split = comm.Split(color,key=0)

if color == 0:
  from lammps import lammps
  lmp = lammps(comm=split)

  # run infile one line at a time

  lines = open(infile,'r').readlines()
  for line in lines: lmp.command(line)

  # run 10 more steps
  # get coords from LAMMPS
  # change coords of 1st atom
  # put coords back into LAMMPS
  # run a single step with changed coords

  lmp.command("run 10")
  x = lmp.gather_atoms("x",1,3)
  epsilon = 0.1
  x[0] += epsilon
  lmp.scatter_atoms("x",1,3,x)
  lmp.command("run 1")

  f = lmp.extract_atom("f",3)
  print "Force on 1 atom via extract_atom: ",f[0][0]

  fx = lmp.extract_variable("fx","all",1)
  print "Force on 1 atom via extract_variable:",fx[0]
  print "Proc %d out of %d procs has" % (me,nprocs), lmp
  print "Calculation on partition 0 complete"

else:
  # could run a 2nd calculation on second partition
  # with different LAMMPS instance or another code
  # in this case, just sleep on second partition

  import time
  time.sleep(2)
  print "Calculation on partition 1 complete"

# shutdown mpi4py

comm.Barrier()
MPI.Finalize()
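
The key step in split.py is comm.Split(): every rank supplies a color, ranks sharing a color land in the same subcommunicator, and key breaks ties in rank ordering. A standalone mpi4py sketch of the same pattern, independent of LAMMPS (the printed message is purely illustrative):

# split MPI_COMM_WORLD into two halves and report the mapping (Python 2)
from mpi4py import MPI

world = MPI.COMM_WORLD
me = world.Get_rank()
nprocs = world.Get_size()

color = 0 if me < nprocs // 2 else 1      # lower half -> 0, upper half -> 1
sub = world.Split(color,key=0)            # key=0 keeps the original rank order

print "world rank %d -> color %d, sub rank %d of %d" % \
      (me,color,sub.Get_rank(),sub.Get_size())

world.Barrier()
MPI.Finalize()
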
@@ -17,12 +17,6 @@ if len(argv) != 2:

infile = sys.argv[1]

me = 0
# uncomment if running in parallel via Pypar
#import pypar
#me = pypar.rank()
#nprocs = pypar.size()

from lammps import lammps
lmp = lammps()

@@ -30,11 +24,7 @@ lmp = lammps()

lmp.file(infile)

# run infile one line at a time
# or run infile one line at a time

#lines = open(infile,'r').readlines()
#for line in lines: lmp.command(line)

# uncomment if running in parallel via Pypar
#print "Proc %d out of %d procs has" % (me,nprocs), lmp
#pypar.finalize()
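
The commented alternative above, feeding the script to lmp.command() one line at a time instead of calling lmp.file(), is useful when Python needs to act between LAMMPS commands. A small sketch in the same Python 2 style; the "in.lammps" file name and the progress print are placeholders, not part of the diff:

# drive LAMMPS line by line instead of via lmp.file() (Python 2)
from lammps import lammps

lmp = lammps()
for line in open("in.lammps"):
  lmp.command(line)               # issue one input-script command
  print "issued:", line.rstrip()  # illustrative progress message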