From a499b28469f3d0fd839c61af6ccf99694f414f8a Mon Sep 17 00:00:00 2001
From: sjplimp
Date: Wed, 12 Jan 2011 21:41:35 +0000
Subject: [PATCH] git-svn-id: svn://svn.icms.temple.edu/lammps-ro/trunk@5545 f3b2605a-c512-4ea7-a41b-209d697bcdaa
---
 lib/gpu/crml_gpu.cpp        | 135 ++++++++++++++++++++++++++++
 lib/gpu/crml_gpu_memory.cpp | 169 ++++++++++++++++++++++++++++++++++++
 lib/gpu/crml_gpu_memory.h   |  79 +++++++++++++++++
 3 files changed, 383 insertions(+)
 create mode 100644 lib/gpu/crml_gpu.cpp
 create mode 100644 lib/gpu/crml_gpu_memory.cpp
 create mode 100644 lib/gpu/crml_gpu_memory.h

diff --git a/lib/gpu/crml_gpu.cpp b/lib/gpu/crml_gpu.cpp
new file mode 100644
index 0000000000..7458300907
--- /dev/null
+++ b/lib/gpu/crml_gpu.cpp
@@ -0,0 +1,135 @@
+/* ----------------------------------------------------------------------
+   LAMMPS - Large-scale Atomic/Molecular Massively Parallel Simulator
+   http://lammps.sandia.gov, Sandia National Laboratories
+   Steve Plimpton, sjplimp@sandia.gov
+
+   Copyright (2003) Sandia Corporation. Under the terms of Contract
+   DE-AC04-94AL85000 with Sandia Corporation, the U.S. Government retains
+   certain rights in this software. This software is distributed under
+   the GNU General Public License.
+
+   See the README file in the top-level LAMMPS directory.
+------------------------------------------------------------------------- */
+
+/* ----------------------------------------------------------------------
+   Contributing authors: Mike Brown (ORNL), brownw@ornl.gov
+------------------------------------------------------------------------- */
+
+#include <iostream>
+#include <cassert>
+#include <math.h>
+
+#include "crml_gpu_memory.h"
+
+using namespace std;
+
+static CRML_GPU_Memory<PRECISION,ACC_PRECISION> CRMLMF;
+
+// ---------------------------------------------------------------------------
+// Allocate memory on host and device and copy constants to device
+// ---------------------------------------------------------------------------
+bool crml_gpu_init(const int ntypes, double cut_bothsq, double **host_lj1,
+                   double **host_lj2, double **host_lj3, double **host_lj4,
+                   double **offset, double *special_lj, const int inum,
+                   const int nall, const int max_nbors, const int maxspecial,
+                   const double cell_size, int &gpu_mode, FILE *screen,
+                   double host_cut_ljsq, double host_cut_coulsq,
+                   double *host_special_coul, const double qqrd2e,
+                   const double g_ewald, const double cut_lj_innersq,
+                   const double denom_lj, double **epsilon,
+                   double **sigma, const bool mix_arithmetic) {
+  CRMLMF.clear();
+  gpu_mode=CRMLMF.device->gpu_mode();
+  double gpu_split=CRMLMF.device->particle_split();
+  int first_gpu=CRMLMF.device->first_device();
+  int last_gpu=CRMLMF.device->last_device();
+  int world_me=CRMLMF.device->world_me();
+  int gpu_rank=CRMLMF.device->gpu_rank();
+  int procs_per_gpu=CRMLMF.device->procs_per_gpu();
+
+  CRMLMF.device->init_message(screen,"lj/charmm/coul/long",first_gpu,last_gpu);
+
+  bool message=false;
+  if (CRMLMF.device->replica_me()==0 && screen)
+    message=true;
+
+  if (message) {
+    fprintf(screen,"Initializing GPU and compiling on process 0...");
+    fflush(screen);
+  }
+
+  if (world_me==0) {
+    bool init_ok=CRMLMF.init(ntypes, cut_bothsq, host_lj1, host_lj2, host_lj3,
+                             host_lj4, offset, special_lj, inum, nall, 300,
+                             maxspecial, cell_size, gpu_split, screen,
+                             host_cut_ljsq, host_cut_coulsq, host_special_coul,
+                             qqrd2e, g_ewald, cut_lj_innersq, denom_lj,
+                             epsilon, sigma, mix_arithmetic);
+    if (!init_ok)
+      return false;
+  }
+
+  CRMLMF.device->world_barrier();
+  if (message)
+    fprintf(screen,"Done.\n");
+
+  for (int i=0; i<procs_per_gpu; i++) {
+    if (message) {
+      if (last_gpu-first_gpu==0)
+        fprintf(screen,"Initializing GPU %d on core %d...",first_gpu,i);
+      else
+        fprintf(screen,"Initializing GPUs %d-%d on core %d...",first_gpu,
+                last_gpu,i);
+      fflush(screen);
+    }
+    if (gpu_rank==i && world_me!=0) {
+      bool init_ok=CRMLMF.init(ntypes, cut_bothsq, host_lj1, host_lj2,
+                               host_lj3, host_lj4, offset, special_lj, inum,
+                               nall, 300, maxspecial, cell_size, gpu_split,
+                               screen, host_cut_ljsq, host_cut_coulsq,
+                               host_special_coul, qqrd2e, g_ewald,
+                               cut_lj_innersq, denom_lj, epsilon, sigma,
+                               mix_arithmetic);
+      if (!init_ok)
+        return false;
+    }
+    CRMLMF.device->gpu_barrier();
+    if (message)
+      fprintf(screen,"Done.\n");
+  }
+  if (message)
+    fprintf(screen,"\n");
+  return true;
+}
+
+void crml_gpu_clear() {
+  CRMLMF.clear();
+}
+
+int * crml_gpu_compute_n(const int timestep, const int ago, const int inum_full,
+                         const int nall, double **host_x, int *host_type,
+                         double *boxlo, double *boxhi, int *tag, int **nspecial,
+                         int **special, const bool eflag, const bool vflag,
+                         const bool eatom, const bool vatom, int &host_start,
+                         const double cpu_time, bool &success, double *host_q) {
+  return CRMLMF.compute(timestep, ago, inum_full, nall, host_x, host_type,
+                        boxlo, boxhi, tag, nspecial, special, eflag, vflag,
+                        eatom, vatom, host_start, cpu_time, success, host_q);
+}
+
+void crml_gpu_compute(const int timestep, const int ago, const int inum_full,
+                      const int nall, double **host_x, int *host_type,
+                      int *ilist, int *numj, int **firstneigh,
+                      const bool eflag, const bool vflag, const bool eatom,
+                      const bool vatom, int &host_start, const double cpu_time,
+                      bool &success, double *host_q) {
+  CRMLMF.compute(timestep,ago,inum_full,nall,host_x,host_type,ilist,numj,
+                 firstneigh,eflag,vflag,eatom,vatom,host_start,cpu_time,
+                 success,host_q);
+}
+
+double crml_gpu_bytes() {
+  return CRMLMF.host_memory_usage();
+}
+
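[Note, not part of the patch] The round-robin initialization in crml_gpu_init()
is easy to misread in diff form: process 0 initializes (and triggers kernel
compilation) first, then the MPI ranks sharing each GPU take turns behind
barriers. Below is a minimal, compilable sketch of that control flow with the
device-manager and MPI calls stubbed out; do_init() and barrier() are
stand-ins for CRMLMF.init(...) and the world/gpu barriers, and the values
passed in main() are illustrative only.

    #include <cstdio>

    static bool do_init() { return true; }    // stand-in for CRMLMF.init(...)
    static void barrier(const char *which) {  // stand-in for world/gpu barriers
      printf("  barrier: %s\n", which);
    }

    bool init_handshake(int world_me, int gpu_rank, int procs_per_gpu) {
      // process 0 initializes (and compiles the kernels) first ...
      if (world_me==0 && !do_init())
        return false;
      barrier("world");
      // ... then the ranks sharing each GPU take one turn per pass
      for (int i=0; i<procs_per_gpu; i++) {
        if (gpu_rank==i && world_me!=0 && !do_init())
          return false;
        barrier("gpu");
      }
      return true;
    }

    int main() { return init_handshake(0,0,2) ? 0 : 1; }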
diff --git a/lib/gpu/crml_gpu_memory.cpp b/lib/gpu/crml_gpu_memory.cpp
new file mode 100644
index 0000000000..e877503e87
--- /dev/null
+++ b/lib/gpu/crml_gpu_memory.cpp
@@ -0,0 +1,169 @@
+/* ----------------------------------------------------------------------
+   LAMMPS - Large-scale Atomic/Molecular Massively Parallel Simulator
+   http://lammps.sandia.gov, Sandia National Laboratories
+   Steve Plimpton, sjplimp@sandia.gov
+
+   Copyright (2003) Sandia Corporation. Under the terms of Contract
+   DE-AC04-94AL85000 with Sandia Corporation, the U.S. Government retains
+   certain rights in this software. This software is distributed under
+   the GNU General Public License.
+
+   See the README file in the top-level LAMMPS directory.
+------------------------------------------------------------------------- */
+
+/* ----------------------------------------------------------------------
+   Contributing authors: Mike Brown (ORNL), brownw@ornl.gov
+------------------------------------------------------------------------- */
+
+#ifdef USE_OPENCL
+#include "crml_gpu_cl.h"
+#else
+#include "crml_gpu_ptx.h"
+#endif
+
+#include "crml_gpu_memory.h"
+#include <cassert>
+#define CRML_GPU_MemoryT CRML_GPU_Memory<numtyp, acctyp>
+
+extern PairGPUDevice<PRECISION,ACC_PRECISION> pair_gpu_device;
+
+template <class numtyp, class acctyp>
+CRML_GPU_MemoryT::CRML_GPU_Memory() : ChargeGPUMemory<numtyp,acctyp>(),
+                                      _allocated(false) {
+}
+
+template <class numtyp, class acctyp>
+CRML_GPU_MemoryT::~CRML_GPU_Memory() {
+  clear();
+}
+
+template <class numtyp, class acctyp>
+int CRML_GPU_MemoryT::bytes_per_atom(const int max_nbors) const {
+  return this->bytes_per_atom_atomic(max_nbors);
+}
+
+template <class numtyp, class acctyp>
+bool CRML_GPU_MemoryT::init(const int ntypes,
+                            double host_cut_bothsq, double **host_lj1,
+                            double **host_lj2, double **host_lj3,
+                            double **host_lj4, double **host_offset,
+                            double *host_special_lj, const int nlocal,
+                            const int nall, const int max_nbors,
+                            const int maxspecial, const double cell_size,
+                            const double gpu_split, FILE *_screen,
+                            double host_cut_ljsq, const double host_cut_coulsq,
+                            double *host_special_coul, const double qqrd2e,
+                            const double g_ewald, const double cut_lj_innersq,
+                            const double denom_lj, double **epsilon,
+                            double **sigma, const bool mix_arithmetic) {
+  this->init_atomic(nlocal,nall,max_nbors,maxspecial,cell_size,gpu_split,
+                    _screen,crml_gpu_kernel);
+
+  // If atom type constants fit in shared memory use fast kernel
+  int lj_types=ntypes;
+  shared_types=false;
+  if (this->_block_size>=64 && mix_arithmetic)
+    shared_types=true;
+  _lj_types=lj_types;
+
+  // Allocate a host write buffer for data initialization
+  int h_size=lj_types*lj_types;
+  if (h_size<MAX_BIO_SHARED_TYPES)
+    h_size=MAX_BIO_SHARED_TYPES;
+  UCL_H_Vec<numtyp> host_write(h_size*32,*(this->ucl_device),
+                               UCL_WRITE_OPTIMIZED);
+  for (int i=0; i<h_size*32; i++)
+    host_write[i]=0.0;
+
+  lj1.alloc(lj_types*lj_types,*(this->ucl_device),UCL_READ_ONLY);
+  this->atom->type_pack4(ntypes,lj_types,lj1,host_write,host_lj1,host_lj2,
+                         host_lj3,host_lj4);
+
+  ljd.alloc(MAX_BIO_SHARED_TYPES,*(this->ucl_device),UCL_READ_ONLY);
+  this->atom->self_pack2(ntypes,ljd,host_write,epsilon,sigma);
+
+  sp_lj.alloc(8,*(this->ucl_device),UCL_READ_ONLY);
+  for (int i=0; i<4; i++) {
+    host_write[i]=host_special_lj[i];
+    host_write[i+4]=host_special_coul[i];
+  }
+  ucl_copy(sp_lj,host_write,8,false);
+
+  _cut_bothsq = host_cut_bothsq;
+  _cut_coulsq = host_cut_coulsq;
+  _cut_ljsq = host_cut_ljsq;
+  _cut_lj_innersq = cut_lj_innersq;
+  _qqrd2e=qqrd2e;
+  _g_ewald=g_ewald;
+  _denom_lj=denom_lj;
+
+  _allocated=true;
+  this->_max_bytes=lj1.row_bytes()+ljd.row_bytes()+sp_lj.row_bytes();
+  return true;
+}
+
+template <class numtyp, class acctyp>
+void CRML_GPU_MemoryT::clear() {
+  if (!_allocated)
+    return;
+  _allocated=false;
+
+  lj1.clear();
+  ljd.clear();
+  sp_lj.clear();
+  this->clear_atomic();
+}
+
+template <class numtyp, class acctyp>
+double CRML_GPU_MemoryT::host_memory_usage() const {
+  return this->host_memory_usage_atomic()+
+         sizeof(CRML_GPU_Memory<numtyp,acctyp>);
+}
+
+// ---------------------------------------------------------------------------
+// Calculate energies, forces, and torques
+// ---------------------------------------------------------------------------
+template <class numtyp, class acctyp>
+void CRML_GPU_MemoryT::loop(const bool _eflag, const bool _vflag) {
+  // Compute the block size and grid size to keep all cores busy
+  const int BX=this->block_size();
+  int eflag, vflag;
+  if (_eflag)
+    eflag=1;
+  else
+    eflag=0;
+
+  if (_vflag)
+    vflag=1;
+  else
+    vflag=0;
+
+  int GX=static_cast<int>(ceil(static_cast<double>(this->atom->inum())/BX));
+
+  int ainum=this->atom->inum();
+  int anall=this->atom->nall();
+  int nbor_pitch=this->nbor->nbor_pitch();
+  this->time_pair.start();
+  if (shared_types) {
+    this->k_pair_fast.set_size(GX,BX);
+    this->k_pair_fast.run(&this->atom->dev_x.begin(), &ljd.begin(),
+                          &sp_lj.begin(), &this->nbor->dev_nbor.begin(),
+                          &this->atom->dev_ans.begin(),
+                          &this->atom->dev_engv.begin(), &eflag, &vflag,
+                          &ainum, &anall, &nbor_pitch,
+                          &this->atom->dev_q.begin(), &_cut_coulsq,
+                          &_qqrd2e, &_g_ewald, &_denom_lj, &_cut_bothsq,
+                          &_cut_ljsq, &_cut_lj_innersq);
+  } else {
+    this->k_pair.set_size(GX,BX);
+    this->k_pair.run(&this->atom->dev_x.begin(), &lj1.begin(),
+                     &_lj_types, &sp_lj.begin(), &this->nbor->dev_nbor.begin(),
+                     &this->atom->dev_ans.begin(),
+                     &this->atom->dev_engv.begin(), &eflag, &vflag, &ainum,
+                     &anall, &nbor_pitch, &this->atom->dev_q.begin(),
+                     &_cut_coulsq, &_qqrd2e, &_g_ewald, &_denom_lj,
+                     &_cut_bothsq, &_cut_ljsq, &_cut_lj_innersq);
+  }
+  this->time_pair.stop();
+}
+
+template class CRML_GPU_Memory<PRECISION,ACC_PRECISION>;
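[Note, not part of the patch] loop() launches one device thread per local
atom: GX blocks of BX threads cover all inum atoms. A minimal, compilable
sketch of that grid sizing, with BX and inum as illustrative values standing
in for block_size() and atom->inum():

    #include <cmath>
    #include <cstdio>

    int main() {
      const int BX = 64;   // threads per block, e.g. when shared_types is set
      int inum = 1000;     // local atom count (illustrative)
      // same expression as loop(): enough blocks to cover every atom
      int GX = static_cast<int>(ceil(static_cast<double>(inum)/BX));
      printf("%d blocks x %d threads = %d threads for %d atoms\n",
             GX, BX, GX*BX, inum);
      return 0;
    }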
diff --git a/lib/gpu/crml_gpu_memory.h b/lib/gpu/crml_gpu_memory.h
new file mode 100644
index 0000000000..5520cd3a17
--- /dev/null
+++ b/lib/gpu/crml_gpu_memory.h
@@ -0,0 +1,79 @@
+/* ----------------------------------------------------------------------
+   LAMMPS - Large-scale Atomic/Molecular Massively Parallel Simulator
+   http://lammps.sandia.gov, Sandia National Laboratories
+   Steve Plimpton, sjplimp@sandia.gov
+
+   Copyright (2003) Sandia Corporation. Under the terms of Contract
+   DE-AC04-94AL85000 with Sandia Corporation, the U.S. Government retains
+   certain rights in this software. This software is distributed under
+   the GNU General Public License.
+
+   See the README file in the top-level LAMMPS directory.
+------------------------------------------------------------------------- */
+
+/* ----------------------------------------------------------------------
+   Contributing authors: Mike Brown (ORNL), brownw@ornl.gov
+------------------------------------------------------------------------- */
+
+#ifndef CRML_GPU_MEMORY_H
+#define CRML_GPU_MEMORY_H
+
+#include "charge_gpu_memory.h"
+
+template <class numtyp, class acctyp>
+class CRML_GPU_Memory : public ChargeGPUMemory<numtyp, acctyp> {
+ public:
+  CRML_GPU_Memory();
+  ~CRML_GPU_Memory();
+
+  /// Clear any previous data and set up for a new LAMMPS run
+  /** \param max_nbors initial number of rows in the neighbor matrix
+    * \param cell_size cutoff + skin
+    * \param gpu_split fraction of particles handled by device **/
+  bool init(const int ntypes, double host_cut_bothsq,
+            double **host_lj1, double **host_lj2, double **host_lj3,
+            double **host_lj4, double **host_offset, double *host_special_lj,
+            const int nlocal, const int nall, const int max_nbors,
+            const int maxspecial, const double cell_size,
+            const double gpu_split, FILE *screen, double host_cut_ljsq,
+            const double host_cut_coulsq, double *host_special_coul,
+            const double qqrd2e, const double g_ewald,
+            const double cut_lj_innersq, const double denom_lj,
+            double **epsilon, double **sigma, const bool mix_arithmetic);
+
+  /// Clear all host and device data
+  /** \note This is called at the beginning of the init() routine **/
+  void clear();
+
+  /// Returns memory usage on device per atom
+  int bytes_per_atom(const int max_nbors) const;
+
+  /// Total host memory used by library for pair style
+  double host_memory_usage() const;
+
+  // --------------------------- TYPE DATA --------------------------
+
+  /// x = lj1, y = lj2, z = lj3, w = lj4
+  UCL_D_Vec<numtyp4> lj1;
+  /// x = epsilon, y = sigma
+  UCL_D_Vec<numtyp2> ljd;
+  /// Special LJ values [0-3] and Special Coul values [4-7]
+  UCL_D_Vec<numtyp> sp_lj;
+
+  /// If atom type constants fit in shared memory, use fast kernels
+  bool shared_types;
+
+  /// Number of atom types
+  int _lj_types;
+
+  numtyp _qqrd2e, _g_ewald, _denom_lj;
+
+  numtyp _cut_coulsq, _cut_bothsq, _cut_ljsq, _cut_lj_innersq;
+
+ private:
+  bool _allocated;
+  void loop(const bool _eflag, const bool _vflag);
+};
+
+#endif
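[Note, not part of the patch] _cut_lj_innersq, _cut_ljsq and _denom_lj are the
constants of the CHARMM force switching applied between the inner and outer LJ
cutoffs; the patch itself never spells the formula out. A minimal, compilable
sketch, assuming the standard form used by the CPU lj/charmm/coul/long pair
style, where denom_lj = (cut_ljsq - cut_lj_innersq)^3 and the cutoffs below
are illustrative values:

    #include <cstdio>

    // switching factor applied to the LJ energy for
    // cut_lj_innersq < rsq < cut_ljsq
    double lj_switch(double rsq, double cut_ljsq, double cut_lj_innersq,
                     double denom_lj) {
      if (rsq <= cut_lj_innersq) return 1.0;   // unscaled inside inner cutoff
      double d = cut_ljsq - rsq;
      return d*d*(cut_ljsq + 2.0*rsq - 3.0*cut_lj_innersq)/denom_lj;
    }

    int main() {
      double inner = 8.0*8.0, outer = 10.0*10.0;  // illustrative cutoffs^2
      double denom = (outer-inner)*(outer-inner)*(outer-inner);
      // falls smoothly from 1 at the inner cutoff to 0 at the outer cutoff
      for (double r = 8.0; r <= 10.01; r += 0.5)
        printf("r=%4.1f  S=%.4f\n", r, lj_switch(r*r, outer, inner, denom));
      return 0;
    }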