diff --git a/python/lammps/core.py b/python/lammps/core.py
index 930a40a4b0..5cd112ed2d 100644
--- a/python/lammps/core.py
+++ b/python/lammps/core.py
@@ -1622,8 +1622,8 @@ class lammps(object):
     """Return a string with detailed information about any devices that
     are usable by the GPU package.
 
-    This is a wrapper around the :cpp:func:`lammps_get_gpu_device_info`
-    function of the C-library interface.
+    This is a wrapper around the :cpp:func:`lammps_get_gpu_device_info`
+    function of the C-library interface.
 
     :return: GPU device info string
     :rtype: string
diff --git a/python/lammps/mliap/loader.py b/python/lammps/mliap/loader.py
index 0e76568e2c..e1e60c4f50 100644
--- a/python/lammps/mliap/loader.py
+++ b/python/lammps/mliap/loader.py
@@ -29,7 +29,7 @@ from ctypes import pythonapi, c_int, c_void_p, py_object
 class DynamicLoader(importlib.abc.Loader):
     def __init__(self,module_name,library,api_version=1013):
         self.api_version = api_version
-
+
         attr = "PyInit_"+module_name
         initfunc = getattr(library,attr)
         # c_void_p is standin for PyModuleDef *
@@ -44,7 +44,7 @@ class DynamicLoader(importlib.abc.Loader):
         createfunc.restype = py_object
         module = createfunc(self.module_def, spec, self.api_version)
         return module
-
+
     def exec_module(self, module):
         execfunc = pythonapi.PyModule_ExecDef
         # c_void_p is standin for PyModuleDef *
@@ -59,12 +59,12 @@ def activate_mliappy(lmp):
     library = lmp.lib
     module_names = ["mliap_model_python_couple", "mliap_unified_couple"]
     api_version = library.lammps_python_api_version()
-
+
     for module_name in module_names:
         # Make Machinery
         loader = DynamicLoader(module_name,library,api_version)
         spec = importlib.util.spec_from_loader(module_name,loader)
-
+
         # Do the import
         module = importlib.util.module_from_spec(spec)
         sys.modules[module_name] = module
diff --git a/python/lammps/mliap/mliap_unified_lj.py b/python/lammps/mliap/mliap_unified_lj.py
index 87925e1e39..37e7170d4a 100644
--- a/python/lammps/mliap/mliap_unified_lj.py
+++ b/python/lammps/mliap/mliap_unified_lj.py
@@ -19,16 +19,16 @@ class MLIAPUnifiedLJ(MLIAPUnified):
 
     def compute_gradients(self, data):
         """Test compute_gradients."""
-
+
     def compute_descriptors(self, data):
         """Test compute_descriptors."""
-
+
     def compute_forces(self, data):
         """Test compute_forces."""
         eij, fij = self.compute_pair_ef(data)
         data.update_pair_energy(eij)
         data.update_pair_forces(fij)
-
+
     def compute_pair_ef(self, data):
         rij = data.rij
 
diff --git a/python/lammps/mliap/pytorch.py b/python/lammps/mliap/pytorch.py
index d699c239b0..51b953fd61 100644
--- a/python/lammps/mliap/pytorch.py
+++ b/python/lammps/mliap/pytorch.py
@@ -80,10 +80,10 @@ class TorchWrapper(torch.nn.Module):
 
     n_params : torch.nn.Module (None)
         Number of NN model parameters
-
+
     device : torch.nn.Module (None)
         Accelerator device
-
+
     dtype : torch.dtype (torch.float64)
         Dtype to use on device
     """
@@ -325,6 +325,6 @@ class ElemwiseModels(torch.nn.Module):
         per_atom_attributes = torch.zeros(elems.size(dim=0), dtype=self.dtype)
         given_elems, elem_indices = torch.unique(elems, return_inverse=True)
         for i, elem in enumerate(given_elems):
-            self.subnets[elem].to(self.dtype)
+            self.subnets[elem].to(self.dtype)
             per_atom_attributes[elem_indices == i] = self.subnets[elem](descriptors[elem_indices == i]).flatten()
         return per_atom_attributes
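
For context on the code touched in loader.py: activate_mliappy() is the entry point that makes the coupling modules compiled into the LAMMPS shared library (mliap_model_python_couple, mliap_unified_couple) importable from the embedding Python interpreter. A minimal driver sketch follows, assuming a LAMMPS build with the ML-IAP package and Python coupling enabled; the pickle filename and the pair_coeff element are placeholders, not part of this patch:

    import lammps
    import lammps.mliap

    lmp = lammps.lammps()

    # Register the embedded coupling modules with sys.modules.
    # This must run before any "pair_style mliap" command is issued,
    # otherwise the pair style cannot import them.
    lammps.mliap.activate_mliappy(lmp)

    # ... after the usual units/box/atom setup, a unified model such as
    # a pickled MLIAPUnifiedLJ instance could then be loaded like this:
    lmp.command("pair_style mliap unified mliap_unified_lj_Ar.pkl 0")
    lmp.command("pair_coeff * * Ar")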