From 3a6a91c078341cd427c616518526af4eaaabcf32 Mon Sep 17 00:00:00 2001
From: izayoi16
Date: Tue, 1 Nov 2022 11:41:06 +0800
Subject: [PATCH] remove sponge ops

---
 .../sponge/angle/angle_atom_energy_impl.cu | 66 -
 .../sponge/angle/angle_atom_energy_impl.cuh | 27 -
 .../sponge/angle/angle_energy_impl.cu | 63 -
 .../sponge/angle/angle_energy_impl.cuh | 27 -
 .../sponge/angle/angle_force_impl.cu | 86 -
 .../sponge/angle/angle_force_impl.cuh | 27 -
 .../angle_force_with_atom_energy_impl.cu | 90 -
 .../angle_force_with_atom_energy_impl.cuh | 28 -
 .../bond/bond_atom_energy_cuda_gpu_impl.cu | 57 -
 .../bond/bond_atom_energy_cuda_gpu_impl.cuh | 28 -
 .../sponge/bond/bond_energy_cuda_gpu_impl.cu | 61 -
 .../sponge/bond/bond_energy_cuda_gpu_impl.cuh | 28 -
 .../sponge/bond/bond_force_cuda_gpu_impl.cu | 63 -
 .../sponge/bond/bond_force_cuda_gpu_impl.cuh | 27 -
 ..._force_with_atom_energy_and_virial_impl.cu | 75 -
 ...force_with_atom_energy_and_virial_impl.cuh | 29 -
 .../bond/bond_force_with_atom_energy_impl.cu | 69 -
 .../bond/bond_force_with_atom_energy_impl.cuh | 28 -
 .../bond/bond_force_with_atom_virial_impl.cu | 69 -
 .../bond/bond_force_with_atom_virial_impl.cuh | 28 -
 .../sponge/common/atomcrdtocv_impl.cu | 123 -
 .../sponge/common/atomcrdtocv_impl.cuh | 27 -
 .../sponge/common/crd_to_uint_crd_impl.cu | 51 -
 .../sponge/common/crd_to_uint_crd_impl.cuh | 27 -
 .../common/crd_to_uint_crd_quarter_impl.cu | 54 -
 .../common/crd_to_uint_crd_quarter_impl.cuh | 27 -
 .../sponge/common/get_center_of_mass_impl.cu | 44 -
 .../sponge/common/get_center_of_mass_impl.cuh | 27 -
 .../cuda_impl/sponge/common/getcenter_impl.cu | 43 -
 .../sponge/common/getcenter_impl.cuh | 28 -
 .../sponge/common/map_center_of_mass_impl.cu | 51 -
 .../sponge/common/map_center_of_mass_impl.cuh | 28 -
 .../sponge/common/mdtemperature_impl.cu | 49 -
 .../sponge/common/mdtemperature_impl.cuh | 26 -
 .../sponge/common/total_c6_get_impl.cu | 48 -
 .../sponge/common/total_c6_get_impl.cuh | 26 -
 .../kernel/cuda_impl/sponge/common_sponge.cuh | 366 --
 .../sponge/crdmcmap/cal_no_wrap_crd_impl.cu | 46 -
 .../sponge/crdmcmap/cal_no_wrap_crd_impl.cuh | 28 -
 .../crdmcmap/refresh_boxmaptimes_impl.cu | 47 -
 .../crdmcmap/refresh_boxmaptimes_impl.cuh | 28 -
 .../dihedral/dihedral_atom_energy_impl.cu | 84 -
 .../dihedral/dihedral_atom_energy_impl.cuh | 28 -
 .../sponge/dihedral/dihedral_energy_impl.cu | 81 -
 .../sponge/dihedral/dihedral_energy_impl.cuh | 28 -
 .../sponge/dihedral/dihedral_force_impl.cu | 121 -
 .../sponge/dihedral/dihedral_force_impl.cuh | 28 -
 .../dihedral_force_with_atom_energy_impl.cu | 125 -
 .../dihedral_force_with_atom_energy_impl.cuh | 29 -
 ...ce_with_lj_virial_direct_cf_energy_impl.cu | 144 -
 ...e_with_lj_virial_direct_cf_energy_impl.cuh | 34 -
 .../cuda_impl/sponge/lj/lj_energy_impl.cu | 102 -
 .../cuda_impl/sponge/lj/lj_energy_impl.cuh | 28 -
 .../cuda_impl/sponge/lj/lj_force_impl.cu | 117 -
 .../cuda_impl/sponge/lj/lj_force_impl.cuh | 29 -
 .../lj/lj_force_with_pme_direct_force_impl.cu | 133 -
 .../lj_force_with_pme_direct_force_impl.cuh | 30 -
 ..._pme_direct_force_with_atom_energy_impl.cu | 147 -
 ...pme_direct_force_with_atom_energy_impl.cuh | 31 -
 .../nb14/dihedral_14_cf_atom_energy_impl.cu | 78 -
 .../nb14/dihedral_14_cf_atom_energy_impl.cuh | 27 -
 .../sponge/nb14/dihedral_14_cf_energy_impl.cu | 78 -
 .../nb14/dihedral_14_cf_energy_impl.cuh | 27 -
 .../nb14/dihedral_14_lj_atom_energy_impl.cu | 102 -
 .../nb14/dihedral_14_lj_atom_energy_impl.cuh | 28 -
 ..._force_with_atom_energy_and_virial_impl.cu | 139 -
 ...force_with_atom_energy_and_virial_impl.cuh | 28 -
 ...al_14_lj_cf_force_with_atom_energy_impl.cu | 140 -
 ...l_14_lj_cf_force_with_atom_energy_impl.cuh | 30 -
 .../sponge/nb14/dihedral_14_lj_energy_impl.cu | 98 -
 .../nb14/dihedral_14_lj_energy_impl.cuh | 29 -
 .../sponge/nb14/dihedral_14_lj_force_impl.cu | 111 -
 .../sponge/nb14/dihedral_14_lj_force_impl.cuh | 28 -
 ...ihedral_14_lj_force_with_direct_cf_impl.cu | 124 -
 ...hedral_14_lj_force_with_direct_cf_impl.cuh | 29 -
 .../neighbor_list/neighbor_list_impl.cu | 644 ---
 .../neighbor_list/neighbor_list_impl.cuh | 82 -
 .../md_iteration_gradient_descent_impl.cu | 41 -
 .../md_iteration_gradient_descent_impl.cuh | 26 -
 .../nvtit/md_iteration_leap_frog_impl.cu | 54 -
 .../nvtit/md_iteration_leap_frog_impl.cuh | 31 -
 ...md_iteration_leap_frog_liujian_gpu_impl.cu | 67 -
 ...d_iteration_leap_frog_liujian_gpu_impl.cuh | 29 -
 ...ion_leap_frog_liujian_with_max_vel_impl.cu | 80 -
 ...on_leap_frog_liujian_with_max_vel_impl.cuh | 28 -
 ...d_iteration_leap_frog_with_max_vel_impl.cu | 44 -
 ..._iteration_leap_frog_with_max_vel_impl.cuh | 27 -
 ...d_iteration_setup_random_state_gpu_impl.cu | 28 -
 ..._iteration_setup_random_state_gpu_impl.cuh | 25 -
 .../cuda_impl/sponge/pme/fft_3d_impl.cu | 27 -
 .../cuda_impl/sponge/pme/fft_3d_impl.cuh | 28 -
 .../cuda_impl/sponge/pme/ifft_3d_impl.cu | 28 -
 .../cuda_impl/sponge/pme/ifft_3d_impl.cuh | 28 -
 .../sponge/pme/pme_batched_fft_2d_impl.cu | 29 -
 .../sponge/pme/pme_batched_fft_2d_impl.cuh | 28 -
 .../cuda_impl/sponge/pme/pme_common.cuh | 357 --
 .../cuda_impl/sponge/pme/pme_energy_impl.cu | 85 -
 .../cuda_impl/sponge/pme/pme_energy_impl.cuh | 32 -
 .../sponge/pme/pme_energy_update_impl.cu | 90 -
 .../sponge/pme/pme_energy_update_impl.cuh | 34 -
 .../sponge/pme/pme_excluded_force_impl.cu | 103 -
 .../sponge/pme/pme_excluded_force_impl.cuh | 28 -
 .../cuda_impl/sponge/pme/pme_fft_1d_impl.cu | 29 -
 .../cuda_impl/sponge/pme/pme_fft_1d_impl.cuh | 27 -
 .../cuda_impl/sponge/pme/pme_fft_2d_impl.cu | 29 -
 .../cuda_impl/sponge/pme/pme_fft_2d_impl.cuh | 27 -
 .../cuda_impl/sponge/pme/pme_ifft_1d_impl.cu | 29 -
 .../cuda_impl/sponge/pme/pme_ifft_1d_impl.cuh | 27 -
 .../cuda_impl/sponge/pme/pme_ifft_2d_impl.cu | 29 -
 .../cuda_impl/sponge/pme/pme_ifft_2d_impl.cuh | 27 -
 .../cuda_impl/sponge/pme/pme_irfft_2d_impl.cu | 28 -
 .../sponge/pme/pme_irfft_2d_impl.cuh | 27 -
 .../sponge/pme/pme_reciprocal_force_impl.cu | 107 -
 .../sponge/pme/pme_reciprocal_force_impl.cuh | 34 -
 .../cuda_impl/sponge/pme/pme_rfft_2d_impl.cu | 28 -
 .../cuda_impl/sponge/pme/pme_rfft_2d_impl.cuh | 28 -
 .../sponge/restrain/restrain_energy_impl.cu | 45 -
 .../sponge/restrain/restrain_energy_impl.cuh | 27 -
 .../restrain_force_atom_energy_virial_impl.cu | 59 -
 ...restrain_force_atom_energy_virial_impl.cuh | 29 -
 .../sponge/restrain/restrain_force_impl.cu | 56 -
 .../sponge/restrain/restrain_force_impl.cuh | 27 -
 .../constrain_force_cycle_impl.cu | 82 -
 .../constrain_force_cycle_impl.cuh | 34 -
 .../constrain_force_cycle_with_virial_impl.cu | 78 -
 ...constrain_force_cycle_with_virial_impl.cuh | 34 -
 .../constrain_force_virial_impl.cu | 180 -
 .../constrain_force_virial_impl.cuh | 49 -
 .../simple_constrain/last_crd_to_dr_impl.cu | 84 -
 .../simple_constrain/last_crd_to_dr_impl.cuh | 29 -
 .../simple_constrain/refresh_crd_vel_impl.cu | 61 -
 .../simple_constrain/refresh_crd_vel_impl.cuh | 27 -
 .../simple_constrain/refresh_uint_crd_impl.cu | 63 -
 .../refresh_uint_crd_impl.cuh | 27 -
 .../plugin/device/gpu/kernel/sponge/OWNERS | 2 -
 .../sponge/angle/angle_atom_energy_kernel.cc | 88 -
 .../sponge/angle/angle_atom_energy_kernel.h | 62 -
 .../sponge/angle/angle_energy_kernel.cc | 33 -
 .../kernel/sponge/angle/angle_energy_kernel.h | 99 -
 .../kernel/sponge/angle/angle_force_kernel.cc | 33 -
 .../kernel/sponge/angle/angle_force_kernel.h | 100 -
 .../angle_force_with_atom_energy_kernel.cc | 34 -
 .../angle_force_with_atom_energy_kernel.h | 101 -
 .../bond/bond_atom_energy_cuda_gpu_kernel.cc | 32 -
 .../bond/bond_atom_energy_cuda_gpu_kernel.h | 102 -
 .../bond/bond_energy_cuda_gpu_kernel.cc | 32 -
 .../sponge/bond/bond_energy_cuda_gpu_kernel.h | 103 -
 .../sponge/bond/bond_force_cuda_gpu_kernel.cc | 32 -
 .../sponge/bond/bond_force_cuda_gpu_kernel.h | 103 -
 ...orce_with_atom_energy_and_virial_kernel.cc | 34 -
 ...force_with_atom_energy_and_virial_kernel.h | 105 -
 .../bond_force_with_atom_energy_kernel.cc | 33 -
 .../bond/bond_force_with_atom_energy_kernel.h | 103 -
 .../bond_force_with_atom_virial_kernel.cc | 33 -
 .../bond/bond_force_with_atom_virial_kernel.h | 103 -
 .../sponge/common/atomcrdtocv_kernel.cc | 32 -
 .../kernel/sponge/common/atomcrdtocv_kernel.h | 99 -
 .../sponge/common/crd_to_uint_crd_kernel.cc | 26 -
 .../sponge/common/crd_to_uint_crd_kernel.h | 79 -
 .../common/crd_to_uint_crd_quarter_kernel.cc | 26 -
 .../common/crd_to_uint_crd_quarter_kernel.h | 79 -
 .../common/get_center_of_mass_kernel.cc | 31 -
 .../sponge/common/get_center_of_mass_kernel.h | 94 -
 .../kernel/sponge/common/getcenter_kernel.cc | 26 -
 .../kernel/sponge/common/getcenter_kernel.h | 82 -
 .../common/map_center_of_mass_kernel.cc | 33 -
 .../sponge/common/map_center_of_mass_kernel.h | 99 -
 .../sponge/common/mdtemperature_kernel.cc | 30 -
 .../sponge/common/mdtemperature_kernel.h | 89 -
 .../sponge/common/total_c6_get_kernel.cc | 30 -
 .../sponge/common/total_c6_get_kernel.h | 79 -
 .../sponge/crdmcmap/cal_no_wrap_crd_kernel.cc | 33 -
 .../sponge/crdmcmap/cal_no_wrap_crd_kernel.h | 85 -
 .../crdmcmap/refresh_boxmaptimes_kernel.cc | 34 -
 .../crdmcmap/refresh_boxmaptimes_kernel.h | 89 -
 .../dihedral/dihedral_atom_energy_kernel.cc | 37 -
 .../dihedral/dihedral_atom_energy_kernel.h | 119 -
 .../sponge/dihedral/dihedral_energy_kernel.cc | 37 -
 .../sponge/dihedral/dihedral_energy_kernel.h | 119 -
 .../sponge/dihedral/dihedral_force_kernel.cc | 37 -
 .../sponge/dihedral/dihedral_force_kernel.h | 119 -
 .../dihedral_force_with_atom_energy_kernel.cc | 38 -
 .../dihedral_force_with_atom_energy_kernel.h | 121 -
 ..._with_lj_virial_direct_cf_energy_kernel.cc | 39 -
 ...e_with_lj_virial_direct_cf_energy_kernel.h | 135 -
 .../gpu/kernel/sponge/lj/lj_energy_kernel.cc | 33 -
 .../gpu/kernel/sponge/lj/lj_energy_kernel.h | 122 -
 .../gpu/kernel/sponge/lj/lj_force_kernel.cc | 33 -
 .../gpu/kernel/sponge/lj/lj_force_kernel.h | 121 -
 .../lj_force_with_pme_direct_force_kernel.cc | 33 -
 .../lj_force_with_pme_direct_force_kernel.h | 125 -
 ...rce_with_pme_direct_force_update_kernel.cc | 38 -
 ...orce_with_pme_direct_force_update_kernel.h | 144 -
 ..._force_with_virial_energy_update_kernel.cc | 40 -
 ...j_force_with_virial_energy_update_kernel.h | 145 -
 .../nb14/dihedral_14_cf_atom_energy_kernel.cc | 33 -
 .../nb14/dihedral_14_cf_atom_energy_kernel.h | 106 -
 .../nb14/dihedral_14_cf_energy_kernel.cc | 33 -
 .../nb14/dihedral_14_cf_energy_kernel.h | 114 -
 .../nb14/dihedral_14_lj_atom_energy_kernel.cc | 35 -
 .../nb14/dihedral_14_lj_atom_energy_kernel.h | 115 -
 ...orce_with_atom_energy_and_virial_kernel.cc | 38 -
 ...force_with_atom_energy_and_virial_kernel.h | 138 -
 ..._14_lj_cf_force_with_atom_energy_kernel.cc | 37 -
 ...l_14_lj_cf_force_with_atom_energy_kernel.h | 134 -
 .../nb14/dihedral_14_lj_energy_kernel.cc | 35 -
 .../nb14/dihedral_14_lj_energy_kernel.h | 126 -
 .../nb14/dihedral_14_lj_force_gpu_kernel.cc | 35 -
 .../nb14/dihedral_14_lj_force_gpu_kernel.h | 114 -
 ...edral_14_lj_force_with_direct_cf_kernel.cc | 36 -
 ...hedral_14_lj_force_with_direct_cf_kernel.h | 122 -
 .../neighbor_list_update_kernel.cc | 49 -
 .../neighbor_list_update_kernel.h | 198 -
 .../neighbor_list_update_new_kernel.cc | 49 -
 .../neighbor_list_update_new_kernel.h | 203 -
 .../md_iteration_gradient_descent_kernel.cc | 26 -
 .../md_iteration_gradient_descent_kernel.h | 70 -
 .../nvtit/md_iteration_leap_frog_kernel.cc | 31 -
 .../nvtit/md_iteration_leap_frog_kernel.h | 79 -
 ..._iteration_leap_frog_liujian_gpu_kernel.cc | 34 -
 ...d_iteration_leap_frog_liujian_gpu_kernel.h | 104 -
 ...ap_frog_liujian_gpu_with_max_vel_kernel.cc | 34 -
 ...eap_frog_liujian_gpu_with_max_vel_kernel.h | 105 -
 ...iteration_leap_frog_with_max_vel_kernel.cc | 31 -
 ..._iteration_leap_frog_with_max_vel_kernel.h | 79 -
 .../nvtit/md_iteration_setup_random_state.cc | 24 -
 .../nvtit/md_iteration_setup_random_state.h | 67 -
 .../gpu/kernel/sponge/pme/fft_3d_kernel.cc | 23 -
 .../gpu/kernel/sponge/pme/fft_3d_kernel.h | 77 -
 .../gpu/kernel/sponge/pme/ifft_3d_kernel.cc | 23 -
 .../gpu/kernel/sponge/pme/ifft_3d_kernel.h | 77 -
 .../sponge/pme/pme_batched_fft_2d_kernel.cc | 24 -
 .../sponge/pme/pme_batched_fft_2d_kernel.h | 82 -
 .../kernel/sponge/pme/pme_energy_kernel.cc | 36 -
 .../gpu/kernel/sponge/pme/pme_energy_kernel.h | 272 -
 .../sponge/pme/pme_energy_update_kernel.cc | 42 -
 .../sponge/pme/pme_energy_update_kernel.h | 305 --
 .../sponge/pme/pme_excluded_force_kernel.cc | 31 -
 .../sponge/pme/pme_excluded_force_kernel.h | 93 -
 .../pme/pme_excluded_force_update_kernel.cc | 36 -
 .../pme/pme_excluded_force_update_kernel.h | 106 -
 .../kernel/sponge/pme/pme_fft_1d_kernel.cc | 23 -
 .../gpu/kernel/sponge/pme/pme_fft_1d_kernel.h | 73 -
 .../kernel/sponge/pme/pme_fft_2d_kernel.cc | 23 -
 .../gpu/kernel/sponge/pme/pme_fft_2d_kernel.h | 74 -
 .../kernel/sponge/pme/pme_ifft_1d_kernel.cc | 23 -
 .../kernel/sponge/pme/pme_ifft_1d_kernel.h | 73 -
 .../kernel/sponge/pme/pme_ifft_2d_kernel.cc | 23 -
 .../kernel/sponge/pme/pme_ifft_2d_kernel.h | 74 -
 .../kernel/sponge/pme/pme_irfft_2d_kernel.cc | 23 -
 .../kernel/sponge/pme/pme_irfft_2d_kernel.h | 75 -
 .../sponge/pme/pme_reciprocal_force_kernel.cc | 25 -
 .../sponge/pme/pme_reciprocal_force_kernel.h | 243 -
 .../pme/pme_reciprocal_force_update_kernel.cc | 32 -
 .../pme/pme_reciprocal_force_update_kernel.h | 273 -
 .../kernel/sponge/pme/pme_rfft_2d_kernel.cc | 23 -
 .../kernel/sponge/pme/pme_rfft_2d_kernel.h | 75 -
 .../sponge/restrain/restrain_energy_kernel.cc | 34 -
 .../sponge/restrain/restrain_energy_kernel.h | 95 -
 ...in_force_atom_energy_virial_impl_kernel.cc | 36 -
 ...restrain_force_atom_energy_virial_kernel.h | 100 -
 .../sponge/restrain/restrain_force_kernel.cc | 30 -
 .../sponge/restrain/restrain_force_kernel.h | 95 -
 .../constrain_force_cycle_kernel.cc | 33 -
 .../constrain_force_cycle_kernel.h | 119 -
 ...onstrain_force_cycle_with_virial_kernel.cc | 38 -
 ...constrain_force_cycle_with_virial_kernel.h | 120 -
 .../constrain_force_kernel.cc | 40 -
 .../simple_constrain/constrain_force_kernel.h | 154 -
 .../constrain_force_virial_kernel.cc | 41 -
 .../constrain_force_virial_kernel.h | 151 -
 .../simple_constrain/constrain_kernel.cc | 41 -
 .../simple_constrain/constrain_kernel.h | 168 -
 .../simple_constrain/last_crd_to_dr_kernel.cc | 36 -
 .../simple_constrain/last_crd_to_dr_kernel.h | 117 -
 .../refresh_crd_vel_kernel.cc | 34 -
 .../simple_constrain/refresh_crd_vel_kernel.h | 97 -
 .../refresh_uint_crd_kernel.cc | 33 -
 .../refresh_uint_crd_kernel.h | 93 -
 .../mindspore/ops/operations/__init__.py | 85 -
 .../mindspore/ops/operations/sponge_ops.py | 3498 ------------
 .../ops/operations/sponge_update_ops.py | 2546 ---------
 tests/st/sponge/__init__.py | 0
 tests/st/sponge/data/NVT_290_10ns.in | 15 -
 tests/st/sponge/data/WATER_ALA.parm7 | 4764 -----------------
 .../sponge/data/WATER_ALA_350_cool_290.rst7 | 1152 ----
 tests/st/sponge/functions/__init__.py | 0
 tests/st/sponge/functions/angle_energy.py | 64 -
 .../functions/angle_force_with_atom_energy.py | 79 -
 tests/st/sponge/functions/bond_energy.py | 63 -
 .../functions/bond_force_with_atom_energy.py | 61 -
 ...bond_force_with_atom_energy_with_virial.py | 64 -
 tests/st/sponge/functions/common.py | 198 -
 tests/st/sponge/functions/constrain.py | 112 -
 .../sponge/functions/constrain_force_cycle.py | 51 -
 .../constrain_force_cycle_with_virial.py | 58 -
 tests/st/sponge/functions/crd_to_uint_crd.py | 37 -
 .../functions/crd_to_uint_crd_quarter.py | 37 -
 .../sponge/functions/dihedral_14_cf_energy.py | 60 -
 .../sponge/functions/dihedral_14_lj_energy.py | 77 -
 ...dihedral_14_ljcf_force_with_atom_energy.py | 98 -
 ...ljcf_force_with_atom_energy_with_virial.py | 104 -
 tests/st/sponge/functions/dihedral_energy.py | 78 -
 .../dihedral_force_with_atom_energy.py | 127 -
 tests/st/sponge/functions/last_crd_to_dr.py | 48 -
 tests/st/sponge/functions/lj_energy.py | 81 -
 .../functions/lj_force_pme_direct_force.py | 97 -
 .../functions/lj_force_with_virial_energy.py | 101 -
 .../md_iteration_gradient_descent.py | 38 -
 .../md_iteration_leap_frog_liujian.py | 66 -
 ...teration_leap_frog_liujian_with_max_vel.py | 72 -
 tests/st/sponge/functions/md_temperature.py | 70 -
 .../sponge/functions/neighbor_list_update.py | 197 -
 tests/st/sponge/functions/pme_common.py | 171 -
 tests/st/sponge/functions/pme_energy.py | 98 -
 .../st/sponge/functions/pme_energy_update.py | 66 -
 .../st/sponge/functions/pme_excluded_force.py | 116 -
 .../sponge/functions/pme_reciprocal_force.py | 102 -
 .../sponge/functions/refresh_boxmap_times.py | 40 -
 tests/st/sponge/functions/refresh_crd_vel.py | 45 -
 tests/st/sponge/functions/refresh_uint_crd.py | 47 -
 tests/st/sponge/simulation_np.py | 404 --
 tests/st/sponge/sponge_cuda/__init__.py | 14 -
 tests/st/sponge/sponge_cuda/angle.py | 175 -
 tests/st/sponge/sponge_cuda/bond.py | 164 -
 tests/st/sponge/sponge_cuda/dihedral.py | 227 -
 .../sponge/sponge_cuda/langevin_liujian_md.py | 112 -
 tests/st/sponge/sponge_cuda/lennard_jones.py | 170 -
 tests/st/sponge/sponge_cuda/md_information.py | 330 --
 tests/st/sponge/sponge_cuda/nb14.py | 176 -
 tests/st/sponge/sponge_cuda/neighbor_list.py | 182 -
 .../sponge/sponge_cuda/particle_mesh_ewald.py | 128 -
 tests/st/sponge/sponge_cuda/restrain.py | 66 -
 .../st/sponge/sponge_cuda/simple_constrain.py | 206 -
 .../sponge/sponge_cuda/system_information.py | 291 -
 tests/st/sponge/test_polypeptide.py | 34 -
 336 files changed, 36415 deletions(-)
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/angle/angle_atom_energy_impl.cu
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/angle/angle_atom_energy_impl.cuh
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/angle/angle_energy_impl.cu
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/angle/angle_energy_impl.cuh
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/angle/angle_force_impl.cu
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/angle/angle_force_impl.cuh
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/angle/angle_force_with_atom_energy_impl.cu
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/angle/angle_force_with_atom_energy_impl.cuh
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/bond/bond_atom_energy_cuda_gpu_impl.cu
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/bond/bond_atom_energy_cuda_gpu_impl.cuh
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/bond/bond_energy_cuda_gpu_impl.cu
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/bond/bond_energy_cuda_gpu_impl.cuh
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/bond/bond_force_cuda_gpu_impl.cu
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/bond/bond_force_cuda_gpu_impl.cuh
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/bond/bond_force_with_atom_energy_and_virial_impl.cu
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/bond/bond_force_with_atom_energy_and_virial_impl.cuh
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/bond/bond_force_with_atom_energy_impl.cu
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/bond/bond_force_with_atom_energy_impl.cuh
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/bond/bond_force_with_atom_virial_impl.cu
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/bond/bond_force_with_atom_virial_impl.cuh
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/common/atomcrdtocv_impl.cu
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/common/atomcrdtocv_impl.cuh
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/common/crd_to_uint_crd_impl.cu
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/common/crd_to_uint_crd_impl.cuh
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/common/crd_to_uint_crd_quarter_impl.cu
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/common/crd_to_uint_crd_quarter_impl.cuh
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/common/get_center_of_mass_impl.cu
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/common/get_center_of_mass_impl.cuh
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/common/getcenter_impl.cu
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/common/getcenter_impl.cuh
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/common/map_center_of_mass_impl.cu
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/common/map_center_of_mass_impl.cuh
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/common/mdtemperature_impl.cu
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/common/mdtemperature_impl.cuh
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/common/total_c6_get_impl.cu
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/common/total_c6_get_impl.cuh
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/common_sponge.cuh
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/crdmcmap/cal_no_wrap_crd_impl.cu
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/crdmcmap/cal_no_wrap_crd_impl.cuh
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/crdmcmap/refresh_boxmaptimes_impl.cu
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/crdmcmap/refresh_boxmaptimes_impl.cuh
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/dihedral/dihedral_atom_energy_impl.cu
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/dihedral/dihedral_atom_energy_impl.cuh
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/dihedral/dihedral_energy_impl.cu
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/dihedral/dihedral_energy_impl.cuh
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/dihedral/dihedral_force_impl.cu
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/dihedral/dihedral_force_impl.cuh
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/dihedral/dihedral_force_with_atom_energy_impl.cu
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/dihedral/dihedral_force_with_atom_energy_impl.cuh
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/lj/lj_direct_cf_force_with_lj_virial_direct_cf_energy_impl.cu
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/lj/lj_direct_cf_force_with_lj_virial_direct_cf_energy_impl.cuh
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/lj/lj_energy_impl.cu
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/lj/lj_energy_impl.cuh
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/lj/lj_force_impl.cu
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/lj/lj_force_impl.cuh
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/lj/lj_force_with_pme_direct_force_impl.cu
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/lj/lj_force_with_pme_direct_force_impl.cuh
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/lj/lj_pme_direct_force_with_atom_energy_impl.cu
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/lj/lj_pme_direct_force_with_atom_energy_impl.cuh
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/nb14/dihedral_14_cf_atom_energy_impl.cu
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/nb14/dihedral_14_cf_atom_energy_impl.cuh
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/nb14/dihedral_14_cf_energy_impl.cu
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/nb14/dihedral_14_cf_energy_impl.cuh
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/nb14/dihedral_14_lj_atom_energy_impl.cu
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/nb14/dihedral_14_lj_atom_energy_impl.cuh
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/nb14/dihedral_14_lj_cf_force_with_atom_energy_and_virial_impl.cu
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/nb14/dihedral_14_lj_cf_force_with_atom_energy_and_virial_impl.cuh
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/nb14/dihedral_14_lj_cf_force_with_atom_energy_impl.cu
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/nb14/dihedral_14_lj_cf_force_with_atom_energy_impl.cuh
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/nb14/dihedral_14_lj_energy_impl.cu
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/nb14/dihedral_14_lj_energy_impl.cuh
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/nb14/dihedral_14_lj_force_impl.cu
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/nb14/dihedral_14_lj_force_impl.cuh
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/nb14/dihedral_14_lj_force_with_direct_cf_impl.cu
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/nb14/dihedral_14_lj_force_with_direct_cf_impl.cuh
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/neighbor_list/neighbor_list_impl.cu
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/neighbor_list/neighbor_list_impl.cuh
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/nvtit/md_iteration_gradient_descent_impl.cu
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/nvtit/md_iteration_gradient_descent_impl.cuh
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/nvtit/md_iteration_leap_frog_impl.cu
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/nvtit/md_iteration_leap_frog_impl.cuh
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/nvtit/md_iteration_leap_frog_liujian_gpu_impl.cu
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/nvtit/md_iteration_leap_frog_liujian_gpu_impl.cuh
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/nvtit/md_iteration_leap_frog_liujian_with_max_vel_impl.cu
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/nvtit/md_iteration_leap_frog_liujian_with_max_vel_impl.cuh
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/nvtit/md_iteration_leap_frog_with_max_vel_impl.cu
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/nvtit/md_iteration_leap_frog_with_max_vel_impl.cuh
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/nvtit/md_iteration_setup_random_state_gpu_impl.cu
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/nvtit/md_iteration_setup_random_state_gpu_impl.cuh
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/pme/fft_3d_impl.cu
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/pme/fft_3d_impl.cuh
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/pme/ifft_3d_impl.cu
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/pme/ifft_3d_impl.cuh
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/pme/pme_batched_fft_2d_impl.cu
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/pme/pme_batched_fft_2d_impl.cuh
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/pme/pme_common.cuh
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/pme/pme_energy_impl.cu
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/pme/pme_energy_impl.cuh
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/pme/pme_energy_update_impl.cu
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/pme/pme_energy_update_impl.cuh
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/pme/pme_excluded_force_impl.cu
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/pme/pme_excluded_force_impl.cuh
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/pme/pme_fft_1d_impl.cu
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/pme/pme_fft_1d_impl.cuh
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/pme/pme_fft_2d_impl.cu
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/pme/pme_fft_2d_impl.cuh
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/pme/pme_ifft_1d_impl.cu
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/pme/pme_ifft_1d_impl.cuh
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/pme/pme_ifft_2d_impl.cu
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/pme/pme_ifft_2d_impl.cuh
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/pme/pme_irfft_2d_impl.cu
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/pme/pme_irfft_2d_impl.cuh
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/pme/pme_reciprocal_force_impl.cu
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/pme/pme_reciprocal_force_impl.cuh
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/pme/pme_rfft_2d_impl.cu
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/pme/pme_rfft_2d_impl.cuh
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/restrain/restrain_energy_impl.cu
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/restrain/restrain_energy_impl.cuh
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/restrain/restrain_force_atom_energy_virial_impl.cu
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/restrain/restrain_force_atom_energy_virial_impl.cuh
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/restrain/restrain_force_impl.cu
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/restrain/restrain_force_impl.cuh
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/simple_constrain/constrain_force_cycle_impl.cu
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/simple_constrain/constrain_force_cycle_impl.cuh
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/simple_constrain/constrain_force_cycle_with_virial_impl.cu
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/simple_constrain/constrain_force_cycle_with_virial_impl.cuh
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/simple_constrain/constrain_force_virial_impl.cu
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/simple_constrain/constrain_force_virial_impl.cuh
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/simple_constrain/last_crd_to_dr_impl.cu
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/simple_constrain/last_crd_to_dr_impl.cuh
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/simple_constrain/refresh_crd_vel_impl.cu
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/simple_constrain/refresh_crd_vel_impl.cuh
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/simple_constrain/refresh_uint_crd_impl.cu
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/simple_constrain/refresh_uint_crd_impl.cuh
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/sponge/OWNERS
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/sponge/angle/angle_atom_energy_kernel.cc
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/sponge/angle/angle_atom_energy_kernel.h
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/sponge/angle/angle_energy_kernel.cc
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/sponge/angle/angle_energy_kernel.h
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/sponge/angle/angle_force_kernel.cc
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/sponge/angle/angle_force_kernel.h
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/sponge/angle/angle_force_with_atom_energy_kernel.cc
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/sponge/angle/angle_force_with_atom_energy_kernel.h
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/sponge/bond/bond_atom_energy_cuda_gpu_kernel.cc
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/sponge/bond/bond_atom_energy_cuda_gpu_kernel.h
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/sponge/bond/bond_energy_cuda_gpu_kernel.cc
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/sponge/bond/bond_energy_cuda_gpu_kernel.h
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/sponge/bond/bond_force_cuda_gpu_kernel.cc
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/sponge/bond/bond_force_cuda_gpu_kernel.h
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/sponge/bond/bond_force_with_atom_energy_and_virial_kernel.cc
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/sponge/bond/bond_force_with_atom_energy_and_virial_kernel.h
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/sponge/bond/bond_force_with_atom_energy_kernel.cc
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/sponge/bond/bond_force_with_atom_energy_kernel.h
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/sponge/bond/bond_force_with_atom_virial_kernel.cc
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/sponge/bond/bond_force_with_atom_virial_kernel.h
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/sponge/common/atomcrdtocv_kernel.cc
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/sponge/common/atomcrdtocv_kernel.h
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/sponge/common/crd_to_uint_crd_kernel.cc
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/sponge/common/crd_to_uint_crd_kernel.h
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/sponge/common/crd_to_uint_crd_quarter_kernel.cc
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/sponge/common/crd_to_uint_crd_quarter_kernel.h
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/sponge/common/get_center_of_mass_kernel.cc
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/sponge/common/get_center_of_mass_kernel.h
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/sponge/common/getcenter_kernel.cc
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/sponge/common/getcenter_kernel.h
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/sponge/common/map_center_of_mass_kernel.cc
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/sponge/common/map_center_of_mass_kernel.h
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/sponge/common/mdtemperature_kernel.cc
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/sponge/common/mdtemperature_kernel.h
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/sponge/common/total_c6_get_kernel.cc
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/sponge/common/total_c6_get_kernel.h
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/sponge/crdmcmap/cal_no_wrap_crd_kernel.cc
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/sponge/crdmcmap/cal_no_wrap_crd_kernel.h
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/sponge/crdmcmap/refresh_boxmaptimes_kernel.cc
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/sponge/crdmcmap/refresh_boxmaptimes_kernel.h
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/sponge/dihedral/dihedral_atom_energy_kernel.cc
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/sponge/dihedral/dihedral_atom_energy_kernel.h
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/sponge/dihedral/dihedral_energy_kernel.cc
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/sponge/dihedral/dihedral_energy_kernel.h
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/sponge/dihedral/dihedral_force_kernel.cc
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/sponge/dihedral/dihedral_force_kernel.h
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/sponge/dihedral/dihedral_force_with_atom_energy_kernel.cc
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/sponge/dihedral/dihedral_force_with_atom_energy_kernel.h
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/sponge/lj/lj_direct_cf_force_with_lj_virial_direct_cf_energy_kernel.cc
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/sponge/lj/lj_direct_cf_force_with_lj_virial_direct_cf_energy_kernel.h
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/sponge/lj/lj_energy_kernel.cc
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/sponge/lj/lj_energy_kernel.h
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/sponge/lj/lj_force_kernel.cc
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/sponge/lj/lj_force_kernel.h
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/sponge/lj/lj_force_with_pme_direct_force_kernel.cc
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/sponge/lj/lj_force_with_pme_direct_force_kernel.h
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/sponge/lj/lj_force_with_pme_direct_force_update_kernel.cc
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/sponge/lj/lj_force_with_pme_direct_force_update_kernel.h
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/sponge/lj/lj_force_with_virial_energy_update_kernel.cc
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/sponge/lj/lj_force_with_virial_energy_update_kernel.h
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/sponge/nb14/dihedral_14_cf_atom_energy_kernel.cc
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/sponge/nb14/dihedral_14_cf_atom_energy_kernel.h
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/sponge/nb14/dihedral_14_cf_energy_kernel.cc
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/sponge/nb14/dihedral_14_cf_energy_kernel.h
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/sponge/nb14/dihedral_14_lj_atom_energy_kernel.cc
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/sponge/nb14/dihedral_14_lj_atom_energy_kernel.h
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/sponge/nb14/dihedral_14_lj_cf_force_with_atom_energy_and_virial_kernel.cc
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/sponge/nb14/dihedral_14_lj_cf_force_with_atom_energy_and_virial_kernel.h
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/sponge/nb14/dihedral_14_lj_cf_force_with_atom_energy_kernel.cc
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/sponge/nb14/dihedral_14_lj_cf_force_with_atom_energy_kernel.h
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/sponge/nb14/dihedral_14_lj_energy_kernel.cc
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/sponge/nb14/dihedral_14_lj_energy_kernel.h
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/sponge/nb14/dihedral_14_lj_force_gpu_kernel.cc
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/sponge/nb14/dihedral_14_lj_force_gpu_kernel.h
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/sponge/nb14/dihedral_14_lj_force_with_direct_cf_kernel.cc
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/sponge/nb14/dihedral_14_lj_force_with_direct_cf_kernel.h
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/sponge/neighbor_list/neighbor_list_update_kernel.cc
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/sponge/neighbor_list/neighbor_list_update_kernel.h
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/sponge/neighbor_list/neighbor_list_update_new_kernel.cc
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/sponge/neighbor_list/neighbor_list_update_new_kernel.h
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/sponge/nvtit/md_iteration_gradient_descent_kernel.cc
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/sponge/nvtit/md_iteration_gradient_descent_kernel.h
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/sponge/nvtit/md_iteration_leap_frog_kernel.cc
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/sponge/nvtit/md_iteration_leap_frog_kernel.h
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/sponge/nvtit/md_iteration_leap_frog_liujian_gpu_kernel.cc
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/sponge/nvtit/md_iteration_leap_frog_liujian_gpu_kernel.h
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/sponge/nvtit/md_iteration_leap_frog_liujian_gpu_with_max_vel_kernel.cc
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/sponge/nvtit/md_iteration_leap_frog_liujian_gpu_with_max_vel_kernel.h
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/sponge/nvtit/md_iteration_leap_frog_with_max_vel_kernel.cc
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/sponge/nvtit/md_iteration_leap_frog_with_max_vel_kernel.h
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/sponge/nvtit/md_iteration_setup_random_state.cc
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/sponge/nvtit/md_iteration_setup_random_state.h
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/sponge/pme/fft_3d_kernel.cc
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/sponge/pme/fft_3d_kernel.h
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/sponge/pme/ifft_3d_kernel.cc
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/sponge/pme/ifft_3d_kernel.h
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/sponge/pme/pme_batched_fft_2d_kernel.cc
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/sponge/pme/pme_batched_fft_2d_kernel.h
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/sponge/pme/pme_energy_kernel.cc
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/sponge/pme/pme_energy_kernel.h
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/sponge/pme/pme_energy_update_kernel.cc
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/sponge/pme/pme_energy_update_kernel.h
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/sponge/pme/pme_excluded_force_kernel.cc
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/sponge/pme/pme_excluded_force_kernel.h
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/sponge/pme/pme_excluded_force_update_kernel.cc
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/sponge/pme/pme_excluded_force_update_kernel.h
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/sponge/pme/pme_fft_1d_kernel.cc
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/sponge/pme/pme_fft_1d_kernel.h
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/sponge/pme/pme_fft_2d_kernel.cc
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/sponge/pme/pme_fft_2d_kernel.h
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/sponge/pme/pme_ifft_1d_kernel.cc
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/sponge/pme/pme_ifft_1d_kernel.h
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/sponge/pme/pme_ifft_2d_kernel.cc
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/sponge/pme/pme_ifft_2d_kernel.h
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/sponge/pme/pme_irfft_2d_kernel.cc
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/sponge/pme/pme_irfft_2d_kernel.h
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/sponge/pme/pme_reciprocal_force_kernel.cc
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/sponge/pme/pme_reciprocal_force_kernel.h
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/sponge/pme/pme_reciprocal_force_update_kernel.cc
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/sponge/pme/pme_reciprocal_force_update_kernel.h
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/sponge/pme/pme_rfft_2d_kernel.cc
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/sponge/pme/pme_rfft_2d_kernel.h
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/sponge/restrain/restrain_energy_kernel.cc
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/sponge/restrain/restrain_energy_kernel.h
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/sponge/restrain/restrain_force_atom_energy_virial_impl_kernel.cc
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/sponge/restrain/restrain_force_atom_energy_virial_kernel.h
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/sponge/restrain/restrain_force_kernel.cc
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/sponge/restrain/restrain_force_kernel.h
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/sponge/simple_constrain/constrain_force_cycle_kernel.cc
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/sponge/simple_constrain/constrain_force_cycle_kernel.h
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/sponge/simple_constrain/constrain_force_cycle_with_virial_kernel.cc
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/sponge/simple_constrain/constrain_force_cycle_with_virial_kernel.h
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/sponge/simple_constrain/constrain_force_kernel.cc
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/sponge/simple_constrain/constrain_force_kernel.h
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/sponge/simple_constrain/constrain_force_virial_kernel.cc
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/sponge/simple_constrain/constrain_force_virial_kernel.h
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/sponge/simple_constrain/constrain_kernel.cc
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/sponge/simple_constrain/constrain_kernel.h
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/sponge/simple_constrain/last_crd_to_dr_kernel.cc
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/sponge/simple_constrain/last_crd_to_dr_kernel.h
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/sponge/simple_constrain/refresh_crd_vel_kernel.cc
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/sponge/simple_constrain/refresh_crd_vel_kernel.h
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/sponge/simple_constrain/refresh_uint_crd_kernel.cc
 delete mode 100644 mindspore/ccsrc/plugin/device/gpu/kernel/sponge/simple_constrain/refresh_uint_crd_kernel.h
 delete mode 100644 mindspore/python/mindspore/ops/operations/sponge_ops.py
 delete mode 100644 mindspore/python/mindspore/ops/operations/sponge_update_ops.py
 delete mode 100644 tests/st/sponge/__init__.py
 delete mode 100644 tests/st/sponge/data/NVT_290_10ns.in
 delete mode 100644 tests/st/sponge/data/WATER_ALA.parm7
 delete mode 100644 tests/st/sponge/data/WATER_ALA_350_cool_290.rst7
 delete mode 100644 tests/st/sponge/functions/__init__.py
 delete mode 100644 tests/st/sponge/functions/angle_energy.py
 delete mode 100644 tests/st/sponge/functions/angle_force_with_atom_energy.py
 delete mode 100644 tests/st/sponge/functions/bond_energy.py
 delete mode 100644 tests/st/sponge/functions/bond_force_with_atom_energy.py
 delete mode 100644 tests/st/sponge/functions/bond_force_with_atom_energy_with_virial.py
 delete mode 100644 tests/st/sponge/functions/common.py
 delete mode 100644 tests/st/sponge/functions/constrain.py
 delete mode 100644 tests/st/sponge/functions/constrain_force_cycle.py
 delete mode 100644 tests/st/sponge/functions/constrain_force_cycle_with_virial.py
 delete mode 100644 tests/st/sponge/functions/crd_to_uint_crd.py
 delete mode 100644 tests/st/sponge/functions/crd_to_uint_crd_quarter.py
 delete mode 100644 tests/st/sponge/functions/dihedral_14_cf_energy.py
 delete mode 100644 tests/st/sponge/functions/dihedral_14_lj_energy.py
 delete mode 100644 tests/st/sponge/functions/dihedral_14_ljcf_force_with_atom_energy.py
 delete mode 100644 tests/st/sponge/functions/dihedral_14_ljcf_force_with_atom_energy_with_virial.py
 delete mode 100644 tests/st/sponge/functions/dihedral_energy.py
 delete mode 100644 tests/st/sponge/functions/dihedral_force_with_atom_energy.py
 delete mode 100644 tests/st/sponge/functions/last_crd_to_dr.py
 delete mode 100644 tests/st/sponge/functions/lj_energy.py
 delete mode 100644 tests/st/sponge/functions/lj_force_pme_direct_force.py
 delete mode 100644 tests/st/sponge/functions/lj_force_with_virial_energy.py
 delete mode 100644 tests/st/sponge/functions/md_iteration_gradient_descent.py
 delete mode 100644 tests/st/sponge/functions/md_iteration_leap_frog_liujian.py
 delete mode 100644 tests/st/sponge/functions/md_iteration_leap_frog_liujian_with_max_vel.py
 delete mode 100644 tests/st/sponge/functions/md_temperature.py
 delete mode 100644 tests/st/sponge/functions/neighbor_list_update.py
 delete mode 100644 tests/st/sponge/functions/pme_common.py
 delete mode 100644 tests/st/sponge/functions/pme_energy.py
 delete mode 100644 tests/st/sponge/functions/pme_energy_update.py
 delete mode 100644 tests/st/sponge/functions/pme_excluded_force.py
 delete mode 100644 tests/st/sponge/functions/pme_reciprocal_force.py
 delete mode 100644 tests/st/sponge/functions/refresh_boxmap_times.py
 delete mode 100644 tests/st/sponge/functions/refresh_crd_vel.py
 delete mode 100644 tests/st/sponge/functions/refresh_uint_crd.py
 delete mode 100644 tests/st/sponge/simulation_np.py
 delete mode 100644 tests/st/sponge/sponge_cuda/__init__.py
 delete mode 100644 tests/st/sponge/sponge_cuda/angle.py
 delete mode 100644 tests/st/sponge/sponge_cuda/bond.py
 delete mode 100644 tests/st/sponge/sponge_cuda/dihedral.py
 delete mode 100644 tests/st/sponge/sponge_cuda/langevin_liujian_md.py
 delete mode 100644 tests/st/sponge/sponge_cuda/lennard_jones.py
 delete mode 100644 tests/st/sponge/sponge_cuda/md_information.py
 delete mode 100644 tests/st/sponge/sponge_cuda/nb14.py
 delete mode 100644 tests/st/sponge/sponge_cuda/neighbor_list.py
 delete mode 100644 tests/st/sponge/sponge_cuda/particle_mesh_ewald.py
 delete mode 100644 tests/st/sponge/sponge_cuda/restrain.py
 delete mode 100644 tests/st/sponge/sponge_cuda/simple_constrain.py
 delete mode 100644 tests/st/sponge/sponge_cuda/system_information.py
 delete mode 100644 tests/st/sponge/test_polypeptide.py

diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/angle/angle_atom_energy_impl.cu b/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/angle/angle_atom_energy_impl.cu
deleted file mode 100644
index 4b5084008ba..00000000000
--- a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/angle/angle_atom_energy_impl.cu
+++ /dev/null
@@ -1,66 +0,0 @@
-/**
- * Copyright 2021 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "plugin/device/gpu/kernel/cuda_impl/sponge/angle/angle_atom_energy_impl.cuh"
-#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/util.cuh"
-#include "plugin/device/gpu/kernel/cuda_impl/sponge/common_sponge.cuh"
-
-__global__ void AngleAtomEnergyKernel(int angle_numbers, const UNSIGNED_INT_VECTOR *uint_crd, const VECTOR *scaler,
-                                      const int *atom_a, const int *atom_b, const int *atom_c, const float *angle_k,
-                                      const float *angle_theta0, float *atom_energy) {
-  int angle_i = blockDim.x * blockIdx.x + threadIdx.x;
-  if (angle_i < angle_numbers) {
-    int atom_i = atom_a[angle_i];
-    int atom_j = atom_b[angle_i];
-    int atom_k = atom_c[angle_i];
-
-    float theta0 = angle_theta0[angle_i];
-    float k = angle_k[angle_i];
-
-    VECTOR drij = Get_Periodic_Displacement(uint_crd[atom_i], uint_crd[atom_j], scaler[0]);
-    VECTOR drkj = Get_Periodic_Displacement(uint_crd[atom_k], uint_crd[atom_j], scaler[0]);
-
-    float rij_2 = 1. / (drij * drij);
-    float rkj_2 = 1. / (drkj * drkj);
-    float rij_1_rkj_1 = sqrtf(rij_2 * rkj_2);
-
-    float costheta = drij * drkj * rij_1_rkj_1;
-    costheta = fmaxf(-0.999999, fminf(costheta, 0.999999));
-    float theta = acosf(costheta);
-
-    float dtheta = theta - theta0;
-
-    atomicAdd(&atom_energy[atom_i], k * dtheta * dtheta);
-  }
-}
-
-void AngleAtomEnergy(int angle_numbers, int atom_numbers, const int *uint_crd_f, const float *scaler_f,
-                     const int *atom_a, const int *atom_b, const int *atom_c, const float *angle_k,
-                     const float *angle_theta0, float *ene, cudaStream_t stream) {
-  Reset_List<<<ceilf(static_cast<float>(atom_numbers) / 128), 128, 0, stream>>>(atom_numbers, ene, 0.);
-  size_t thread_per_block = 128;
-  size_t block_per_grid = ceilf(static_cast<float>(angle_numbers) / 128);
-  UNSIGNED_INT_VECTOR *uint_crd =
-    const_cast<UNSIGNED_INT_VECTOR *>(reinterpret_cast<const UNSIGNED_INT_VECTOR *>(uint_crd_f));
-  VECTOR *scaler = const_cast<VECTOR *>(reinterpret_cast<const VECTOR *>(scaler_f));
-
-  AngleAtomEnergyKernel<<<block_per_grid, thread_per_block, 0, stream>>>(angle_numbers, uint_crd, scaler, atom_a,
-                                                                         atom_b, atom_c, angle_k, angle_theta0, ene);
-  return;
-}
-void AngleAtomEnergy(int angle_numbers, int atom_numbers, const int *uint_crd_f, const float *scaler_f,
-                     const int *atom_a, const int *atom_b, const int *atom_c, const float *angle_k,
-                     const float *angle_theta0, float *ene, cudaStream_t stream);
diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/angle/angle_atom_energy_impl.cuh b/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/angle/angle_atom_energy_impl.cuh
deleted file mode 100644
index 538652a1bc2..00000000000
--- a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/angle/angle_atom_energy_impl.cuh
+++ /dev/null
@@ -1,27 +0,0 @@
-/**
- * Copyright 2021 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_SPONGE_ANGLE_ANGLE_ATOM_ENERGY_IMPL_H_
-#define MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_SPONGE_ANGLE_ANGLE_ATOM_ENERGY_IMPL_H_
-
-#include <curand_kernel.h>
-#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/cuda_common.h"
-#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/cuda_device_info.h"
-
-CUDA_LIB_EXPORT void AngleAtomEnergy(int angle_numbers, int atom_numbers, const int *uint_crd_f, const float *scaler_f,
-                                     const int *atom_a, const int *atom_b, const int *atom_c, const float *angle_k,
-                                     const float *angle_theta0, float *ene, cudaStream_t stream);
-#endif
diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/angle/angle_energy_impl.cu b/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/angle/angle_energy_impl.cu
deleted file mode 100644
index c57befff8eb..00000000000
--- a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/angle/angle_energy_impl.cu
+++ /dev/null
@@ -1,63 +0,0 @@
-/**
- * Copyright 2021 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "plugin/device/gpu/kernel/cuda_impl/sponge/angle/angle_energy_impl.cuh"
-#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/util.cuh"
-#include "plugin/device/gpu/kernel/cuda_impl/sponge/common_sponge.cuh"
-
-__global__ void AngleEnergyKernel(int angle_numbers, const UNSIGNED_INT_VECTOR *uint_crd, const VECTOR *scaler,
-                                  const int *atom_a, const int *atom_b, const int *atom_c, const float *angle_k,
-                                  const float *angle_theta0, float *angle_energy) {
-  int angle_i = blockDim.x * blockIdx.x + threadIdx.x;
-  if (angle_i < angle_numbers) {
-    int atom_i = atom_a[angle_i];
-    int atom_j = atom_b[angle_i];
-    int atom_k = atom_c[angle_i];
-
-    float theta0 = angle_theta0[angle_i];
-    float k = angle_k[angle_i];
-
-    VECTOR drij = Get_Periodic_Displacement(uint_crd[atom_i], uint_crd[atom_j], scaler[0]);
-    VECTOR drkj = Get_Periodic_Displacement(uint_crd[atom_k], uint_crd[atom_j], scaler[0]);
-
-    float rij_2 = 1. / (drij * drij);
-    float rkj_2 = 1. / (drkj * drkj);
-    float rij_1_rkj_1 = sqrtf(rij_2 * rkj_2);
-
-    float costheta = drij * drkj * rij_1_rkj_1;
-    costheta = fmaxf(-0.999999, fminf(costheta, 0.999999));
-    float theta = acosf(costheta);
-
-    float dtheta = theta - theta0;
-
-    angle_energy[angle_i] = k * dtheta * dtheta;
-  }
-}
-
-void AngleEnergy(int angle_numbers, const int *uint_crd_f, const float *scaler_f, const int *atom_a, const int *atom_b,
-                 const int *atom_c, const float *angle_k, const float *angle_theta0, float *ene, cudaStream_t stream) {
-  size_t thread_per_block = 128;
-  size_t block_per_grid = ceilf(static_cast<float>(angle_numbers) / 128);
-  UNSIGNED_INT_VECTOR *uint_crd =
-    const_cast<UNSIGNED_INT_VECTOR *>(reinterpret_cast<const UNSIGNED_INT_VECTOR *>(uint_crd_f));
-  VECTOR *scaler = const_cast<VECTOR *>(reinterpret_cast<const VECTOR *>(scaler_f));
-
-  AngleEnergyKernel<<<block_per_grid, thread_per_block, 0, stream>>>(angle_numbers, uint_crd, scaler, atom_a, atom_b,
-                                                                     atom_c, angle_k, angle_theta0, ene);
-  return;
-}
-void AngleEnergy(int angle_numbers, const int *uint_crd_f, const float *scaler_f, const int *atom_a, const int *atom_b,
-                 const int *atom_c, const float *angle_k, const float *angle_theta0, float *ene, cudaStream_t stream);
diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/angle/angle_energy_impl.cuh b/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/angle/angle_energy_impl.cuh
deleted file mode 100644
index b1eeb53a72b..00000000000
--- a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/angle/angle_energy_impl.cuh
+++ /dev/null
@@ -1,27 +0,0 @@
-/**
- * Copyright 2021 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_SPONGE_ANGLE_ANGLE_ENERGY_IMPL_H_
-#define MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_SPONGE_ANGLE_ANGLE_ENERGY_IMPL_H_
-
-#include <curand_kernel.h>
-#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/cuda_common.h"
-#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/cuda_device_info.h"
-
-CUDA_LIB_EXPORT void AngleEnergy(int angle_numbers, const int *uint_crd_f, const float *scaler_f, const int *atom_a,
-                                 const int *atom_b, const int *atom_c, const float *angle_k, const float *angle_theta0,
-                                 float *ene, cudaStream_t stream);
-#endif
diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/angle/angle_force_impl.cu b/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/angle/angle_force_impl.cu
deleted file mode 100644
index 6457de26a8f..00000000000
--- a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/angle/angle_force_impl.cu
+++ /dev/null
@@ -1,86 +0,0 @@
-/**
- * Copyright 2021 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "plugin/device/gpu/kernel/cuda_impl/sponge/angle/angle_force_impl.cuh"
-#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/util.cuh"
-#include "plugin/device/gpu/kernel/cuda_impl/sponge/common_sponge.cuh"
-
-__global__ void AngleForceKernel(int angle_numbers, const UNSIGNED_INT_VECTOR *uint_crd, const VECTOR *scaler,
-                                 const int *atom_a, const int *atom_b, const int *atom_c, const float *angle_k,
-                                 const float *angle_theta0, VECTOR *frc) {
-  int angle_i = blockDim.x * blockIdx.x + threadIdx.x;
-  if (angle_i < angle_numbers) {
-    int atom_i = atom_a[angle_i];
-    int atom_j = atom_b[angle_i];
-    int atom_k = atom_c[angle_i];
-
-    float theta0 = angle_theta0[angle_i];
-    float k = angle_k[angle_i];
-
-    VECTOR drij = Get_Periodic_Displacement(uint_crd[atom_i], uint_crd[atom_j], scaler[0]);
-    VECTOR drkj = Get_Periodic_Displacement(uint_crd[atom_k], uint_crd[atom_j], scaler[0]);
-
-    float rij_2 = 1. / (drij * drij);
-    float rkj_2 = 1. / (drkj * drkj);
-    float rij_1_rkj_1 = sqrtf(rij_2 * rkj_2);
-
-    float costheta = drij * drkj * rij_1_rkj_1;
-    costheta = fmaxf(-0.999999, fminf(costheta, 0.999999));
-    float theta = acosf(costheta);
-
-    float dtheta = theta - theta0;
-    k = -2 * k * dtheta / sinf(theta);
-
-    float common_factor_cross = k * rij_1_rkj_1;
-    float common_factor_self = k * costheta;
-
-    VECTOR fi = common_factor_self * rij_2 * drij - common_factor_cross * drkj;
-    VECTOR fk = common_factor_self * rkj_2 * drkj - common_factor_cross * drij;
-
-    atomicAdd(&frc[atom_i].x, fi.x);
-    atomicAdd(&frc[atom_i].y, fi.y);
-    atomicAdd(&frc[atom_i].z, fi.z);
-
-    atomicAdd(&frc[atom_k].x, fk.x);
-    atomicAdd(&frc[atom_k].y, fk.y);
-    atomicAdd(&frc[atom_k].z, fk.z);
-
-    fi = -fi - fk;
-
-    atomicAdd(&frc[atom_j].x, fi.x);
-    atomicAdd(&frc[atom_j].y, fi.y);
-    atomicAdd(&frc[atom_j].z, fi.z);
-  }
-}
-
-void AngleForce(int angle_numbers, int atom_numbers, const int *uint_crd_f, const float *scaler_f, const int *atom_a,
-                const int *atom_b, const int *atom_c, const float *angle_k, const float *angle_theta0, float *frc_f,
-                cudaStream_t stream) {
-  Reset_List<<<ceilf(static_cast<float>(3. * atom_numbers) / 128), 128, 0, stream>>>(3 * atom_numbers, frc_f, 0.);
-  size_t thread_per_block = 128;
-  size_t block_per_grid = ceilf(static_cast<float>(angle_numbers) / 128);
-  UNSIGNED_INT_VECTOR *uint_crd =
-    const_cast<UNSIGNED_INT_VECTOR *>(reinterpret_cast<const UNSIGNED_INT_VECTOR *>(uint_crd_f));
-  VECTOR *frc = const_cast<VECTOR *>(reinterpret_cast<const VECTOR *>(frc_f));
-  VECTOR *scaler = const_cast<VECTOR *>(reinterpret_cast<const VECTOR *>(scaler_f));
-
-  AngleForceKernel<<<block_per_grid, thread_per_block, 0, stream>>>(angle_numbers, uint_crd, scaler, atom_a, atom_b,
-                                                                    atom_c, angle_k, angle_theta0, frc);
-  return;
-}
-void AngleForce(int angle_numbers, int atom_numbers, const int *uint_crd_f, const float *scaler_f, const int *atom_a,
-                const int *atom_b, const int *atom_c, const float *angle_k, const float *angle_theta0, float *frc_f,
-                cudaStream_t stream);
diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/angle/angle_force_impl.cuh b/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/angle/angle_force_impl.cuh
deleted file mode 100644
index 15314403924..00000000000
--- a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/angle/angle_force_impl.cuh
+++ /dev/null
@@ -1,27 +0,0 @@
-/**
- * Copyright 2021 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_SPONGE_ANGLE_ANGLE_FORCE_IMPL_H_
-#define MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_SPONGE_ANGLE_ANGLE_FORCE_IMPL_H_
-
-#include <cuda_runtime_api.h>
-#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/cuda_common.h"
-#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/cuda_device_info.h"
-
-CUDA_LIB_EXPORT void AngleForce(int angle_numbers, int atom_numbers, const int *uint_crd_f, const float *scaler_f,
-                                const int *atom_a, const int *atom_b, const int *atom_c, const float *angle_k,
-                                const float *angle_theta0, float *frc_f, cudaStream_t stream);
-#endif
diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/angle/angle_force_with_atom_energy_impl.cu b/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/angle/angle_force_with_atom_energy_impl.cu
deleted file mode 100644
index b186683585a..00000000000
--- a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/angle/angle_force_with_atom_energy_impl.cu
+++ /dev/null
@@ -1,90 +0,0 @@
-/**
- * Copyright 2021 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "plugin/device/gpu/kernel/cuda_impl/sponge/angle/angle_force_with_atom_energy_impl.cuh"
-#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/util.cuh"
-#include "plugin/device/gpu/kernel/cuda_impl/sponge/common_sponge.cuh"
-
-__global__ void AngleForceWithAtomEnergyKernel(int angle_numbers, const UNSIGNED_INT_VECTOR *uint_crd,
-                                               const VECTOR *scaler, const int *atom_a, const int *atom_b,
-                                               const int *atom_c, const float *angle_k, const float *angle_theta0,
-                                               VECTOR *frc, float *atom_energy) {
-  int angle_i = blockDim.x * blockIdx.x + threadIdx.x;
-  if (angle_i < angle_numbers) {
-    int atom_i = atom_a[angle_i];
-    int atom_j = atom_b[angle_i];
-    int atom_k = atom_c[angle_i];
-
-    float theta0 = angle_theta0[angle_i];
-    float k = angle_k[angle_i];
-    float k2 = k;
-
-    VECTOR drij = Get_Periodic_Displacement(uint_crd[atom_i], uint_crd[atom_j], scaler[0]);
-    VECTOR drkj = Get_Periodic_Displacement(uint_crd[atom_k], uint_crd[atom_j], scaler[0]);
-
-    float rij_2 = 1. / (drij * drij);
-    float rkj_2 = 1. / (drkj * drkj);
-    float rij_1_rkj_1 = sqrtf(rij_2 * rkj_2);
-
-    float costheta = drij * drkj * rij_1_rkj_1;
-    costheta = fmaxf(-0.999999, fminf(costheta, 0.999999));
-    float theta = acosf(costheta);
-
-    float dtheta = theta - theta0;
-    k = -2 * k * dtheta / sinf(theta);
-
-    float common_factor_cross = k * rij_1_rkj_1;
-    float common_factor_self = k * costheta;
-
-    VECTOR fi = common_factor_self * rij_2 * drij - common_factor_cross * drkj;
-    VECTOR fk = common_factor_self * rkj_2 * drkj - common_factor_cross * drij;
-
-    atomicAdd(&frc[atom_i].x, fi.x);
-    atomicAdd(&frc[atom_i].y, fi.y);
-    atomicAdd(&frc[atom_i].z, fi.z);
-
-    atomicAdd(&frc[atom_k].x, fk.x);
-    atomicAdd(&frc[atom_k].y, fk.y);
-    atomicAdd(&frc[atom_k].z, fk.z);
-
-    fi = -fi - fk;
-
-    atomicAdd(&frc[atom_j].x, fi.x);
-    atomicAdd(&frc[atom_j].y, fi.y);
-    atomicAdd(&frc[atom_j].z, fi.z);
-
-    atomicAdd(&atom_energy[atom_i], k2 * dtheta * dtheta);
-  }
-}
-
-void AngleForceWithAtomEnergy(int angle_numbers, int atom_numbers, const int *uint_crd_f, const float *scaler_f,
-                              const int *atom_a, const int *atom_b, const int *atom_c, const float *angle_k,
-                              const float *angle_theta0, float *frc_f, float *ene, cudaStream_t stream) {
-  Reset_List<<<ceilf(static_cast<float>(3. * atom_numbers) / 128), 128, 0, stream>>>(3 * atom_numbers, frc_f, 0.);
-  size_t thread_per_block = 128;
-  size_t block_per_grid = ceilf(static_cast<float>(angle_numbers) / 128);
-  UNSIGNED_INT_VECTOR *uint_crd =
-    const_cast<UNSIGNED_INT_VECTOR *>(reinterpret_cast<const UNSIGNED_INT_VECTOR *>(uint_crd_f));
-  VECTOR *frc = const_cast<VECTOR *>(reinterpret_cast<const VECTOR *>(frc_f));
-  VECTOR *scaler = const_cast<VECTOR *>(reinterpret_cast<const VECTOR *>(scaler_f));
-
-  AngleForceWithAtomEnergyKernel<<<block_per_grid, thread_per_block, 0, stream>>>(
-    angle_numbers, uint_crd, scaler, atom_a, atom_b, atom_c, angle_k, angle_theta0, frc, ene);
-  return;
-}
-void AngleForceWithAtomEnergy(int angle_numbers, int atom_numbers, const int *uint_crd_f, const float *scaler_f,
-                              const int *atom_a, const int *atom_b, const int *atom_c, const float *angle_k,
-                              const float *angle_theta0, float *frc_f, float *ene, cudaStream_t stream);
diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/angle/angle_force_with_atom_energy_impl.cuh b/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/angle/angle_force_with_atom_energy_impl.cuh
deleted file mode 100644
index ac2445b52dc..00000000000
--- a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/angle/angle_force_with_atom_energy_impl.cuh
+++ /dev/null
@@ -1,28 +0,0 @@
-/**
- * Copyright 2021 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_SPONGE_ANGLE_ANGLE_FORCE_WITH_ATOM_ENERGY_IMPL_H_
-#define MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_SPONGE_ANGLE_ANGLE_FORCE_WITH_ATOM_ENERGY_IMPL_H_
-
-#include <cuda_runtime_api.h>
-#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/cuda_common.h"
-#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/cuda_device_info.h"
-
-CUDA_LIB_EXPORT void AngleForceWithAtomEnergy(int angle_numbers, int atom_numbers, const int *uint_crd_f,
-                                              const float *scaler_f, const int *atom_a, const int *atom_b,
-                                              const int *atom_c, const float *angle_k, const float *angle_theta0,
-                                              float *frc_f, float *ene, cudaStream_t stream);
-#endif
diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/bond/bond_atom_energy_cuda_gpu_impl.cu b/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/bond/bond_atom_energy_cuda_gpu_impl.cu
deleted file mode 100644
index bab1f905c68..00000000000
--- a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/bond/bond_atom_energy_cuda_gpu_impl.cu
+++ /dev/null
@@ -1,57 +0,0 @@
-/**
- * Copyright 2021 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "plugin/device/gpu/kernel/cuda_impl/sponge/bond/bond_atom_energy_cuda_gpu_impl.cuh"
-#include "plugin/device/gpu/kernel/cuda_impl/sponge/common_sponge.cuh"
-#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/util.cuh"
-
-__global__ void BondAtomEnergyCudaKernel(const int bond_numbers, const UNSIGNED_INT_VECTOR *uint_crd,
-                                         const VECTOR *scaler, const int *atom_a, const int *atom_b,
-                                         const float *bond_k, const float *bond_r0, float *atom_ene) {
-  int bond_i = blockDim.x * blockIdx.x + threadIdx.x;
-  if (bond_i < bond_numbers) {
-    int atom_i = atom_a[bond_i];
-    int atom_j = atom_b[bond_i];
-
-    float k = bond_k[bond_i];
-    float r0 = bond_r0[bond_i];
-
-    VECTOR dr = Get_Periodic_Displacement(uint_crd[atom_i], uint_crd[atom_j], scaler[0]);
-
-    float r1 = norm3df(dr.x, dr.y, dr.z);
-    float tempf = r1 - r0;
-
-    atomicAdd(&atom_ene[atom_i], k * tempf * tempf);
-  }
-}
-
-void BondAtomEnergy(int bond_numbers, int atom_numbers, const int *uint_crd_f, const float *scaler_f,
-                    const int *atom_a, const int *atom_b, const float *bond_k, const float *bond_r0, float *atom_ene,
-                    cudaStream_t stream) {
-  Reset_List<<<ceilf(static_cast<float>(atom_numbers) / 128), 128, 0, stream>>>(atom_numbers, atom_ene, 0.);
-  size_t thread_per_block = 128;
-  size_t block_per_grid = ceilf(static_cast<float>(bond_numbers) / 128);
-  UNSIGNED_INT_VECTOR *uint_crd =
-    const_cast<UNSIGNED_INT_VECTOR *>(reinterpret_cast<const UNSIGNED_INT_VECTOR *>(uint_crd_f));
-  VECTOR *scaler = const_cast<VECTOR *>(reinterpret_cast<const VECTOR *>(scaler_f));
-
-  BondAtomEnergyCudaKernel<<<block_per_grid, thread_per_block, 0, stream>>>(bond_numbers, uint_crd, scaler, atom_a,
-                                                                            atom_b, bond_k, bond_r0, atom_ene);
-  return;
-}
-
-void BondAtomEnergy(int bond_numbers, int atom_numbers, const int *uint_crd_f, const float *scaler_f,
-                    const int *atom_a, const int *atom_b, const float *bond_k, const float *bond_r0, float *atom_ene,
-                    cudaStream_t stream);
diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/bond/bond_atom_energy_cuda_gpu_impl.cuh b/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/bond/bond_atom_energy_cuda_gpu_impl.cuh
deleted file mode 100644
index 96f97609e87..00000000000
--- a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/bond/bond_atom_energy_cuda_gpu_impl.cuh
+++ /dev/null
@@ -1,28 +0,0 @@
-/**
- * Copyright 2021 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_SPONGE_BOND_BOND_ATOM_ENERGY_CUDA_GPU_IMPL_H_
-#define MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_SPONGE_BOND_BOND_ATOM_ENERGY_CUDA_GPU_IMPL_H_
-
-#include <cuda_runtime_api.h>
-#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/cuda_common.h"
-#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/cuda_device_info.h"
-
-CUDA_LIB_EXPORT void BondAtomEnergy(int bond_numbers, int atom_numbers, const int *uint_crd_f, const float *scaler_f,
-                                    const int *atom_a, const int *atom_b, const float *bond_k, const float *bond_r0,
-                                    float *atom_ene, cudaStream_t stream);
-
-#endif // MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_BOND_ATOM_ENERGY_GPU_IMPL_H_
diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/bond/bond_energy_cuda_gpu_impl.cu b/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/bond/bond_energy_cuda_gpu_impl.cu
deleted file mode 100644
index bb1007036c6..00000000000
--- a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/bond/bond_energy_cuda_gpu_impl.cu
+++ /dev/null
@@ -1,61 +0,0 @@
-/**
- * Copyright 2021 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "plugin/device/gpu/kernel/cuda_impl/sponge/bond/bond_energy_cuda_gpu_impl.cuh"
-#include "plugin/device/gpu/kernel/cuda_impl/sponge/common_sponge.cuh"
-#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/util.cuh"
-
-__global__ void BondEnergyCudaKernel(const int bond_numbers, const UNSIGNED_INT_VECTOR *uint_crd, const VECTOR *scaler,
-                                     const int *atom_a, const int *atom_b, const float *bond_k, const float *bond_r0,
-                                     float *bond_ene) {
-  int bond_i = blockDim.x * blockIdx.x + threadIdx.x;
-  if (bond_i < bond_numbers) {
-    int atom_i = atom_a[bond_i];
-    int atom_j = atom_b[bond_i];
-
-    float k = bond_k[bond_i];
-    float r0 = bond_r0[bond_i];
-
-    VECTOR dr = Get_Periodic_Displacement(uint_crd[atom_i], uint_crd[atom_j], scaler[0]);
-
-    float r1 = norm3df(dr.x, dr.y, dr.z);
-    float tempf = r1 - r0;
-    float temp = k * tempf * tempf;
-    bond_ene[bond_i] = temp;
-  }
-}
-
-void BondEnergy(int bond_numbers, int atom_numbers, const unsigned int *uint_crd_f, const float *scaler_f,
-                const int *atom_a, const int *atom_b, const float *bond_k, const float *bond_r0, float *bond_ene,
-                cudaStream_t stream) {
-  size_t thread_per_block = 128;
-  size_t block_per_grid = ceilf(static_cast<float>(bond_numbers) / 128);
-  UNSIGNED_INT_VECTOR *uint_crd =
-    const_cast<UNSIGNED_INT_VECTOR *>(reinterpret_cast<const UNSIGNED_INT_VECTOR *>(uint_crd_f));
-  VECTOR *scaler = const_cast<VECTOR *>(reinterpret_cast<const VECTOR *>(scaler_f));
-
-  Reset_List<<<(unsigned int)ceilf(static_cast<float>(bond_numbers) / 128), 128, 0, stream>>>(bond_numbers,
-                                                                                              bond_ene, 0.);
-
-  BondEnergyCudaKernel<<<block_per_grid, thread_per_block, 0, stream>>>(bond_numbers, uint_crd, scaler,
-                                                                        atom_a, atom_b, bond_k, bond_r0,
-                                                                        bond_ene);
-  return;
-}
-
-void BondEnergy(int bond_numbers, int atom_numbers, const unsigned int *uint_crd_f, const float *scaler_f,
-                const int *atom_a, const int *atom_b, const float *bond_k, const float *bond_r0,
-                float *bond_ene, cudaStream_t stream);
diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/bond/bond_energy_cuda_gpu_impl.cuh b/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/bond/bond_energy_cuda_gpu_impl.cuh
deleted file mode 100644
index cd728d86d4c..00000000000
--- a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/bond/bond_energy_cuda_gpu_impl.cuh
+++ /dev/null
@@ -1,28 +0,0 @@
-/**
- * Copyright 2021 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_SPONGE_BOND_BOND_ENERGY_CUDA_GPU_IMPL_H_
-#define MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_SPONGE_BOND_BOND_ENERGY_CUDA_GPU_IMPL_H_
-
-#include <cuda_runtime_api.h>
-#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/cuda_common.h"
-#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/cuda_device_info.h"
-
-CUDA_LIB_EXPORT void BondEnergy(int bond_numbers, int atom_numbers, const unsigned int *uint_crd_f,
-                                const float *scaler_f, const int *atom_a, const int *atom_b, const float *bond_k,
-                                const float *bond_r0, float *bond_ene, cudaStream_t stream);
-
-#endif // MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_BOND_ENERGY_CUDA_GPU_IMPL_H_
diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/bond/bond_force_cuda_gpu_impl.cu b/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/bond/bond_force_cuda_gpu_impl.cu
deleted file mode 100644
index d3ae9f8f47b..00000000000
--- a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/bond/bond_force_cuda_gpu_impl.cu
+++ /dev/null
@@ -1,63 +0,0 @@
-/**
- * Copyright 2021 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "plugin/device/gpu/kernel/cuda_impl/sponge/bond/bond_force_cuda_gpu_impl.cuh"
-#include "plugin/device/gpu/kernel/cuda_impl/sponge/common_sponge.cuh"
-#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/util.cuh"
-
-__global__ void BondForceCudaKernel(int bond_numbers, const UNSIGNED_INT_VECTOR *uint_crd, const VECTOR *scaler,
-                                    const int *atom_a, const int *atom_b, const float *bond_k, const float *bond_r0,
-                                    VECTOR *frc) {
-  int bond_i = blockDim.x * blockIdx.x + threadIdx.x;
-  if (bond_i < bond_numbers) {
-    int atom_i = atom_a[bond_i];
-    int atom_j = atom_b[bond_i];
-
-    float k = bond_k[bond_i];
-    float r0 = bond_r0[bond_i];
-    VECTOR dr = Get_Periodic_Displacement(uint_crd[atom_i], uint_crd[atom_j], scaler[0]);
-    float r_1 = rnorm3df(dr.x, dr.y, dr.z);
-    float tempf = 1.0 - r0 * r_1;
-
-    VECTOR f = 2 * tempf * k * dr;
-    atomicAdd(&frc[atom_i].x, -f.x);
-    atomicAdd(&frc[atom_i].y, -f.y);
-    atomicAdd(&frc[atom_i].z, -f.z);
-
-    atomicAdd(&frc[atom_j].x, f.x);
-    atomicAdd(&frc[atom_j].y, f.y);
-    atomicAdd(&frc[atom_j].z, f.z);
-  }
-}
-
-void BondForce(int bond_numbers, int atom_numbers, const unsigned int *uint_crd_f, const float *scaler_f,
-               const int *atom_a, const int *atom_b, const float *bond_k, const float *bond_r0, float *frc_f,
-               cudaStream_t stream) {
-  Reset_List<<<ceilf(static_cast<float>(3. * atom_numbers) / 128), 128, 0, stream>>>(3 * atom_numbers, frc_f, 0.);
-  size_t thread_per_block = 128;
-  size_t block_per_grid = ceilf(static_cast<float>(bond_numbers) / 128);
-  UNSIGNED_INT_VECTOR *uint_crd =
-    const_cast<UNSIGNED_INT_VECTOR *>(reinterpret_cast<const UNSIGNED_INT_VECTOR *>(uint_crd_f));
-  VECTOR *scaler = const_cast<VECTOR *>(reinterpret_cast<const VECTOR *>(scaler_f));
-  VECTOR *frc = const_cast<VECTOR *>(reinterpret_cast<const VECTOR *>(frc_f));
-  BondForceCudaKernel<<<block_per_grid, thread_per_block, 0, stream>>>(bond_numbers, uint_crd, scaler, atom_a, atom_b,
-                                                                       bond_k, bond_r0, frc);
-  return;
-}
-
-void BondForce(int bond_numbers, int atom_numbers, const unsigned int *uint_crd_f, const float *scaler_f,
-               const int *atom_a, const int *atom_b, const float *bond_k, const float *bond_r0,
-               float *frc_f, cudaStream_t stream);
diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/bond/bond_force_cuda_gpu_impl.cuh b/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/bond/bond_force_cuda_gpu_impl.cuh
deleted file mode 100644
index 5e9899a8cb9..00000000000
--- a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/bond/bond_force_cuda_gpu_impl.cuh
+++ /dev/null
@@ -1,27 +0,0 @@
-/**
- * Copyright 2021 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_SPONGE_BOND_BOND_FORCE_CUDA_GPU_IMPL_H_
-#define MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_SPONGE_BOND_BOND_FORCE_CUDA_GPU_IMPL_H_
-
-#include <cuda_runtime_api.h>
-#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/cuda_common.h"
-#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/cuda_device_info.h"
-
-CUDA_LIB_EXPORT void BondForce(int bond_numbers, int atom_numbers, const unsigned int *uint_crd_f,
-                               const float *scaler_f, const int *atom_a, const int *atom_b, const float *bond_k,
-                               const float *bond_r0, float *frc_f, cudaStream_t stream);
-#endif // MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_BOND_FORCE_CUDA_GPU_IMPL_H_
diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/bond/bond_force_with_atom_energy_and_virial_impl.cu b/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/bond/bond_force_with_atom_energy_and_virial_impl.cu
deleted file mode 100644
index fab62a62d90..00000000000
--- a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/bond/bond_force_with_atom_energy_and_virial_impl.cu
+++ /dev/null
@@ -1,75 +0,0 @@
-/**
- * Copyright 2021 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "plugin/device/gpu/kernel/cuda_impl/sponge/bond/bond_force_with_atom_energy_and_virial_impl.cuh"
-#include "plugin/device/gpu/kernel/cuda_impl/sponge/common_sponge.cuh"
-#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/util.cuh"
-
-__global__ void BondForceWithAtomEnergyAndVirialKernel(const int bond_numbers, const UNSIGNED_INT_VECTOR *uint_crd,
-                                                       const VECTOR *scaler, const int *atom_a, const int *atom_b,
-                                                       const float *bond_k, const float *bond_r0, VECTOR *frc,
-                                                       float *atom_energy, float *atom_virial) {
-  int bond_i = blockDim.x * blockIdx.x + threadIdx.x;
-  if (bond_i < bond_numbers) {
-    int atom_i = atom_a[bond_i];
-    int atom_j = atom_b[bond_i];
-
-    float k = bond_k[bond_i];
-    float r0 = bond_r0[bond_i];
-
-    VECTOR dr = Get_Periodic_Displacement(uint_crd[atom_i], uint_crd[atom_j], scaler[0]);
-    float abs_r = norm3df(dr.x, dr.y, dr.z);
-    float r_1 = 1. / abs_r;
-
-    float tempf2 = abs_r - r0;
-    float tempf = 2 * tempf2 * k;
-    VECTOR f = tempf * r_1 * dr;
-
-    atomicAdd(&frc[atom_i].x, -f.x);
-    atomicAdd(&frc[atom_i].y, -f.y);
-    atomicAdd(&frc[atom_i].z, -f.z);
-
-    atomicAdd(&frc[atom_j].x, f.x);
-    atomicAdd(&frc[atom_j].y, f.y);
-    atomicAdd(&frc[atom_j].z, f.z);
-
-    atomicAdd(&atom_virial[atom_i], -tempf * abs_r);
-    atomicAdd(&atom_energy[atom_i], k * tempf2 * tempf2);
-  }
-}
-
-void BondForceWithAtomEnergyAndVirial(int bond_numbers, int atom_numbers, const unsigned int *uint_crd_f,
-                                      const float *scaler_f, const int *atom_a, const int *atom_b, const float *bond_k,
-                                      const float *bond_r0, float *frc_f, float *atom_energy, float *atom_v,
-                                      cudaStream_t stream) {
-  Reset_List<<<ceilf(static_cast<float>(3. * atom_numbers) / 128), 128, 0, stream>>>(3 * atom_numbers, frc_f, 0.);
-  Reset_List<<<ceilf(static_cast<float>(atom_numbers) / 128), 128, 0, stream>>>(atom_numbers, atom_v, 0.);
-  Reset_List<<<ceilf(static_cast<float>(atom_numbers) / 128), 128, 0, stream>>>(atom_numbers, atom_energy, 0.);
-  size_t thread_per_block = 128;
-  size_t block_per_grid = ceilf(static_cast<float>(bond_numbers) / 128);
-  UNSIGNED_INT_VECTOR *uint_crd =
-    const_cast<UNSIGNED_INT_VECTOR *>(reinterpret_cast<const UNSIGNED_INT_VECTOR *>(uint_crd_f));
-  VECTOR *frc = const_cast<VECTOR *>(reinterpret_cast<const VECTOR *>(frc_f));
-  VECTOR *scaler = const_cast<VECTOR *>(reinterpret_cast<const VECTOR *>(scaler_f));
-  BondForceWithAtomEnergyAndVirialKernel<<<block_per_grid, thread_per_block, 0, stream>>>(
-    bond_numbers, uint_crd, scaler, atom_a, atom_b, bond_k, bond_r0, frc, atom_energy, atom_v);
-  return;
-}
-
-void BondForceWithAtomEnergyAndVirial(int bond_numbers, int atom_numbers, const unsigned int *uint_crd_f,
-                                      const float *scaler_f, const int *atom_a, const int *atom_b, const float *bond_k,
-                                      const float *bond_r0, float *frc_f, float *atom_energy, float *atom_v,
-                                      cudaStream_t stream);
diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/bond/bond_force_with_atom_energy_and_virial_impl.cuh b/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/bond/bond_force_with_atom_energy_and_virial_impl.cuh
deleted file mode 100644
index fa5f0408729..00000000000
--- a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/bond/bond_force_with_atom_energy_and_virial_impl.cuh
+++ /dev/null
@@ -1,29 +0,0 @@
-/**
- * Copyright 2021 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_SPONGE_BOND_BOND_FORCE_WITH_ATOM_ENERGY_AND_VIRIAL_IMPL_H_
-#define MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_SPONGE_BOND_BOND_FORCE_WITH_ATOM_ENERGY_AND_VIRIAL_IMPL_H_
-
-#include <cuda_runtime_api.h>
-#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/cuda_common.h"
-#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/cuda_device_info.h"
-
-CUDA_LIB_EXPORT void BondForceWithAtomEnergyAndVirial(int bond_numbers, int atom_numbers,
-                                                      const unsigned int *uint_crd_f, const float *scaler_f,
-                                                      const int *atom_a, const int *atom_b, const float *bond_k,
-                                                      const float *bond_r0, float *frc_f, float *atom_energy,
-                                                      float *atom_v, cudaStream_t stream);
-#endif
diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/bond/bond_force_with_atom_energy_impl.cu b/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/bond/bond_force_with_atom_energy_impl.cu
deleted file mode 100644
index 11064927158..00000000000
--- a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/bond/bond_force_with_atom_energy_impl.cu
+++ /dev/null
@@ -1,69 +0,0 @@
-/**
- * Copyright 2021 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "plugin/device/gpu/kernel/cuda_impl/sponge/bond/bond_force_with_atom_energy_impl.cuh"
-#include "plugin/device/gpu/kernel/cuda_impl/sponge/common_sponge.cuh"
-#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/util.cuh"
-
-__global__ void BondForceWithAtomEnergyKernel(int bond_numbers, const UNSIGNED_INT_VECTOR *uint_crd,
-                                              const VECTOR *scaler, const int *atom_a, const int *atom_b,
-                                              const float *bond_k, const float *bond_r0, VECTOR *frc,
-                                              float *atom_energy) {
-  int bond_i = blockDim.x * blockIdx.x + threadIdx.x;
-  if (bond_i < bond_numbers) {
-    int atom_i = atom_a[bond_i];
-    int atom_j = atom_b[bond_i];
-
-    float k = bond_k[bond_i];
-    float r0 = bond_r0[bond_i];
-
-    VECTOR dr = Get_Periodic_Displacement(uint_crd[atom_i], uint_crd[atom_j], scaler[0]);
-
-    float abs_r = norm3df(dr.x, dr.y, dr.z);
-    float r_1 = 1. / abs_r;
-    float tempf = abs_r - r0;
-    VECTOR f = 2 * tempf * r_1 * k * dr;
-
-    atomicAdd(&frc[atom_i].x, -f.x);
-    atomicAdd(&frc[atom_i].y, -f.y);
-    atomicAdd(&frc[atom_i].z, -f.z);
-
-    atomicAdd(&frc[atom_j].x, f.x);
-    atomicAdd(&frc[atom_j].y, f.y);
-    atomicAdd(&frc[atom_j].z, f.z);
-
-    atomicAdd(&atom_energy[atom_i], k * tempf * tempf);
-  }
-}
-
-void BondForceWithAtomEnergy(int bond_numbers, int atom_numbers, const int *uint_crd_f, const float *scaler_f,
-                             const int *atom_a, const int *atom_b, const float *bond_k, const float *bond_r0,
-                             float *frc_f, float *atom_e, cudaStream_t stream) {
-  Reset_List<<<ceilf(static_cast<float>(3. * atom_numbers) / 128), 128, 0, stream>>>(3 * atom_numbers, frc_f, 0.);
-  Reset_List<<<ceilf(static_cast<float>(atom_numbers) / 128), 128, 0, stream>>>(atom_numbers, atom_e, 0.);
-  size_t thread_per_block = 128;
-  size_t block_per_grid = ceilf(static_cast<float>(bond_numbers) / 128);
-  UNSIGNED_INT_VECTOR *uint_crd =
-    const_cast<UNSIGNED_INT_VECTOR *>(reinterpret_cast<const UNSIGNED_INT_VECTOR *>(uint_crd_f));
-  VECTOR *frc = const_cast<VECTOR *>(reinterpret_cast<const VECTOR *>(frc_f));
-  VECTOR *scaler = const_cast<VECTOR *>(reinterpret_cast<const VECTOR *>(scaler_f));
-  BondForceWithAtomEnergyKernel<<<block_per_grid, thread_per_block, 0, stream>>>(bond_numbers, uint_crd, scaler, atom_a,
-                                                                                 atom_b, bond_k, bond_r0, frc, atom_e);
-  return;
-}
-void BondForceWithAtomEnergy(int bond_numbers, int atom_numbers, const int *uint_crd_f, const float *scaler_f,
-                             const int *atom_a, const int *atom_b, const float *bond_k, const float *bond_r0,
-                             float *frc_f, float *atom_e, cudaStream_t stream);
diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/bond/bond_force_with_atom_energy_impl.cuh b/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/bond/bond_force_with_atom_energy_impl.cuh
deleted file mode 100644
index 4841de06336..00000000000
--- a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/bond/bond_force_with_atom_energy_impl.cuh
+++ /dev/null
@@ -1,28 +0,0 @@
-/**
- * Copyright 2021 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_SPONGE_BOND_BOND_FORCE_WITH_ATOM_ENERGY_IMPL_H_
-#define MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_SPONGE_BOND_BOND_FORCE_WITH_ATOM_ENERGY_IMPL_H_
-
-#include <cuda_runtime_api.h>
-#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/cuda_common.h"
-#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/cuda_device_info.h"
-
-CUDA_LIB_EXPORT void BondForceWithAtomEnergy(int bond_numbers, int atom_numbers, const int *uint_crd_f,
-                                             const float *scaler_f, const int *atom_a, const int *atom_b,
-                                             const float *bond_k, const float *bond_r0, float *frc_f, float *atom_e,
-                                             cudaStream_t stream);
-#endif
diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/bond/bond_force_with_atom_virial_impl.cu b/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/bond/bond_force_with_atom_virial_impl.cu
deleted file mode 100644
index 78dd7f56754..00000000000
--- a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/bond/bond_force_with_atom_virial_impl.cu
+++ /dev/null
@@ -1,69 +0,0 @@
-/**
- * Copyright 2021 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "plugin/device/gpu/kernel/cuda_impl/sponge/bond/bond_force_with_atom_virial_impl.cuh"
-#include "plugin/device/gpu/kernel/cuda_impl/sponge/common_sponge.cuh"
-#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/util.cuh"
-
-__global__ void BondForceWithAtomVirialKernel(int bond_numbers, const UNSIGNED_INT_VECTOR *uint_crd,
-                                              const VECTOR *scaler, const int *atom_a, const int *atom_b,
-                                              const float *bond_k, const float *bond_r0, VECTOR *frc,
-                                              float *atom_virial) {
-  int bond_i = blockDim.x * blockIdx.x + threadIdx.x;
-  if (bond_i < bond_numbers) {
-    int atom_i = atom_a[bond_i];
-    int atom_j = atom_b[bond_i];
-
-    float k = bond_k[bond_i];
-    float r0 = bond_r0[bond_i];
-
-    VECTOR dr = Get_Periodic_Displacement(uint_crd[atom_i], uint_crd[atom_j], scaler[0]);
-
-    float abs_r = norm3df(dr.x, dr.y, dr.z);
-    float r_1 = 1. / abs_r;
-    float tempf = (abs_r - r0) * k;
-    VECTOR f = 2 * tempf * r_1 * dr;
-
-    atomicAdd(&frc[atom_i].x, -f.x);
-    atomicAdd(&frc[atom_i].y, -f.y);
-    atomicAdd(&frc[atom_i].z, -f.z);
-
-    atomicAdd(&frc[atom_j].x, f.x);
-    atomicAdd(&frc[atom_j].y, f.y);
-    atomicAdd(&frc[atom_j].z, f.z);
-
-    atomicAdd(&atom_virial[atom_i], abs_r * tempf);
-  }
-}
-
-void BondForceWithAtomVirial(int bond_numbers, int atom_numbers, const int *uint_crd_f, const float *scaler_f,
-                             const int *atom_a, const int *atom_b, const float *bond_k, const float *bond_r0,
-                             float *frc_f, float *atom_v, cudaStream_t stream) {
-  Reset_List<<<ceilf(static_cast<float>(3. * atom_numbers) / 128), 128, 0, stream>>>(3 * atom_numbers, frc_f, 0.);
-  Reset_List<<<ceilf(static_cast<float>(atom_numbers) / 128), 128, 0, stream>>>(atom_numbers, atom_v, 0.);
-  size_t thread_per_block = 128;
-  size_t block_per_grid = ceilf(static_cast<float>(bond_numbers) / 128);
-  UNSIGNED_INT_VECTOR *uint_crd =
-    const_cast<UNSIGNED_INT_VECTOR *>(reinterpret_cast<const UNSIGNED_INT_VECTOR *>(uint_crd_f));
-  VECTOR *frc = const_cast<VECTOR *>(reinterpret_cast<const VECTOR *>(frc_f));
-  VECTOR *scaler = const_cast<VECTOR *>(reinterpret_cast<const VECTOR *>(scaler_f));
-  BondForceWithAtomVirialKernel<<<block_per_grid, thread_per_block, 0, stream>>>(bond_numbers, uint_crd, scaler, atom_a,
-                                                                                 atom_b, bond_k, bond_r0, frc, atom_v);
-  return;
-}
-void BondForceWithAtomVirial(int bond_numbers, int atom_numbers, const int *uint_crd_f, const float *scaler_f,
-                             const int *atom_a, const int *atom_b, const float *bond_k, const float *bond_r0,
-                             float *frc_f, float *atom_v, cudaStream_t stream);
diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/bond/bond_force_with_atom_virial_impl.cuh b/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/bond/bond_force_with_atom_virial_impl.cuh
deleted file mode 100644
index 0ebe0dbb4a6..00000000000
--- a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/bond/bond_force_with_atom_virial_impl.cuh
+++ /dev/null
@@ -1,28 +0,0 @@
-/**
- * Copyright 2021 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_SPONGE_BOND_BOND_FORCE_WITH_ATOM_VIRIAL_IMPL_H_
-#define MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_SPONGE_BOND_BOND_FORCE_WITH_ATOM_VIRIAL_IMPL_H_
-
-#include <cuda_runtime_api.h>
-#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/cuda_common.h"
-#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/cuda_device_info.h"
-
-CUDA_LIB_EXPORT void BondForceWithAtomVirial(int bond_numbers, int atom_numbers, const int *uint_crd_f,
-                                             const float *scaler_f, const int *atom_a, const int *atom_b,
-                                             const float *bond_k, const float *bond_r0, float *frc_f, float *atom_v,
-                                             cudaStream_t stream);
-#endif
diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/common/atomcrdtocv_impl.cu b/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/common/atomcrdtocv_impl.cu
deleted file mode 100644
index 8c9abb85541..00000000000
--- a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/common/atomcrdtocv_impl.cu
+++ /dev/null
@@ -1,123 +0,0 @@
-/**
- * Copyright 2021 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "plugin/device/gpu/kernel/cuda_impl/sponge/common/atomcrdtocv_impl.cuh"
-#include "plugin/device/gpu/kernel/cuda_impl/sponge/common_sponge.cuh"
-#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/util.cuh"
-
-__device__ __host__ float fc(float Rij) {
-  const float PI = 3.141592654;
-  const float Rc = 1000.0;
-  return 0.5 * cosf(PI / Rc * Rij) + 0.5;
-}
-
-__global__ void Record_Box_Map_Times(int atom_numbers, const float *crd, const float *old_crd, float *box,
-                                     int *box_map_times) {
-  float half_box[3] = {0.5F * box[0], 0.5F * box[1], 0.5F * box[2]};
-  int i = blockDim.x * blockIdx.x + threadIdx.x;
-  if (i < atom_numbers) {
-    if (crd[3 * i + 0] - old_crd[3 * i + 0] > half_box[0]) {
-      box_map_times[3 * i + 0] = box_map_times[3 * i + 0] - 1;
-    } else if (crd[3 * i + 0] - old_crd[3 * i + 0] < -half_box[0]) {
-      box_map_times[3 * i + 0] = box_map_times[3 * i + 0] + 1;
-    }
-    if (crd[3 * i + 1] - old_crd[3 * i + 1] > half_box[1]) {
-      box_map_times[3 * i + 1] = box_map_times[3 * i + 1] - 1;
-    } else if (crd[3 * i + 1] - old_crd[3 * i + 1] < -half_box[1]) {
-      box_map_times[3 * i + 1] = box_map_times[3 * i + 1] + 1;
-    }
-    if (crd[3 * i + 2] - old_crd[3 * i + 2] > half_box[2]) {
-      box_map_times[3 * i + 2] = box_map_times[3 * i + 2] - 1;
-    } else if (crd[3 * i + 2] - old_crd[3 * i + 2] < -half_box[2]) {
-      box_map_times[3 * i + 2] = box_map_times[3 * i + 2] + 1;
-    }
-  }
-}
-
-__global__ void gen_nowarp_crd(int atom_numbers, const float *crd, float *box, int *box_map_times, float *nowarp_crd) {
-  int i = blockDim.x * blockIdx.x + threadIdx.x;
-  if (i < atom_numbers) {
-    nowarp_crd[3 * i + 0] = static_cast<float>(box_map_times[3 * i + 0]) * box[0] + crd[3 * i + 0];
-    nowarp_crd[3 * i + 1] = static_cast<float>(box_map_times[3 * i + 1]) * box[1] + crd[3 * i + 1];
-    nowarp_crd[3 * i + 2] = static_cast<float>(box_map_times[3 * i + 2]) * box[2] + crd[3 * i + 2];
-  }
-}
-
-__global__ void G_Radial(const int start_serial, const int end_serial, const float *crd, float *g_radial) {
-  const float Rs = 0.5, Eta = 0.5;
-  int i = blockDim.x * blockIdx.x + threadIdx.x;
-  if (i >= start_serial && i < end_serial) {
-    float rij;
-    float g_radial_lin = 0.;
-    for (int j = start_serial; j < end_serial; j = j + 1) {
-      if (j != i) {
-        // rij = sqrtf((crd[3*i+0] - crd[j]) * (crd[i] - crd[j]));
-        rij = sqrtf(normfloat(crd, crd, i, j));
-        g_radial_lin = g_radial_lin + expf(-Eta * (rij - Rs) * (rij - Rs)) * fc(rij);
-      } else {
-        continue;
-      }
-    }
-    g_radial[i] = g_radial_lin;
-  }
-}
-
-__global__ void G_Angular(const int start_serial, const int end_serial, const float *crd, float *g_angular) {
-  const float Rs = 0.5, Thetas = 3.14, Eta = 0.5, Zeta = 2.0;
-  int i = blockDim.x * blockIdx.x + threadIdx.x;
-  if (i >= start_serial && i < end_serial) {
-    float rij, rik, rjk, theta_jik;
-    float g_angular_lin = 0.;
-    for (int j = start_serial; j < end_serial; j = j + 1) {
-      if (j != i) {
-        rij = sqrtf(normfloat(crd, crd, i, j));
-        for (int k = j + 1; k < end_serial; k = k + 1) {
-          if (k != i) {
-            rik = sqrtf(normfloat(crd, crd, i, k));
-            rjk = sqrtf(normfloat(crd, crd, j, k));
-            theta_jik =
-              acosf(fmaxf(fminf((rij * rij + rik * rik - rjk * rjk) / (2. * rij * rik), 0.999999), -0.999999));
-            g_angular_lin = g_angular_lin + powf(1. + cosf(theta_jik - Thetas), Zeta) *
-                                              expf(-Eta * powf(0.5 * (rij + rik) - Rs, 2.)) * fc(rij) * fc(rik);
-          } else {
-            continue;
-          }
-        }
-      } else {
-        continue;
-      }
-    }
-    g_angular[i] = powf(2., 1. - Zeta) * g_angular_lin;
-  }
-}
-
-void AtomCrdToCV(int atom_numbers, int start_serial, int end_serial, int number, const float *crd_f,
-                 const float *old_crd, float *nowarp_crd, int *box_map_times, float *box, float *g_radial,
-                 float *g_angular, cudaStream_t stream) {
-  Reset_List<<<ceilf(static_cast<float>(3. * atom_numbers) / 128), 128, 0, stream>>>(3 * atom_numbers, box_map_times,
-                                                                                     0);
-  Record_Box_Map_Times<<<ceilf(static_cast<float>(3. * atom_numbers) / 128), 128, 0, stream>>>(
-    atom_numbers, crd_f, old_crd, box, box_map_times);
-  gen_nowarp_crd<<<ceilf(static_cast<float>(3. * atom_numbers) / 128), 128, 0, stream>>>(atom_numbers, crd_f, box,
-                                                                                         box_map_times, nowarp_crd);
-  G_Radial<<<1, number, 0, stream>>>(start_serial, end_serial, nowarp_crd, g_radial);
-  G_Angular<<<1, number, 0, stream>>>(start_serial, end_serial, nowarp_crd, g_angular);
-  return;
-}
-
-void AtomCrdToCV(int atom_numbers, int start_serial, int end_serial, int number, const float *crd_f,
-                 const float *old_crd, float *nowarp_crd, int *box_map_times, float *box, float *g_radial,
-                 float *g_angular, cudaStream_t stream);
diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/common/atomcrdtocv_impl.cuh b/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/common/atomcrdtocv_impl.cuh
deleted file mode 100644
index 85338ade301..00000000000
--- a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/common/atomcrdtocv_impl.cuh
+++ /dev/null
@@ -1,27 +0,0 @@
-/**
- * Copyright 2021 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_SPONGE_COMMON_ATOMCRDTOCV_IMPL_H_
-#define MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_SPONGE_COMMON_ATOMCRDTOCV_IMPL_H_
-
-#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/cuda_common.h"
-#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/cuda_device_info.h"
-
-CUDA_LIB_EXPORT void AtomCrdToCV(int atom_numbers, int start_serial, int end_serial, int number, const float *crd_f,
-                                 const float *old_crd, float *nowarp_crd, int *box_map_times, float *box,
-                                 float *g_radial, float *g_angular, cudaStream_t stream);
-
-#endif // MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_SPONGE_COMMON_ATOMCRDTOCV_IMPL_H_
diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/common/crd_to_uint_crd_impl.cu b/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/common/crd_to_uint_crd_impl.cu
deleted file mode 100644
index 83dd52f5420..00000000000
--- a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/common/crd_to_uint_crd_impl.cu
+++ /dev/null
@@ -1,51 +0,0 @@
-/**
- * Copyright 2021 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "plugin/device/gpu/kernel/cuda_impl/sponge/common_sponge.cuh"
-#include "plugin/device/gpu/kernel/cuda_impl/sponge/common/crd_to_uint_crd_impl.cuh"
-
-__global__ void Crd_To_Uint_Crd(const int atom_numbers, const VECTOR *scale_factor, const VECTOR *crd,
-                                UNSIGNED_INT_VECTOR *uint_crd) {
-  int atom_i = blockDim.x * blockIdx.x + threadIdx.x;
-  if (atom_i < atom_numbers) {
-    uint_crd[atom_i].uint_x = crd[atom_i].x * scale_factor[0].x;
-    uint_crd[atom_i].uint_y = crd[atom_i].y * scale_factor[0].y;
-    uint_crd[atom_i].uint_z = crd[atom_i].z * scale_factor[0].z;
-    /*uint_crd[atom_i].uint_x = 2 * uint_crd[atom_i].uint_x;
-    uint_crd[atom_i].uint_y = 2 * uint_crd[atom_i].uint_y;
-    uint_crd[atom_i].uint_z = 2 * uint_crd[atom_i].uint_z;*/
-    uint_crd[atom_i].uint_x = uint_crd[atom_i].uint_x << 1;
-    uint_crd[atom_i].uint_y = uint_crd[atom_i].uint_y << 1;
-    uint_crd[atom_i].uint_z = uint_crd[atom_i].uint_z << 1;
-  }
-}
-
-void CrdToUintCrd(const int atom_numbers, const float *crd_to_uint_crd_cof_f, const float *crd_f,
-                  unsigned int *uint_crd_f, cudaStream_t stream) {
-  VECTOR *crd = const_cast<VECTOR *>(reinterpret_cast<const VECTOR *>(crd_f));
-  VECTOR *crd_to_uint_crd_cof = const_cast<VECTOR *>(reinterpret_cast<const VECTOR *>(crd_to_uint_crd_cof_f));
-
-  UNSIGNED_INT_VECTOR *uint_crd =
-    const_cast<UNSIGNED_INT_VECTOR *>(reinterpret_cast<UNSIGNED_INT_VECTOR *>(uint_crd_f));
-
-  Crd_To_Uint_Crd<<<ceilf(static_cast<float>(atom_numbers) / 128.0), 128, 0, stream>>>(
-    atom_numbers, crd_to_uint_crd_cof, crd, uint_crd);
-
-  return;
-}
-
-void CrdToUintCrd(const int atom_numbers, const float *crd_to_uint_crd_cof_f, const float *crd_f,
-                  unsigned int *uint_crd_f, cudaStream_t stream);
diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/common/crd_to_uint_crd_impl.cuh b/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/common/crd_to_uint_crd_impl.cuh
deleted file mode 100644
index 99f13cdeac7..00000000000
--- a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/common/crd_to_uint_crd_impl.cuh
+++ /dev/null
@@ -1,27 +0,0 @@
-/**
- * Copyright 2021 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_SPONGE_CRD_TO_UINT_CRD_IMPL_H_
-#define MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_SPONGE_CRD_TO_UINT_CRD_IMPL_H_
-
-#include <cuda_runtime_api.h>
-#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/cuda_common.h"
-#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/cuda_device_info.h"
-
-CUDA_LIB_EXPORT void CrdToUintCrd(const int atom_numbers, const float *crd_to_uint_crd_cof_f, const float *crd_f,
-                                  unsigned int *uint_crd_f, cudaStream_t stream);
-
-#endif // MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_SPONGE_CRD_TO_UINT_CRD_IMPL_H_
diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/common/crd_to_uint_crd_quarter_impl.cu b/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/common/crd_to_uint_crd_quarter_impl.cu
deleted file mode 100644
index 9f7aa284894..00000000000
--- a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/common/crd_to_uint_crd_quarter_impl.cu
+++ /dev/null
@@ -1,54 +0,0 @@
-/**
- * Copyright 2021 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-#include "plugin/device/gpu/kernel/cuda_impl/sponge/common/crd_to_uint_crd_quarter_impl.cuh"
-#include "plugin/device/gpu/kernel/cuda_impl/sponge/common_sponge.cuh"
-
-__global__ void Crd_To_Uint_Crd_Quarter(const int atom_numbers, const VECTOR *scale_factor, const VECTOR *crd,
-                                        UNSIGNED_INT_VECTOR *uint_crd) {
-  int atom_i = blockDim.x * blockIdx.x + threadIdx.x;
-  if (atom_i < atom_numbers) {
-    INT_VECTOR tempi;
-    VECTOR temp = crd[atom_i];
-    temp.x *= scale_factor[0].x;
-    temp.y *= scale_factor[0].y;
-    temp.z *= scale_factor[0].z;
-
-    tempi.int_x = temp.x;
-    tempi.int_y = temp.y;
-    tempi.int_z = temp.z;
-
-    uint_crd[atom_i].uint_x = (tempi.int_x << 2);
-    uint_crd[atom_i].uint_y = (tempi.int_y << 2);
-    uint_crd[atom_i].uint_z = (tempi.int_z << 2);
-  }
-}
-
-void CrdToUintCrdQuarter(const int atom_numbers, const float *crd_to_uint_crd_cof_f, const float *crd_f,
-                         unsigned int *uint_crd_f, cudaStream_t stream) {
-  VECTOR *crd = const_cast<VECTOR *>(reinterpret_cast<const VECTOR *>(crd_f));
-  VECTOR *crd_to_uint_crd_cof = const_cast<VECTOR *>(reinterpret_cast<const VECTOR *>(crd_to_uint_crd_cof_f));
-
-  UNSIGNED_INT_VECTOR *uint_crd =
-    const_cast<UNSIGNED_INT_VECTOR *>(reinterpret_cast<UNSIGNED_INT_VECTOR *>(uint_crd_f));
-
-  Crd_To_Uint_Crd_Quarter<<<ceilf(static_cast<float>(atom_numbers) / 128.0), 128, 0, stream>>>(
-    atom_numbers, crd_to_uint_crd_cof, crd, uint_crd);
-
-  return;
-}
-
-void CrdToUintCrdQuarter(const int atom_numbers, const float *crd_to_uint_crd_cof_f, const float *crd_f,
-                         unsigned int *uint_crd_f, cudaStream_t stream);
diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/common/crd_to_uint_crd_quarter_impl.cuh b/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/common/crd_to_uint_crd_quarter_impl.cuh
deleted file mode 100644
index e2360b42fc8..00000000000
--- a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/common/crd_to_uint_crd_quarter_impl.cuh
+++ /dev/null
@@ -1,27 +0,0 @@
-/**
- * Copyright 2021 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
"License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_SPONGE_CRD_TO_UINT_CRD_QUARTER_IMPL_H_ -#define MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_SPONGE_CRD_TO_UINT_CRD_QUARTER_IMPL_H_ - -#include -#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/cuda_common.h" -#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/cuda_device_info.h" - -CUDA_LIB_EXPORT void CrdToUintCrdQuarter(const int atom_numbers, const float *crd_to_uint_crd_cof_f, const float *crd_f, - unsigned int *uint_crd_f, cudaStream_t stream); - -#endif // MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_SPONGE_CRD_TO_UINT_CRD_QUARTER_IMPL_H_ diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/common/get_center_of_mass_impl.cu b/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/common/get_center_of_mass_impl.cu deleted file mode 100644 index b581bac9d1f..00000000000 --- a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/common/get_center_of_mass_impl.cu +++ /dev/null @@ -1,44 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "plugin/device/gpu/kernel/cuda_impl/sponge/common_sponge.cuh" -#include "plugin/device/gpu/kernel/cuda_impl/sponge/common/get_center_of_mass_impl.cuh" - -__global__ void Get_Center_Of_Mass(int residue_numbers, int *start, int *end, VECTOR *crd, float *atom_mass, - float *residue_mass_inverse, VECTOR *center_of_mass) { - for (int residue_i = blockDim.x * blockIdx.x + threadIdx.x; residue_i < residue_numbers; - residue_i += gridDim.x * blockDim.x) { - VECTOR com_lin = {0.0f, 0.0f, 0.0f}; - for (int atom_i = start[residue_i]; atom_i < end[residue_i]; atom_i += 1) { - com_lin = com_lin + atom_mass[atom_i] * crd[atom_i]; - } - center_of_mass[residue_i] = residue_mass_inverse[residue_i] * com_lin; - } -} - -void GetCenterOfMass(int residue_numbers, int *start, int *end, float *crd_f, float *atom_mass, - float *residue_mass_inverse, float *center_of_mass_f, cudaStream_t stream) { - Reset_List<<(3. 
diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/common/get_center_of_mass_impl.cu b/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/common/get_center_of_mass_impl.cu
deleted file mode 100644
index b581bac9d1f..00000000000
--- a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/common/get_center_of_mass_impl.cu
+++ /dev/null
@@ -1,44 +0,0 @@
-/**
- * Copyright 2021 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "plugin/device/gpu/kernel/cuda_impl/sponge/common_sponge.cuh"
-#include "plugin/device/gpu/kernel/cuda_impl/sponge/common/get_center_of_mass_impl.cuh"
-
-__global__ void Get_Center_Of_Mass(int residue_numbers, int *start, int *end, VECTOR *crd, float *atom_mass,
-                                   float *residue_mass_inverse, VECTOR *center_of_mass) {
-  for (int residue_i = blockDim.x * blockIdx.x + threadIdx.x; residue_i < residue_numbers;
-       residue_i += gridDim.x * blockDim.x) {
-    VECTOR com_lin = {0.0f, 0.0f, 0.0f};
-    for (int atom_i = start[residue_i]; atom_i < end[residue_i]; atom_i += 1) {
-      com_lin = com_lin + atom_mass[atom_i] * crd[atom_i];
-    }
-    center_of_mass[residue_i] = residue_mass_inverse[residue_i] * com_lin;
-  }
-}
-
-void GetCenterOfMass(int residue_numbers, int *start, int *end, float *crd_f, float *atom_mass,
-                     float *residue_mass_inverse, float *center_of_mass_f, cudaStream_t stream) {
-  Reset_List<<<ceilf(static_cast<float>(3. * residue_numbers) / 128), 128, 0, stream>>>(3 * residue_numbers,
-                                                                                        center_of_mass_f, 0.);
-  VECTOR *crd = reinterpret_cast<VECTOR *>(crd_f);
-  VECTOR *center_of_mass = reinterpret_cast<VECTOR *>(center_of_mass_f);
-  Get_Center_Of_Mass<<<20, 32, 0, stream>>>(residue_numbers, start, end, crd, atom_mass, residue_mass_inverse,
-                                            center_of_mass);
-  return;
-}
-
-void GetCenterOfMass(int residue_numbers, int *start, int *end, float *crd_f, float *atom_mass,
-                     float *residue_mass_inverse, float *center_of_mass_f, cudaStream_t stream);
diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/common/get_center_of_mass_impl.cuh b/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/common/get_center_of_mass_impl.cuh
deleted file mode 100644
index 75bd130e5ac..00000000000
--- a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/common/get_center_of_mass_impl.cuh
+++ /dev/null
@@ -1,27 +0,0 @@
-/**
- * Copyright 2021 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_SPONGE_COMMON_GETCENTEROFMASS_IMPL_H_
-#define MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_SPONGE_COMMON_GETCENTEROFMASS_IMPL_H_
-
-#include <curand_kernel.h>
-#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/cuda_common.h"
-#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/cuda_device_info.h"
-
-CUDA_LIB_EXPORT void GetCenterOfMass(int residue_numbers, int *start, int *end, float *crd_f, float *atom_mass,
-                                     float *residue_mass_inverse, float *center_of_mass_f, cudaStream_t stream);
-
-#endif  // MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_SPONGE_COMMON_GETCENTER_IMPL_H_
diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/common/getcenter_impl.cu b/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/common/getcenter_impl.cu
deleted file mode 100644
index 9e93f160a03..00000000000
--- a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/common/getcenter_impl.cu
+++ /dev/null
@@ -1,43 +0,0 @@
-/**
- * Copyright 2021 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "plugin/device/gpu/kernel/cuda_impl/sponge/common_sponge.cuh"
-#include "plugin/device/gpu/kernel/cuda_impl/sponge/common/getcenter_impl.cuh"
-
-__global__ void GetCenterOfGeometryKernel(const int center_numbers, float center_numbers_inverse,
-                                          const int *center_atoms, const VECTOR *crd, VECTOR *center_of_geometry) {
-  int i = blockDim.x * blockIdx.x + threadIdx.x;
-  if (i < center_numbers) {
-    int atom_i = center_atoms[i];
-    VECTOR temp = center_numbers_inverse * crd[atom_i];
-    atomicAdd(&center_of_geometry[0].x, temp.x);
-    atomicAdd(&center_of_geometry[0].y, temp.y);
-    atomicAdd(&center_of_geometry[0].z, temp.z);
-  }
-}
-
-void GetCenterOfGeometry(const int center_numbers, float center_numbers_inverse, const int *center_atoms,
-                         const float *crd_f, float *center_of_geometry_f, cudaStream_t stream) {
-  VECTOR *crd = const_cast<VECTOR *>(reinterpret_cast<const VECTOR *>(crd_f));
-  VECTOR *center_of_geometry = const_cast<VECTOR *>(reinterpret_cast<const VECTOR *>(center_of_geometry_f));
-  GetCenterOfGeometryKernel<<<ceilf(static_cast<float>(center_numbers) / 32), 32, 0, stream>>>(
-    center_numbers, center_numbers_inverse, center_atoms, crd, center_of_geometry);
-
-  return;
-}
-
-void GetCenterOfGeometry(const int center_numbers, float center_numbers_inverse, const int *center_atoms, float *crd_f,
-                         float *center_of_geometry_f, cudaStream_t stream);
diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/common/getcenter_impl.cuh b/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/common/getcenter_impl.cuh
deleted file mode 100644
index df366b2fede..00000000000
--- a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/common/getcenter_impl.cuh
+++ /dev/null
@@ -1,28 +0,0 @@
-/**
- * Copyright 2021 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_SPONGE_COMMON_GETCENTER_IMPL_H_
-#define MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_SPONGE_COMMON_GETCENTER_IMPL_H_
-
-#include <curand_kernel.h>
-#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/cuda_common.h"
-#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/cuda_device_info.h"
-
-CUDA_LIB_EXPORT void GetCenterOfGeometry(const int center_numbers, float center_numbers_inverse,
-                                         const int *center_atoms, const float *crd_f, float *center_of_geometry_f,
-                                         cudaStream_t stream);
-
-#endif  // MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_SPONGE_COMMON_GETCENTER_IMPL_H_
diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/common/map_center_of_mass_impl.cu b/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/common/map_center_of_mass_impl.cu
deleted file mode 100644
index 01807f2e34d..00000000000
--- a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/common/map_center_of_mass_impl.cu
+++ /dev/null
@@ -1,51 +0,0 @@
-/**
- * Copyright 2021 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "plugin/device/gpu/kernel/cuda_impl/sponge/common_sponge.cuh"
-#include "plugin/device/gpu/kernel/cuda_impl/sponge/common/map_center_of_mass_impl.cuh"
-
-__global__ void Map_Center_Of_Mass(int residue_numbers, int *start, int *end, float *scaler, VECTOR *center_of_mass,
-                                   VECTOR *box_length, VECTOR *no_wrap_crd, VECTOR *crd) {
-  VECTOR trans_vec;
-  VECTOR com;
-  for (int residue_i = blockDim.x * blockIdx.x + threadIdx.x; residue_i < residue_numbers;
-       residue_i += gridDim.x * blockDim.x) {
-    com = center_of_mass[residue_i];
-
-    trans_vec.x = com.x - floorf(com.x / box_length[0].x) * box_length[0].x;
-    trans_vec.y = com.y - floorf(com.y / box_length[0].y) * box_length[0].y;
-    trans_vec.z = com.z - floorf(com.z / box_length[0].z) * box_length[0].z;
-    trans_vec = scaler[0] * trans_vec - com;
-
-    for (int atom_i = start[residue_i] + threadIdx.y; atom_i < end[residue_i]; atom_i += blockDim.y) {
-      crd[atom_i] = no_wrap_crd[atom_i] + trans_vec;
-    }
-  }
-}
-
-void MapCenterOfMass(int residue_numbers, int *start, int *end, float *center_of_mass_f, float *box_length_f,
-                     float *no_wrap_crd_f, float *crd_f, float *scaler, cudaStream_t stream) {
-  VECTOR *crd = reinterpret_cast<VECTOR *>(crd_f);
-  VECTOR *no_wrap_crd = reinterpret_cast<VECTOR *>(no_wrap_crd_f);
-  VECTOR *box_length = reinterpret_cast<VECTOR *>(box_length_f);
-  VECTOR *center_of_mass = reinterpret_cast<VECTOR *>(center_of_mass_f);
-  Map_Center_Of_Mass<<<20, {32, 4}, 0, stream>>>(residue_numbers, start, end, scaler, center_of_mass, box_length,
-                                                 no_wrap_crd, crd);
-  return;
-}
-
-void MapCenterOfMass(int residue_numbers, int *start, int *end, float *center_of_mass_f, float *box_length_f,
-                     float *no_wrap_crd_f, float *crd_f, float *scaler, cudaStream_t stream);
diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/common/map_center_of_mass_impl.cuh b/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/common/map_center_of_mass_impl.cuh
deleted file mode 100644
index 151ba37eb15..00000000000
--- a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/common/map_center_of_mass_impl.cuh
+++ /dev/null
@@ -1,28 +0,0 @@
-/**
- * Copyright 2021 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_SPONGE_COMMON_MAPCENTEROFMASS_IMPL_H_
-#define MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_SPONGE_COMMON_MAPCENTEROFMASS_IMPL_H_
-
-#include <curand_kernel.h>
-#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/cuda_common.h"
-#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/cuda_device_info.h"
-
-CUDA_LIB_EXPORT void MapCenterOfMass(int residue_numbers, int *start, int *end, float *center_of_mass_f,
-                                     float *box_length_f, float *no_wrap_crd_f, float *crd_f, float *scaler,
-                                     cudaStream_t stream);
-
-#endif  // MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_SPONGE_COMMON_MAPCENTEROFMASS_IMPL_H_
diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/common/mdtemperature_impl.cu b/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/common/mdtemperature_impl.cu
deleted file mode 100644
index 39ee4f6761f..00000000000
--- a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/common/mdtemperature_impl.cu
+++ /dev/null
@@ -1,49 +0,0 @@
-/**
- * Copyright 2021 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-#include "plugin/device/gpu/kernel/cuda_impl/sponge/common_sponge.cuh"
-#include "plugin/device/gpu/kernel/cuda_impl/sponge/common/mdtemperature_impl.cuh"
-
-__global__ void MDTemperatureKernel(const int residue_numbers, const int *start, const int *end, const VECTOR *atom_vel,
-                                    const float *atom_mass, float *ek) {
-  int residue_i = blockDim.x * blockIdx.x + threadIdx.x;
-  if (residue_i < residue_numbers) {
-    VECTOR momentum = {0., 0., 0.};
-    float res_mass = 0.;
-    int s = start[residue_i];
-    int e = end[residue_i];
-    float mass_lin;
-    for (int atom_i = s; atom_i < e; atom_i = atom_i + 1) {
-      mass_lin = atom_mass[atom_i];
-
-      momentum.x = momentum.x + mass_lin * atom_vel[atom_i].x;
-      momentum.y = momentum.y + mass_lin * atom_vel[atom_i].y;
-      momentum.z = momentum.z + mass_lin * atom_vel[atom_i].z;
-      res_mass = res_mass + mass_lin;
-    }
-    ek[residue_i] = 0.5 * (momentum.x * momentum.x + momentum.y * momentum.y + momentum.z * momentum.z) / res_mass *
-                    2. / 3. / CONSTANT_kB / residue_numbers;
-  }
-}
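
MDTemperatureKernel accumulates each residue's center-of-mass momentum p and total mass M, converts the translational kinetic energy p^2 / 2M into a temperature via the equipartition relation Ek = (3/2) kB T, and divides by the residue count so that summing ek over all residues yields the average temperature. A scalar sketch of the same arithmetic, with made-up momentum and mass values (only CONSTANT_kB comes from common_sponge.cuh):

    #include <stdio.h>

    #define CONSTANT_kB 0.00198716 /* kcal/(mol*K), as in common_sponge.cuh */

    int main(void) {
      /* one residue: COM momentum components and residue mass (assumed) */
      float px = 12.f, py = -3.f, pz = 5.f, res_mass = 18.f;
      int residue_numbers = 1;
      /* identical expression to the kernel's ek[residue_i] */
      float t = 0.5f * (px * px + py * py + pz * pz) / res_mass * 2.f / 3.f /
                CONSTANT_kB / residue_numbers;
      printf("T = %f K\n", t);
      return 0;
    }
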
-
-void MDTemperature(const int residue_numbers, const int *start, const int *end, const float *atom_vel_f,
-                   const float *atom_mass, float *ek, cudaStream_t stream) {
-  VECTOR *atom_vel = const_cast<VECTOR *>(reinterpret_cast<const VECTOR *>(atom_vel_f));
-  MDTemperatureKernel<<<ceilf(static_cast<float>(residue_numbers) / 32), 32, 0, stream>>>(residue_numbers, start, end,
-                                                                                          atom_vel, atom_mass, ek);
-  return;
-}
-void MDTemperature(const int residue_numbers, const int *start, const int *end, const float *atom_vel_f,
-                   const float *atom_mass, float *ek, cudaStream_t stream);
diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/common/mdtemperature_impl.cuh b/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/common/mdtemperature_impl.cuh
deleted file mode 100644
index b2b08648f3f..00000000000
--- a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/common/mdtemperature_impl.cuh
+++ /dev/null
@@ -1,26 +0,0 @@
-/**
- * Copyright 2021 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_SPONGE_COMMON_MDTEMPERATURE_IMPL_H_
-#define MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_SPONGE_COMMON_MDTEMPERATURE_IMPL_H_
-
-#include <curand_kernel.h>
-#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/cuda_common.h"
-#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/cuda_device_info.h"
-
-CUDA_LIB_EXPORT void MDTemperature(const int residue_numbers, const int *start, const int *end, const float *atom_vel_f,
-                                   const float *atom_mass, float *ek, cudaStream_t stream);
-#endif  // MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_SPONGE_COMMON_MDTEMPERATURE_IMPL_H_
diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/common/total_c6_get_impl.cu b/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/common/total_c6_get_impl.cu
deleted file mode 100644
index efe5c96689d..00000000000
--- a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/common/total_c6_get_impl.cu
+++ /dev/null
@@ -1,48 +0,0 @@
-/**
- * Copyright 2021 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "plugin/device/gpu/kernel/cuda_impl/sponge/common/total_c6_get_impl.cuh"
-#include "plugin/device/gpu/kernel/cuda_impl/sponge/common_sponge.cuh"
-
-__global__ void Total_C6_Get(int atom_numbers, int *atom_lj_type, float *d_lj_b, float *d_factor) {
-  int i, j;
-  float temp_sum = 0;
-  d_factor[0] = 0;
-  int x, y;
-  int itype, jtype, atom_pair_LJ_type;
-  for (i = blockIdx.x * blockDim.x + threadIdx.x; i < atom_numbers; i += gridDim.x * blockDim.x) {
-    itype = atom_lj_type[i];
-    for (j = blockIdx.y * blockDim.y + threadIdx.y; j < atom_numbers; j += gridDim.y * blockDim.y) {
-      jtype = atom_lj_type[j];
-      y = (jtype - itype);
-      x = y >> 31;
-      y = (y ^ x) - x;
-      x = jtype + itype;
-      jtype = (x + y) >> 1;
-      x = (x - y) >> 1;
-      atom_pair_LJ_type = (jtype * (jtype + 1) >> 1) + x;
-      temp_sum += d_lj_b[atom_pair_LJ_type];
-    }
-  }
-  atomicAdd(d_factor, temp_sum);
-}
-
-void total_c6_get(int atom_numbers, int *atom_lj_type, float *d_lj_b, float *d_factor, cudaStream_t stream) {
-  Total_C6_Get<<<{4, 4}, {32, 32}, 0, stream>>>(atom_numbers, atom_lj_type, d_lj_b, d_factor);
-  return;
-}
-
-void total_c6_get(int atom_numbers, int *atom_lj_type, float *d_lj_b, float *d_factor, cudaStream_t stream);
diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/common/total_c6_get_impl.cuh b/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/common/total_c6_get_impl.cuh
deleted file mode 100644
index 76d582b7ae5..00000000000
--- a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/common/total_c6_get_impl.cuh
+++ /dev/null
@@ -1,26 +0,0 @@
-/**
- * Copyright 2021 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_SPONGE_COMMON_TOTAL_C6_GET_IMPL_H_
-#define MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_SPONGE_COMMON_TOTAL_C6_GET_IMPL_H_
-
-#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/cuda_common.h"
-#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/cuda_device_info.h"
-
-CUDA_LIB_EXPORT void total_c6_get(int atom_numbers, int *atom_lj_type, float *d_lj_b, float *d_factor,
-                                  cudaStream_t stream);
-
-#endif  // MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_SPONGE_COMMON_TOTAL_C6_GET_IMPL_H_
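
Total_C6_Get maps an unordered pair of LJ types to a row of the triangular B-coefficient table with branch-free bit tricks: y becomes |jtype - itype|, from which the larger and smaller of the two types are recovered, and the final index is max*(max+1)/2 + min. A plain C sketch of that indexing (function name and test values are illustrative):

    #include <assert.h>

    /* Branch-free unordered-pair index, as computed in Total_C6_Get above. */
    static int pair_index(int itype, int jtype) {
      int y = jtype - itype;
      int x = y >> 31;        /* 0 or -1: sign mask of the difference */
      y = (y ^ x) - x;        /* |jtype - itype| */
      x = jtype + itype;
      int hi = (x + y) >> 1;  /* max(itype, jtype) */
      int lo = (x - y) >> 1;  /* min(itype, jtype) */
      return (hi * (hi + 1) >> 1) + lo;
    }

    int main(void) {
      assert(pair_index(2, 5) == pair_index(5, 2)); /* order-independent */
      assert(pair_index(0, 0) == 0);
      assert(pair_index(3, 3) == 3 * 4 / 2 + 3);    /* diagonal entry */
      return 0;
    }
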
diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/common_sponge.cuh b/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/common_sponge.cuh
deleted file mode 100644
index 349de212bec..00000000000
--- a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/common_sponge.cuh
+++ /dev/null
@@ -1,366 +0,0 @@
-/**
- * Copyright 2021 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_SPONGE_COMMON_SPONGE_H_
-#define MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_SPONGE_COMMON_SPONGE_H_
-
-#include <curand_kernel.h>
-#include <math.h>
-#include <stdio.h>
-#include <stdlib.h>
-
-#include <cuda_runtime.h>
-
-#include <algorithm>
-#include <iostream>
-#include <vector>
-#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/cuda_common.h"
-
-#define TWO_DIVIDED_BY_SQRT_PI 1.1283791670218446
-#define CONSTANT_kB 0.00198716
-#define CONSTANT_Pi 3.1415926535897932f
-static dim3 thread_LJ(8, 32);
-
-__constant__ float XRD3D_Ma[4] = {1.0 / 6.0, -0.5, 0.5, -1.0 / 6.0};
-__constant__ float XRD3D_Mb[4] = {0, 0.5, -1, 0.5};
-__constant__ float XRD3D_Mc[4] = {0, 0.5, 0, -0.5};
-__constant__ float XRD3D_Md[4] = {0, 1.0 / 6.0, 4.0 / 6.0, 1.0 / 6.0};
-__constant__ float XRD3D_dMa[4] = {0.5, -1.5, 1.5, -0.5};
-__constant__ float XRD3D_dMb[4] = {0, 1, -2, 1};
-__constant__ float XRD3D_dMc[4] = {0, 0.5, 0, -0.5};
-
-struct VECTOR {
-  float x;
-  float y;
-  float z;
-};
-
-struct INT_VECTOR {
-  int int_x;
-  int int_y;
-  int int_z;
-};
-
-struct UNSIGNED_INT_VECTOR {
-  unsigned int uint_x;
-  unsigned int uint_y;
-  unsigned int uint_z;
-};
-
-struct NEIGHBOR_LIST {
-  int atom_numbers;
-  int *atom_serial;
-};
-struct UINT_VECTOR_LJ_TYPE {
-  unsigned int uint_x;
-  unsigned int uint_y;
-  unsigned int uint_z;
-  int LJ_type;
-  float charge;
-};
-
-struct ATOM_NEAR {
-  int *atom_serial;
-};
-
-struct GRID_BUCKET {
-  int *atom_serial;
-};
-
-struct GRID_POINTER {
-  int *grid_serial;
-};
-
-struct VIRTUAL_TYPE_0 {
-  float virtual_atom;
-  float from_1;
-  float h_double;
-};
-
-struct VIRTUAL_TYPE_1 {
-  float virtual_atom;
-  float from_1;
-  float from_2;
-  float a;
-};
-
-struct VIRTUAL_TYPE_2 {
-  float virtual_atom;
-  float from_1;
-  float from_2;
-  float from_3;
-  float a;
-  float b;
-};
-
-struct VIRTUAL_TYPE_3 {
-  float virtual_atom;
-  float from_1;
-  float from_2;
-  float from_3;
-  float d;
-  float k;
-};
-
-struct CONSTRAIN_PAIR {
-  int atom_i_serial;
-  int atom_j_serial;
-  float constant_r;
-  float constrain_k;
-};
-
-__device__ __host__ static inline VECTOR operator-(const VECTOR &veca, const VECTOR &vecb) {
-  VECTOR vec;
-  vec.x = veca.x - vecb.x;
-  vec.y = veca.y - vecb.y;
-  vec.z = veca.z - vecb.z;
-  return vec;
-}
-
-__device__ __host__ static inline VECTOR Get_Periodic_Displacement(const UNSIGNED_INT_VECTOR uvec_a,
-                                                                   const UNSIGNED_INT_VECTOR uvec_b,
-                                                                   const VECTOR scaler) {
-  VECTOR dr;
-  dr.x = (static_cast<int>(uvec_a.uint_x - uvec_b.uint_x)) * scaler.x;
-  dr.y = (static_cast<int>(uvec_a.uint_y - uvec_b.uint_y)) * scaler.y;
-  dr.z = (static_cast<int>(uvec_a.uint_z - uvec_b.uint_z)) * scaler.z;
-  return dr;
-}
-
-__device__ __host__ static inline VECTOR Get_Periodic_Displacement(const UINT_VECTOR_LJ_TYPE uvec_a,
-                                                                   const UINT_VECTOR_LJ_TYPE uvec_b,
-                                                                   const VECTOR scaler) {
-  VECTOR dr;
-  dr.x = (static_cast<int>(uvec_a.uint_x - uvec_b.uint_x)) * scaler.x;
-  dr.y = (static_cast<int>(uvec_a.uint_y - uvec_b.uint_y)) * scaler.y;
-  dr.z = (static_cast<int>(uvec_a.uint_z - uvec_b.uint_z)) * scaler.z;
-  return dr;
-}
-
-__device__ __host__ static inline VECTOR Get_Periodic_Displacement(const VECTOR vec_a, const VECTOR vec_b,
-                                                                   const VECTOR box_length) {
-  VECTOR dr;
-  dr = vec_a - vec_b;
-  dr.x = dr.x - floorf(dr.x / box_length.x + 0.5) * box_length.x;
-  dr.y = dr.y - floorf(dr.y / box_length.y + 0.5) * box_length.y;
-  dr.z = dr.z - floorf(dr.z / box_length.z + 0.5) * box_length.z;
-  return dr;
-}
-
-__device__ __host__ static inline VECTOR Get_Periodic_Displacement(const VECTOR vec_a, const VECTOR vec_b,
-                                                                   const VECTOR box_length,
-                                                                   const VECTOR box_length_inverse) {
-  VECTOR dr;
-  dr = vec_a - vec_b;
-  dr.x = dr.x - floorf(dr.x * box_length_inverse.x + 0.5) * box_length.x;
-  dr.y = dr.y - floorf(dr.y * box_length_inverse.y + 0.5) * box_length.y;
-  dr.z = dr.z - floorf(dr.z * box_length_inverse.z + 0.5) * box_length.z;
-  return dr;
-}
-
-__device__ __host__ static inline VECTOR operator+(const VECTOR &veca, const VECTOR &vecb) {
-  VECTOR vec;
-  vec.x = veca.x + vecb.x;
-  vec.y = veca.y + vecb.y;
-  vec.z = veca.z + vecb.z;
-  return vec;
-}
-
-__device__ __host__ static inline float operator*(const VECTOR &veca, const VECTOR &vecb) {
-  return veca.x * vecb.x + veca.y * vecb.y + veca.z * vecb.z;
-}
-__device__ __host__ static inline VECTOR operator*(const float &a, const VECTOR &vecb) {
-  VECTOR vec;
-  vec.x = a * vecb.x;
-  vec.y = a * vecb.y;
-  vec.z = a * vecb.z;
-  return vec;
-}
-
-__device__ __host__ static inline VECTOR operator-(const VECTOR &vecb) {
-  VECTOR vec;
-  vec.x = -vecb.x;
-  vec.y = -vecb.y;
-  vec.z = -vecb.z;
-  return vec;
-}
-
-__device__ __host__ static inline VECTOR operator^(const VECTOR &veca, const VECTOR &vecb) {
-  VECTOR vec;
-  vec.x = veca.y * vecb.z - veca.z * vecb.y;
-  vec.y = veca.z * vecb.x - veca.x * vecb.z;
-  vec.z = veca.x * vecb.y - veca.y * vecb.x;
-  return vec;
-}
-
-__device__ __host__ static inline float normfloat(const float *x, const float *y, int i, int j) {
-  float s = 0;
-  s += (x[3 * i + 0] - y[3 * j + 0]) * (x[3 * i + 0] - y[3 * j + 0]);
-  s += (x[3 * i + 1] - y[3 * j + 1]) * (x[3 * i + 1] - y[3 * j + 1]);
-  s += (x[3 * i + 2] - y[3 * j + 2]) * (x[3 * i + 2] - y[3 * j + 2]);
-  return s;
-}
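
The two box-length overloads of Get_Periodic_Displacement apply the minimal-image convention: after the raw difference, each component is shifted by a whole number of box lengths so it lands in [-box/2, box/2). A host-side one-axis sketch of the same wrap, with assumed positions and box size:

    #include <math.h>
    #include <stdio.h>

    /* Minimal-image displacement along one axis, mirroring the floorf-based
       wrap in Get_Periodic_Displacement above. */
    static float minimal_image(float xa, float xb, float box) {
      float dr = xa - xb;
      return dr - floorf(dr / box + 0.5f) * box;
    }

    int main(void) {
      /* atoms at 1.0 and 63.0 in a 64-long box are really 2.0 apart */
      printf("%f\n", minimal_image(1.0f, 63.0f, 64.0f));  /* ->  2.000000 */
      printf("%f\n", minimal_image(63.0f, 1.0f, 64.0f));  /* -> -2.000000 */
      return 0;
    }
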
-
-__global__ static void construct_neighbor_list_kernel(int atom_numbers, int max_neighbor_numbers, int *nl_atom_numbers,
-                                                      int *nl_atom_serial, NEIGHBOR_LIST *nl) {
-  for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < atom_numbers; i += gridDim.x * blockDim.x) {
-    nl[i].atom_numbers = nl_atom_numbers[i];
-    nl[i].atom_serial = nl_atom_serial + i * max_neighbor_numbers;
-  }
-}
-
-__global__ static void construct_atom_near(int atom_numbers, int near_numbers, int *atom_serial, ATOM_NEAR *an) {
-  for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < atom_numbers; i += gridDim.x * blockDim.x) {
-    an[i].atom_serial = atom_serial + i * near_numbers;
-  }
-}
-
-static inline bool Malloc_Safely(void **address, size_t size) {
-  address[0] = NULL;
-  address[0] = reinterpret_cast<void *>(malloc(size));
-  if (address[0] != NULL) {
-    return true;
-  } else {
-    printf("malloc failed!\n");
-    getchar();
-    return false;
-  }
-}
-
-static inline bool Cuda_Malloc_Safely(void **address, size_t size) {
-  cudaError_t cuda_error = cudaMalloc(&address[0], size);
-  if (cuda_error == 0) {
-    return true;
-  } else {
-    printf("cudaMalloc failed! error %d\n", cuda_error);
-    getchar();
-    return false;
-  }
-}
-
-__global__ static void construct_constrain_pair(int constrain_pair_numbers, const int *atom_i_serials,
-                                                const int *atom_j_serials, const float *constant_rs,
-                                                const float *constrain_ks, CONSTRAIN_PAIR *constrain_pair) {
-  int atom_i = blockDim.x * blockIdx.x + threadIdx.x;
-  if (atom_i < constrain_pair_numbers) {
-    constrain_pair[atom_i].atom_i_serial = atom_i_serials[atom_i];
-    constrain_pair[atom_i].atom_j_serial = atom_j_serials[atom_i];
-    constrain_pair[atom_i].constant_r = constant_rs[atom_i];
-    constrain_pair[atom_i].constrain_k = constrain_ks[atom_i];
-  }
-}
-
-__global__ static void Copy_Crd_To_New_Crd_Start(const int atom_numbers, const UNSIGNED_INT_VECTOR *crd,
-                                                 UINT_VECTOR_LJ_TYPE *new_crd, const int *LJ_type,
-                                                 const float *charge) {
-  int atom_i = blockDim.x * blockIdx.x + threadIdx.x;
-  if (atom_i < atom_numbers) {
-    new_crd[atom_i].uint_x = crd[atom_i].uint_x;
-    new_crd[atom_i].uint_y = crd[atom_i].uint_y;
-    new_crd[atom_i].uint_z = crd[atom_i].uint_z;
-    new_crd[atom_i].LJ_type = LJ_type[atom_i];
-    new_crd[atom_i].charge = charge[atom_i];
-  }
-}
-
-// void Constrain_Force_Cycle_With_Virial(int atom_numbers, int constrain_pair_numbers, const unsigned int *uint_crd_f,
-//                                        const float *scaler_f, float *constrain_pair_f, const float *pair_dr_f,
-//                                        const int *atom_i_serials, const int *atom_j_serials,
-//                                        const float *constant_rs, const float *constrain_ks, float *test_frc_f,
-//                                        float *d_atom_virial, cudaStream_t stream);
-
-__global__ static void Rand_Normal(const int float4_numbers, curandStatePhilox4_32_10_t *rand_state,
-                                   float4 *rand_float4) {
-  int i = blockDim.x * blockIdx.x + threadIdx.x;
-  if (i < float4_numbers) {
-    rand_float4[i] = curand_normal4(&rand_state[i]);
-  }
-}
-
-__global__ static void Setup_Rand_Normal_Kernel(const int float4_numbers, curandStatePhilox4_32_10_t *rand_state,
-                                                const int seed) {
-  int id = threadIdx.x + blockIdx.x * blockDim.x;
-  /* Each thread gets same seed, a different sequence
-     number, no offset */
-  if (id < float4_numbers) {
-    curand_init(seed, id, 0, &rand_state[id]);
-  }
-}
-
-__global__ static void Reset_List(const int element_numbers, int *list, const int replace_element) {
-  int i = blockDim.x * blockIdx.x + threadIdx.x;
-  if (i < element_numbers) {
-    list[i] = replace_element;
-  }
-}
-
-__global__ static void Reset_List(const int element_numbers, float *list, const float replace_element) {
-  int i = blockDim.x * blockIdx.x + threadIdx.x;
-  if (i < element_numbers) {
-    list[i] = replace_element;
-  }
-}
-
-__global__ static void Sum_Of_List(const int element_numbers, const float *list, float *sum) {
-  if (threadIdx.x == 0) {
-    sum[0] = 0.;
-  }
-  __syncthreads();
-  float lin = 0.;
-  for (int i = threadIdx.x; i < element_numbers; i = i + blockDim.x) {
-    lin = lin + list[i];
-  }
-  atomicAdd(sum, lin);
-}
-
-__global__ static void Scale_List(const int element_numbers, float *list, float scaler) {
-  int i = blockDim.x * blockIdx.x + threadIdx.x;
-  if (i < element_numbers) {
-    list[i] = list[i] * scaler;
-  }
-}
-
-__global__ static void Copy_List(const int element_numbers, const int *origin_list, int *list) {
-  int i = blockDim.x * blockIdx.x + threadIdx.x;
-  if (i < element_numbers) {
-    list[i] = origin_list[i];
-  }
-}
-__global__ static void Copy_List(const int element_numbers, const float *origin_list, float *list) {
-  int i = blockDim.x * blockIdx.x + threadIdx.x;
-  if (i < element_numbers) {
-    list[i] = origin_list[i];
-  }
-}
-
-__global__ static void Print(const size_t size, const float *input_x) {
-  for (size_t i = 0; i < size; i++) {
-    printf("%f\n", input_x[i]);
-  }
-  return;
-}
-__global__ static void Print(const size_t size, const int *input_x) {
-  for (size_t i = 0; i < size; i++) {
-    printf("%d\n", input_x[i]);
-  }
-  return;
-}
-
-__device__ static VECTOR Make_Vector_Not_Exceed_Value(VECTOR vector, const float value) {
-  return fminf(1.0, value * rnorm3df(vector.x, vector.y, vector.z)) * vector;
-}
-
-#endif  // MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_SPONGE_COMMON_SPONGE_H_
diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/crdmcmap/cal_no_wrap_crd_impl.cu b/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/crdmcmap/cal_no_wrap_crd_impl.cu
deleted file mode 100644
index eef09d37a45..00000000000
--- a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/crdmcmap/cal_no_wrap_crd_impl.cu
+++ /dev/null
@@ -1,46 +0,0 @@
-
-/**
- * Copyright 2021 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "plugin/device/gpu/kernel/cuda_impl/sponge/crdmcmap/cal_no_wrap_crd_impl.cuh"
-#include "plugin/device/gpu/kernel/cuda_impl/sponge/common_sponge.cuh"
-#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/util.cuh"
-
-__global__ void Calculate_No_Wrap_Crd(int atom_numbers, INT_VECTOR *box_map_times, VECTOR *box, VECTOR *crd,
-                                      VECTOR *nowrap_crd) {
-  for (int i = threadIdx.x; i < atom_numbers; i = i + blockDim.x) {
-    nowrap_crd[i].x = static_cast<float>(box_map_times[i].int_x) * box[0].x + crd[i].x;
-    nowrap_crd[i].y = static_cast<float>(box_map_times[i].int_y) * box[0].y + crd[i].y;
-    nowrap_crd[i].z = static_cast<float>(box_map_times[i].int_z) * box[0].z + crd[i].z;
-  }
-}
-
-void calculatenowrapcrd(int atom_numbers, int *box_map_times_f, float *box_f, float *crd_f, float *nowrap_crd_f,
-                        cudaStream_t stream) {
-  Reset_List<<<ceilf(static_cast<float>(3. * atom_numbers) / 128), 128, 0, stream>>>(3 * atom_numbers, nowrap_crd_f,
-                                                                                     0.);
-  INT_VECTOR *box_map_times = reinterpret_cast<INT_VECTOR *>(box_map_times_f);
-  VECTOR *box = reinterpret_cast<VECTOR *>(box_f);
-  VECTOR *crd = reinterpret_cast<VECTOR *>(crd_f);
-  VECTOR *nowrap_crd = reinterpret_cast<VECTOR *>(nowrap_crd_f);
-
-  Calculate_No_Wrap_Crd<<<20, 256, 0, stream>>>(atom_numbers, box_map_times, box, crd,
-                                                nowrap_crd);
-  return;
-}
-
-void calculatenowrapcrd(int atom_numbers, int *box_map_times_f, float *box_f, float *crd_f, float *nowrap_crd_f,
-                        cudaStream_t stream);
diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/crdmcmap/cal_no_wrap_crd_impl.cuh b/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/crdmcmap/cal_no_wrap_crd_impl.cuh
deleted file mode 100644
index 3e925124f70..00000000000
--- a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/crdmcmap/cal_no_wrap_crd_impl.cuh
+++ /dev/null
@@ -1,28 +0,0 @@
-
-/**
- * Copyright 2021 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_SPONGE_CRDMCMAP_CAL_NO_WRAP_CRD_IMPL_H_
-#define MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_SPONGE_CRDMCMAP_CAL_NO_WRAP_CRD_IMPL_H_
-
-#include <curand_kernel.h>
-#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/cuda_common.h"
-#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/cuda_device_info.h"
-
-CUDA_LIB_EXPORT void calculatenowrapcrd(int atom_numbers, int *box_map_times_f, float *box_f, float *crd_f,
-                                        float *nowrap_crd_f, cudaStream_t stream);
-
-#endif  // MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_SPONGE_CRDMCMAP_CAL_NO_WRAP_CRD_IMPL_H_
diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/crdmcmap/refresh_boxmaptimes_impl.cu b/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/crdmcmap/refresh_boxmaptimes_impl.cu
deleted file mode 100644
index c4adee2a760..00000000000
--- a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/crdmcmap/refresh_boxmaptimes_impl.cu
+++ /dev/null
@@ -1,47 +0,0 @@
-
-/**
- * Copyright 2021 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "plugin/device/gpu/kernel/cuda_impl/sponge/crdmcmap/refresh_boxmaptimes_impl.cuh"
-#include "plugin/device/gpu/kernel/cuda_impl/sponge/common_sponge.cuh"
-
-__global__ void Refresh_BoxMapTimes_CUDA(int atom_numbers, VECTOR *box_length_inverse, VECTOR *crd,
-                                         INT_VECTOR *box_map_times, VECTOR *old_crd) {
-  VECTOR crd_i, old_crd_i;
-  for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < atom_numbers; i += gridDim.x * blockDim.x) {
-    crd_i = crd[i];
-    old_crd_i = old_crd[i];
-    box_map_times[i].int_x += floor((old_crd_i.x - crd_i.x) * box_length_inverse[0].x + 0.5);
-    box_map_times[i].int_y += floor((old_crd_i.y - crd_i.y) * box_length_inverse[0].y + 0.5);
-    box_map_times[i].int_z += floor((old_crd_i.z - crd_i.z) * box_length_inverse[0].z + 0.5);
-    old_crd[i] = crd_i;
-  }
-}
-
-void refresh_boxmaptimes(int atom_numbers, float *box_length_inverse_f, float *crd_f, float *old_crd_f,
-                         int *box_map_times_f, cudaStream_t stream) {
-  INT_VECTOR *box_map_times = reinterpret_cast<INT_VECTOR *>(box_map_times_f);
-  VECTOR *box_length_inverse = reinterpret_cast<VECTOR *>(box_length_inverse_f);
-  VECTOR *crd = reinterpret_cast<VECTOR *>(crd_f);
-  VECTOR *old_crd = reinterpret_cast<VECTOR *>(old_crd_f);
-
-  Refresh_BoxMapTimes_CUDA<<<1, 256, 0, stream>>>(atom_numbers, box_length_inverse, crd,
-                                                  box_map_times, old_crd);
-  return;
-}
-
-void refresh_boxmaptimes(int atom_numbers, float *box_length_inverse, float *crd_f, float *old_crd_f,
-                         int *box_map_times_f, cudaStream_t stream);
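
Refresh_BoxMapTimes_CUDA and Calculate_No_Wrap_Crd (earlier in this patch) together maintain unwrapped trajectories: whenever an atom's wrapped position jumps by more than half a box between steps, the per-atom integer crossing counter is incremented, and the unwrapped position is recovered as count * box + wrapped. A one-axis sketch of that round trip; the trajectory values are made up for illustration:

    #include <math.h>
    #include <stdio.h>

    int main(void) {
      const float box = 10.0f;
      /* wrapped positions of one atom drifting in +x (assumed trajectory) */
      float wrapped[] = {8.0f, 9.5f, 0.8f, 2.1f}; /* crosses the boundary once */
      int map_times = 0;                          /* box-crossing counter */
      float old_x = wrapped[0];
      for (int i = 0; i < 4; ++i) {
        /* same update rule as Refresh_BoxMapTimes_CUDA */
        map_times += (int)floorf((old_x - wrapped[i]) / box + 0.5f);
        old_x = wrapped[i];
        /* same reconstruction as Calculate_No_Wrap_Crd */
        printf("nowrap = %f\n", map_times * box + wrapped[i]);
      }
      return 0; /* prints 8.0, 9.5, 10.8, 12.1: monotone, no wrap jumps */
    }
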
diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/crdmcmap/refresh_boxmaptimes_impl.cuh b/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/crdmcmap/refresh_boxmaptimes_impl.cuh
deleted file mode 100644
index cbd8402bf35..00000000000
--- a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/crdmcmap/refresh_boxmaptimes_impl.cuh
+++ /dev/null
@@ -1,28 +0,0 @@
-
-/**
- * Copyright 2021 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_SPONGE_CRDMCMAP_REFRESH_BOXMAPTIMES_IMPL_H_
-#define MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_SPONGE_CRDMCMAP_REFRESH_BOXMAPTIMES_IMPL_H_
-
-#include <curand_kernel.h>
-#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/cuda_common.h"
-#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/cuda_device_info.h"
-
-CUDA_LIB_EXPORT void refresh_boxmaptimes(int atom_numbers, float *box_length_inverse, float *crd_f, float *old_crd_f,
-                                         int *box_map_times_f, cudaStream_t stream);
-
-#endif  // MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_SPONGE_CRDMCMAP_REFRESH_BOXMAPTIMES_IMPL_H_
diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/dihedral/dihedral_atom_energy_impl.cu b/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/dihedral/dihedral_atom_energy_impl.cu
deleted file mode 100644
index 4803c11f25f..00000000000
--- a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/dihedral/dihedral_atom_energy_impl.cu
+++ /dev/null
@@ -1,84 +0,0 @@
-/**
- * Copyright 2021 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "plugin/device/gpu/kernel/cuda_impl/sponge/dihedral/dihedral_atom_energy_impl.cuh"
-#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/util.cuh"
-#include "plugin/device/gpu/kernel/cuda_impl/sponge/common_sponge.cuh"
-
-__global__ void DihedralAtomEnergyKernel(int dihedral_numbers, const UNSIGNED_INT_VECTOR *uint_crd,
-                                         const VECTOR *scaler, const int *atom_a, const int *atom_b, const int *atom_c,
-                                         const int *atom_d, const int *ipn, const float *pk, const float *gamc,
-                                         const float *gams, const float *pn, float *ene) {
-  int dihedral_i = blockDim.x * blockIdx.x + threadIdx.x;
-  if (dihedral_i < dihedral_numbers) {
-    int atom_i = atom_a[dihedral_i];
-    int atom_j = atom_b[dihedral_i];
-    int atom_k = atom_c[dihedral_i];
-    int atom_l = atom_d[dihedral_i];
-
-    float temp_pk = pk[dihedral_i];
-    float temp_pn = pn[dihedral_i];
-    float temp_gamc = gamc[dihedral_i];
-    float temp_gams = gams[dihedral_i];
-
-    VECTOR drij = Get_Periodic_Displacement(uint_crd[atom_i], uint_crd[atom_j], scaler[0]);
-    VECTOR drkj = Get_Periodic_Displacement(uint_crd[atom_k], uint_crd[atom_j], scaler[0]);
-    VECTOR drkl = Get_Periodic_Displacement(uint_crd[atom_k], uint_crd[atom_l], scaler[0]);
-
-    VECTOR r1 = drij ^ drkj;
-    VECTOR r2 = drkl ^ drkj;
-
-    float r1_1 = rnorm3df(r1.x, r1.y, r1.z);
-    float r2_1 = rnorm3df(r2.x, r2.y, r2.z);
-    float r1_1_r2_1 = r1_1 * r2_1;
-
-    float phi = r1 * r2 * r1_1_r2_1;
-    phi = fmaxf(-0.999999, fminf(phi, 0.999999));
-    phi = acosf(phi);
-
-    float sign = (r2 ^ r1) * drkj;
-    copysignf(phi, sign);
-
-    phi = CONSTANT_Pi - phi;
-
-    float nphi = temp_pn * phi;
-
-    float cos_nphi = cosf(nphi);
-    float sin_nphi = sinf(nphi);
-
-    atomicAdd(&ene[atom_i], (temp_pk + cos_nphi * temp_gamc + sin_nphi * temp_gams));
-  }
-}
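
The kernel above evaluates the torsion energy as E = pk + gamc * cos(n*phi) + gams * sin(n*phi). Assuming the usual AMBER-style parameterization, where gamc = pk * cos(n*phi0) and gams = pk * sin(n*phi0) for a phase phi0, this is algebraically the familiar pk * (1 + cos(n*phi - n*phi0)). A small C sketch checking the identity with made-up parameters:

    #include <math.h>
    #include <stdio.h>

    int main(void) {
      /* assumed torsion parameters: barrier pk, periodicity n, phase phi0 */
      float pk = 2.0f, n = 3.0f, phi0 = 1.0471976f; /* phi0 = 60 degrees */
      float gamc = pk * cosf(n * phi0), gams = pk * sinf(n * phi0);
      for (float phi = 0.0f; phi < 6.28f; phi += 1.57f) {
        float e_kernel = pk + cosf(n * phi) * gamc + sinf(n * phi) * gams;
        float e_amber = pk * (1.0f + cosf(n * phi - n * phi0));
        printf("phi=%4.2f  kernel=%8.5f  amber=%8.5f\n", phi, e_kernel, e_amber);
      }
      return 0; /* both columns agree at every phi */
    }
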
-
-void DihedralAtomEnergy(int dihedral_numbers, int atom_numbers, const int *uint_crd_f, const float *scaler_f,
-                        const int *atom_a, const int *atom_b, const int *atom_c, const int *atom_d, const int *ipn,
-                        const float *pk, const float *gamc, const float *gams, const float *pn, float *ene,
-                        cudaStream_t stream) {
-  Reset_List<<<ceilf(static_cast<float>(atom_numbers) / 128), 128, 0, stream>>>(atom_numbers, ene, 0.);
-  size_t thread_per_block = 128;
-  size_t block_per_grid = ceilf(static_cast<float>(dihedral_numbers) / 128);
-  UNSIGNED_INT_VECTOR *uint_crd =
-    const_cast<UNSIGNED_INT_VECTOR *>(reinterpret_cast<const UNSIGNED_INT_VECTOR *>(uint_crd_f));
-  VECTOR *scaler = const_cast<VECTOR *>(reinterpret_cast<const VECTOR *>(scaler_f));
-
-  DihedralAtomEnergyKernel<<<block_per_grid, thread_per_block, 0, stream>>>(
-    dihedral_numbers, uint_crd, scaler, atom_a, atom_b, atom_c, atom_d, ipn, pk, gamc, gams, pn, ene);
-  return;
-}
-void DihedralAtomEnergy(int dihedral_numbers, int atom_numbers, const int *uint_crd_f, const float *scaler_f,
-                        const int *atom_a, const int *atom_b, const int *atom_c, const int *atom_d, const int *ipn,
-                        const float *pk, const float *gamc, const float *gams, const float *pn, float *ene,
-                        cudaStream_t stream);
diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/dihedral/dihedral_atom_energy_impl.cuh b/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/dihedral/dihedral_atom_energy_impl.cuh
deleted file mode 100644
index adc6b16ad9e..00000000000
--- a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/dihedral/dihedral_atom_energy_impl.cuh
+++ /dev/null
@@ -1,28 +0,0 @@
-/**
- * Copyright 2021 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_SPONGE_DIHEDRAL_DIHEDRAL_ATOM_ENERGY_IMPL_H_
-#define MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_SPONGE_DIHEDRAL_DIHEDRAL_ATOM_ENERGY_IMPL_H_
-
-#include <curand_kernel.h>
-#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/cuda_common.h"
-#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/cuda_device_info.h"
-
-CUDA_LIB_EXPORT void DihedralAtomEnergy(int dihedral_numbers, int atom_numbers, const int *uint_crd_f,
-                                        const float *scaler_f, const int *atom_a, const int *atom_b, const int *atom_c,
-                                        const int *atom_d, const int *ipn, const float *pk, const float *gamc,
-                                        const float *gams, const float *pn, float *ene, cudaStream_t stream);
-#endif
diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/dihedral/dihedral_energy_impl.cu b/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/dihedral/dihedral_energy_impl.cu
deleted file mode 100644
index ea58fa92c31..00000000000
--- a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/dihedral/dihedral_energy_impl.cu
+++ /dev/null
@@ -1,81 +0,0 @@
-/**
- * Copyright 2021 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "plugin/device/gpu/kernel/cuda_impl/sponge/dihedral/dihedral_energy_impl.cuh"
-#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/util.cuh"
-#include "plugin/device/gpu/kernel/cuda_impl/sponge/common_sponge.cuh"
-
-__global__ void DihedralEnergyKernel(int dihedral_numbers, const UNSIGNED_INT_VECTOR *uint_crd, const VECTOR *scaler,
-                                     const int *atom_a, const int *atom_b, const int *atom_c, const int *atom_d,
-                                     const int *ipn, const float *pk, const float *gamc, const float *gams,
-                                     const float *pn, float *ene) {
-  int dihedral_i = blockDim.x * blockIdx.x + threadIdx.x;
-  if (dihedral_i < dihedral_numbers) {
-    int atom_i = atom_a[dihedral_i];
-    int atom_j = atom_b[dihedral_i];
-    int atom_k = atom_c[dihedral_i];
-    int atom_l = atom_d[dihedral_i];
-
-    float temp_pk = pk[dihedral_i];
-    float temp_pn = pn[dihedral_i];
-    float temp_gamc = gamc[dihedral_i];
-    float temp_gams = gams[dihedral_i];
-
-    VECTOR drij = Get_Periodic_Displacement(uint_crd[atom_i], uint_crd[atom_j], scaler[0]);
-    VECTOR drkj = Get_Periodic_Displacement(uint_crd[atom_k], uint_crd[atom_j], scaler[0]);
-    VECTOR drkl = Get_Periodic_Displacement(uint_crd[atom_k], uint_crd[atom_l], scaler[0]);
-
-    VECTOR r1 = drij ^ drkj;
-    VECTOR r2 = drkl ^ drkj;
-
-    float r1_1 = rnorm3df(r1.x, r1.y, r1.z);
-    float r2_1 = rnorm3df(r2.x, r2.y, r2.z);
-    float r1_1_r2_1 = r1_1 * r2_1;
-
-    float phi = r1 * r2 * r1_1_r2_1;
-    phi = fmaxf(-0.999999, fminf(phi, 0.999999));
-    phi = acosf(phi);
-
-    float sign = (r2 ^ r1) * drkj;
-    copysignf(phi, sign);
-
-    phi = CONSTANT_Pi - phi;
-
-    float nphi = temp_pn * phi;
-
-    float cos_nphi = cosf(nphi);
-    float sin_nphi = sinf(nphi);
-
-    ene[dihedral_i] = (temp_pk + cos_nphi * temp_gamc + sin_nphi * temp_gams);
-  }
-}
-
-void DihedralEnergy(int dihedral_numbers, const int *uint_crd_f, const float *scaler_f, const int *atom_a,
-                    const int *atom_b, const int *atom_c, const int *atom_d, const int *ipn, const float *pk,
-                    const float *gamc, const float *gams, const float *pn, float *ene, cudaStream_t stream) {
-  size_t thread_per_block = 128;
-  size_t block_per_grid = ceilf(static_cast<float>(dihedral_numbers) / 128);
-  UNSIGNED_INT_VECTOR *uint_crd =
-    const_cast<UNSIGNED_INT_VECTOR *>(reinterpret_cast<const UNSIGNED_INT_VECTOR *>(uint_crd_f));
-  VECTOR *scaler = const_cast<VECTOR *>(reinterpret_cast<const VECTOR *>(scaler_f));
-
-  DihedralEnergyKernel<<<block_per_grid, thread_per_block, 0, stream>>>(
-    dihedral_numbers, uint_crd, scaler, atom_a, atom_b, atom_c, atom_d, ipn, pk, gamc, gams, pn, ene);
-  return;
-}
-void DihedralEnergy(int dihedral_numbers, const int *uint_crd_f, const float *scaler_f, const int *atom_a,
-                    const int *atom_b, const int *atom_c, const int *atom_d, const int *ipn, const float *pk,
-                    const float *gamc, const float *gams, const float *pn, float *ene, cudaStream_t stream);
diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/dihedral/dihedral_energy_impl.cuh b/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/dihedral/dihedral_energy_impl.cuh
deleted file mode 100644
index f8fcb5dfe72..00000000000
--- a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/dihedral/dihedral_energy_impl.cuh
+++ /dev/null
@@ -1,28 +0,0 @@
-/**
- * Copyright 2021 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_SPONGE_DIHEDRAL_DIHEDRAL_ENERGY_IMPL_H_
-#define MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_SPONGE_DIHEDRAL_DIHEDRAL_ENERGY_IMPL_H_
-
-#include <curand_kernel.h>
-#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/cuda_common.h"
-#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/cuda_device_info.h"
-
-CUDA_LIB_EXPORT void DihedralEnergy(int dihedral_numbers, const int *uint_crd_f, const float *scaler_f,
-                                    const int *atom_a, const int *atom_b, const int *atom_c, const int *atom_d,
-                                    const int *ipn, const float *pk, const float *gamc, const float *gams,
-                                    const float *pn, float *ene, cudaStream_t stream);
-#endif
diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/dihedral/dihedral_force_impl.cu b/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/dihedral/dihedral_force_impl.cu
deleted file mode 100644
index e90780cf1ec..00000000000
--- a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/dihedral/dihedral_force_impl.cu
+++ /dev/null
@@ -1,121 +0,0 @@
-/**
- * Copyright 2021 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "plugin/device/gpu/kernel/cuda_impl/sponge/dihedral/dihedral_force_impl.cuh"
-#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/util.cuh"
-#include "plugin/device/gpu/kernel/cuda_impl/sponge/common_sponge.cuh"
-
-__global__ void DihedralForceKernel(int dihedral_numbers, const UNSIGNED_INT_VECTOR *uint_crd, const VECTOR *scaler,
-                                    const int *atom_a, const int *atom_b, const int *atom_c, const int *atom_d,
-                                    const int *ipn, const float *pk, const float *gamc, const float *gams,
-                                    const float *pn, VECTOR *frc) {
-  int dihedral_i = blockDim.x * blockIdx.x + threadIdx.x;
-  if (dihedral_i < dihedral_numbers) {
-    int atom_i = atom_a[dihedral_i];
-    int atom_j = atom_b[dihedral_i];
-    int atom_k = atom_c[dihedral_i];
-    int atom_l = atom_d[dihedral_i];
-
-    int temp_ipn = ipn[dihedral_i];
-
-    float temp_pn = pn[dihedral_i];
-    float temp_gamc = gamc[dihedral_i];
-    float temp_gams = gams[dihedral_i];
-
-    VECTOR drij = Get_Periodic_Displacement(uint_crd[atom_i], uint_crd[atom_j], scaler[0]);
-    VECTOR drkj = Get_Periodic_Displacement(uint_crd[atom_k], uint_crd[atom_j], scaler[0]);
-    VECTOR drkl = Get_Periodic_Displacement(uint_crd[atom_k], uint_crd[atom_l], scaler[0]);
-
-    VECTOR r1 = drij ^ drkj;
-    VECTOR r2 = drkl ^ drkj;
-
-    float r1_1 = rnorm3df(r1.x, r1.y, r1.z);
-    float r2_1 = rnorm3df(r2.x, r2.y, r2.z);
-    float r1_2 = r1_1 * r1_1;
-    float r2_2 = r2_1 * r2_1;
-    float r1_1_r2_1 = r1_1 * r2_1;
-
-    float phi = r1 * r2 * r1_1_r2_1;
-    phi = fmaxf(-0.999999, fminf(phi, 0.999999));
-    phi = acosf(phi);
-
-    float sign = (r2 ^ r1) * drkj;
-    copysignf(phi, sign);
-
-    phi = CONSTANT_Pi - phi;
-
-    float nphi = temp_pn * phi;
-
-    float cos_phi = cosf(phi);
-    float sin_phi = sinf(phi);
-    float cos_nphi = cosf(nphi);
-    float sin_nphi = sinf(nphi);
-
-    float dE_dphi;
-    if (fabsf(sin_phi) < 1e-6) {
-      temp_ipn *= temp_ipn % 2;  // (((temp_ipn - 1) & 1) ^ 1)
-      dE_dphi = temp_gamc * (temp_pn - temp_ipn + temp_ipn * cos_phi);
-    } else {
-      dE_dphi = temp_pn * (temp_gamc * sin_nphi - temp_gams * cos_nphi) / sin_phi;
-    }
-
-    VECTOR dphi_dr1 = r1_1_r2_1 * r2 + cos_phi * r1_2 * r1;
-    VECTOR dphi_dr2 = r1_1_r2_1 * r1 + cos_phi * r2_2 * r2;
-
-    VECTOR dE_dri = dE_dphi * drkj ^ dphi_dr1;
-    VECTOR dE_drl = dE_dphi * dphi_dr2 ^ drkj;
-    VECTOR dE_drj_part = dE_dphi * ((drij ^ dphi_dr1) + (drkl ^ dphi_dr2));
-
-    VECTOR fi = dE_dri;
-    VECTOR fj = dE_drj_part - dE_dri;
-    VECTOR fk = -dE_drl - dE_drj_part;
-    VECTOR fl = dE_drl;
-
-    atomicAdd(&frc[atom_i].x, fi.x);
-    atomicAdd(&frc[atom_i].y, fi.y);
-    atomicAdd(&frc[atom_i].z, fi.z);
-    atomicAdd(&frc[atom_j].x, fj.x);
-    atomicAdd(&frc[atom_j].y, fj.y);
-    atomicAdd(&frc[atom_j].z, fj.z);
-    atomicAdd(&frc[atom_k].x, fk.x);
-    atomicAdd(&frc[atom_k].y, fk.y);
-    atomicAdd(&frc[atom_k].z, fk.z);
-    atomicAdd(&frc[atom_l].x, fl.x);
-    atomicAdd(&frc[atom_l].y, fl.y);
-    atomicAdd(&frc[atom_l].z, fl.z);
-  }
-}
-
-void DihedralForce(int dihedral_numbers, int atom_numbers, const int *uint_crd_f, const float *scaler_f,
-                   const int *atom_a, const int *atom_b, const int *atom_c, const int *atom_d, const int *ipn,
-                   const float *pk, const float *gamc, const float *gams, const float *pn, float *frc_f,
-                   cudaStream_t stream) {
-  Reset_List<<<ceilf(static_cast<float>(3. * atom_numbers) / 128), 128, 0, stream>>>(3 * atom_numbers, frc_f, 0.);
-  size_t thread_per_block = 128;
-  size_t block_per_grid = ceilf(static_cast<float>(dihedral_numbers) / 128);
-  UNSIGNED_INT_VECTOR *uint_crd =
-    const_cast<UNSIGNED_INT_VECTOR *>(reinterpret_cast<const UNSIGNED_INT_VECTOR *>(uint_crd_f));
-  VECTOR *frc = const_cast<VECTOR *>(reinterpret_cast<const VECTOR *>(frc_f));
-  VECTOR *scaler = const_cast<VECTOR *>(reinterpret_cast<const VECTOR *>(scaler_f));
-
-  DihedralForceKernel<<<block_per_grid, thread_per_block, 0, stream>>>(
-    dihedral_numbers, uint_crd, scaler, atom_a, atom_b, atom_c, atom_d, ipn, pk, gamc, gams, pn, frc);
-  return;
-}
-void DihedralForce(int dihedral_numbers, int atom_numbers, const int *uint_crd_f, const float *scaler_f,
-                   const int *atom_a, const int *atom_b, const int *atom_c, const int *atom_d, const int *ipn,
-                   const float *pk, const float *gamc, const float *gams, const float *pn, float *frc_f,
-                   cudaStream_t stream);
diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/dihedral/dihedral_force_impl.cuh b/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/dihedral/dihedral_force_impl.cuh
deleted file mode 100644
index 33038fac47a..00000000000
--- a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/dihedral/dihedral_force_impl.cuh
+++ /dev/null
@@ -1,28 +0,0 @@
-/**
- * Copyright 2021 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_SPONGE_DIHEDRAL_DIHEDRAL_FORCE_IMPL_H_
-#define MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_SPONGE_DIHEDRAL_DIHEDRAL_FORCE_IMPL_H_
-
-#include <curand_kernel.h>
-#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/cuda_common.h"
-#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/cuda_device_info.h"
-
-CUDA_LIB_EXPORT void DihedralForce(int dihedral_numbers, int atom_numbers, const int *uint_crd_f, const float *scaler_f,
-                                   const int *atom_a, const int *atom_b, const int *atom_c, const int *atom_d,
-                                   const int *ipn, const float *pk, const float *gamc, const float *gams,
-                                   const float *pn, float *frc_f, cudaStream_t stream);
-#endif
diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/dihedral/dihedral_force_with_atom_energy_impl.cu b/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/dihedral/dihedral_force_with_atom_energy_impl.cu
deleted file mode 100644
index 46874a0c78a..00000000000
--- a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/dihedral/dihedral_force_with_atom_energy_impl.cu
+++ /dev/null
@@ -1,125 +0,0 @@
-/**
- * Copyright 2021 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */ - -#include "plugin/device/gpu/kernel/cuda_impl/sponge/dihedral/dihedral_force_with_atom_energy_impl.cuh" -#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/util.cuh" -#include "plugin/device/gpu/kernel/cuda_impl/sponge/common_sponge.cuh" - -__global__ void DihedralForceWithAtomEnergyKernel(int dihedral_numbers, const UNSIGNED_INT_VECTOR *uint_crd, - const VECTOR *scaler, const int *atom_a, const int *atom_b, - const int *atom_c, const int *atom_d, const int *ipn, const float *pk, - const float *gamc, const float *gams, const float *pn, VECTOR *frc, - float *ene) { - int dihedral_i = blockDim.x * blockIdx.x + threadIdx.x; - if (dihedral_i < dihedral_numbers) { - int atom_i = atom_a[dihedral_i]; - int atom_j = atom_b[dihedral_i]; - int atom_k = atom_c[dihedral_i]; - int atom_l = atom_d[dihedral_i]; - - int temp_ipn = ipn[dihedral_i]; - - float temp_pk = pk[dihedral_i]; - float temp_pn = pn[dihedral_i]; - float temp_gamc = gamc[dihedral_i]; - float temp_gams = gams[dihedral_i]; - - VECTOR drij = Get_Periodic_Displacement(uint_crd[atom_i], uint_crd[atom_j], scaler[0]); - VECTOR drkj = Get_Periodic_Displacement(uint_crd[atom_k], uint_crd[atom_j], scaler[0]); - VECTOR drkl = Get_Periodic_Displacement(uint_crd[atom_k], uint_crd[atom_l], scaler[0]); - - VECTOR r1 = drij ^ drkj; - VECTOR r2 = drkl ^ drkj; - - float r1_1 = rnorm3df(r1.x, r1.y, r1.z); - float r2_1 = rnorm3df(r2.x, r2.y, r2.z); - float r1_2 = r1_1 * r1_1; - float r2_2 = r2_1 * r2_1; - float r1_1_r2_1 = r1_1 * r2_1; - - float phi = r1 * r2 * r1_1_r2_1; - phi = fmaxf(-0.999999, fminf(phi, 0.999999)); - phi = acosf(phi); - - float sign = (r2 ^ r1) * drkj; - phi = copysignf(phi, sign); // copysignf returns the signed value; the original discarded it - - phi = CONSTANT_Pi - phi; - - float nphi = temp_pn * phi; - - float cos_phi = cosf(phi); - float sin_phi = sinf(phi); - float cos_nphi = cosf(nphi); - float sin_nphi = sinf(nphi); - - float dE_dphi; - if (fabsf(sin_phi) < 1e-6) { - temp_ipn *= (((temp_ipn - 1) & 1) ^ 1); - dE_dphi = temp_gamc * (temp_pn - temp_ipn + temp_ipn * cos_phi); - } else { - dE_dphi = temp_pn * (temp_gamc * sin_nphi - temp_gams * cos_nphi) / sin_phi; - } - - VECTOR dphi_dr1 = r1_1_r2_1 * r2 + cos_phi * r1_2 * r1; - VECTOR dphi_dr2 = r1_1_r2_1 * r1 + cos_phi * r2_2 * r2; - - VECTOR dE_dri = dE_dphi * drkj ^ dphi_dr1; - VECTOR dE_drl = dE_dphi * dphi_dr2 ^ drkj; - VECTOR dE_drj_part = dE_dphi * ((drij ^ dphi_dr1) + (drkl ^ dphi_dr2)); - - VECTOR fi = dE_dri; - VECTOR fj = dE_drj_part - dE_dri; - VECTOR fk = -dE_drl - dE_drj_part; - VECTOR fl = dE_drl; - - atomicAdd(&frc[atom_i].x, fi.x); - atomicAdd(&frc[atom_i].y, fi.y); - atomicAdd(&frc[atom_i].z, fi.z); - atomicAdd(&frc[atom_j].x, fj.x); - atomicAdd(&frc[atom_j].y, fj.y); - atomicAdd(&frc[atom_j].z, fj.z); - atomicAdd(&frc[atom_k].x, fk.x); - atomicAdd(&frc[atom_k].y, fk.y); - atomicAdd(&frc[atom_k].z, fk.z); - atomicAdd(&frc[atom_l].x, fl.x); - atomicAdd(&frc[atom_l].y, fl.y); - atomicAdd(&frc[atom_l].z, fl.z); - - atomicAdd(&ene[atom_i], (temp_pk + cos_nphi * temp_gamc + sin_nphi * temp_gams)); - } -} - -void DihedralForceWithAtomEnergy(int dihedral_numbers, int atom_numbers, const int *uint_crd_f, const float *scaler_f, - const int *atom_a, const int *atom_b, const int *atom_c, const int *atom_d, - const int *ipn, const float *pk, const float *gamc, const float *gams, const float *pn, - float *frc_f, float *ene, cudaStream_t stream) { - Reset_List<<<ceilf(static_cast<float>(3. * atom_numbers) / 128), 128, 0, stream>>>(3 * atom_numbers, frc_f, 0.); - size_t thread_per_block = 128; - size_t block_per_grid = ceilf(static_cast<float>(dihedral_numbers) / 128); - UNSIGNED_INT_VECTOR *uint_crd = - const_cast<UNSIGNED_INT_VECTOR *>(reinterpret_cast<const UNSIGNED_INT_VECTOR *>(uint_crd_f)); - VECTOR *frc = const_cast<VECTOR *>(reinterpret_cast<const VECTOR *>(frc_f)); - VECTOR *scaler = const_cast<VECTOR *>(reinterpret_cast<const VECTOR *>(scaler_f)); - - DihedralForceWithAtomEnergyKernel<<<block_per_grid, thread_per_block, 0, stream>>>( - dihedral_numbers, uint_crd, scaler, atom_a, atom_b, atom_c, atom_d, ipn, pk, gamc, gams, pn, frc, ene); - return; -} -void DihedralForceWithAtomEnergy(int dihedral_numbers, int atom_numbers, const int *uint_crd_f, const float *scaler_f, - const int *atom_a, const int *atom_b, const int *atom_c, const int *atom_d, - const int *ipn, const float *pk, const float *gamc, const float *gams, const float *pn, - float *frc_f, float *ene, cudaStream_t stream); diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/dihedral/dihedral_force_with_atom_energy_impl.cuh b/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/dihedral/dihedral_force_with_atom_energy_impl.cuh deleted file mode 100644 index 82dc3e26bb6..00000000000 --- a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/dihedral/dihedral_force_with_atom_energy_impl.cuh +++ /dev/null @@ -1,29 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_SPONGE_DIHEDRAL_DIHEDRAL_FORCE_WITH_ATOM_ENERGY_IMPL_H_ -#define MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_SPONGE_DIHEDRAL_DIHEDRAL_FORCE_WITH_ATOM_ENERGY_IMPL_H_ - -#include <cuda_runtime_api.h> -#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/cuda_common.h" -#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/cuda_device_info.h" - -CUDA_LIB_EXPORT void DihedralForceWithAtomEnergy(int dihedral_numbers, int atom_numbers, const int *uint_crd_f, - const float *scaler_f, const int *atom_a, const int *atom_b, - const int *atom_c, const int *atom_d, const int *ipn, const float *pk, - const float *gamc, const float *gams, const float *pn, float *frc_f, - float *ene, cudaStream_t stream); -#endif diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/lj/lj_direct_cf_force_with_lj_virial_direct_cf_energy_impl.cu b/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/lj/lj_direct_cf_force_with_lj_virial_direct_cf_energy_impl.cu deleted file mode 100644 index 4a77a61bb11..00000000000 --- a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/lj/lj_direct_cf_force_with_lj_virial_direct_cf_energy_impl.cu +++ /dev/null @@ -1,144 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - /** - * Note: - * LJForce. This is an experimental interface that is subject to change and/or deletion. - */ - -#include "plugin/device/gpu/kernel/cuda_impl/sponge/lj/lj_direct_cf_force_with_lj_virial_direct_cf_energy_impl.cuh" -#include "plugin/device/gpu/kernel/cuda_impl/sponge/common_sponge.cuh" - -__global__ void LJ_Direct_CF_Force_With_LJ_Virial_Direct_CF_Energy_CUDA( - const int atom_numbers, const NEIGHBOR_LIST *nl, const UINT_VECTOR_LJ_TYPE *uint_crd, const VECTOR *boxlength, - const float *LJ_type_A, const float *LJ_type_B, const float cutoff, VECTOR *frc, const float pme_beta, - const float sqrt_pi, float *atom_lj_virial, float *atom_direct_cf_energy) { - int atom_i = blockDim.x * blockIdx.x + threadIdx.x; - if (atom_i < atom_numbers) { - NEIGHBOR_LIST nl_i = nl[atom_i]; - int N = nl_i.atom_numbers; - int atom_j; - int int_x; - int int_y; - int int_z; - UINT_VECTOR_LJ_TYPE r1 = uint_crd[atom_i], r2; - VECTOR dr; - float dr_2; - float dr_4; - float dr_8; - float dr_6; - float frc_abs = 0.; - VECTOR frc_lin; - VECTOR frc_record = {0., 0., 0.}; - float charge_i = r1.charge; - float charge_j; - float dr_abs; - float dr_1; - float beta_dr; - float frc_cf_abs; - float virial_lin = 0.; - float energy_lin = 0.; - - int x, y; - int atom_pair_LJ_type; - for (int j = threadIdx.y; j < N; j = j + blockDim.y) { - atom_j = nl_i.atom_serial[j]; - r2 = uint_crd[atom_j]; - charge_j = r2.charge; - - int_x = r2.uint_x - r1.uint_x; - int_y = r2.uint_y - r1.uint_y; - int_z = r2.uint_z - r1.uint_z; - dr.x = boxlength[0].x * int_x; - dr.y = boxlength[0].y * int_y; - dr.z = boxlength[0].z * int_z; - dr_abs = norm3df(dr.x, dr.y, dr.z); - if (dr_abs < cutoff) { - dr_1 = 1. 
/ dr_abs; - dr_2 = dr_1 * dr_1; - dr_4 = dr_2 * dr_2; - dr_8 = dr_4 * dr_4; - dr_6 = dr_4 * dr_2; - - // branch-free abs/min/max: maps the unordered LJ type pair to its packed lower-triangle index - y = (r2.LJ_type - r1.LJ_type); - x = y >> 31; - y = (y ^ x) - x; - x = r2.LJ_type + r1.LJ_type; - r2.LJ_type = (x + y) >> 1; - x = (x - y) >> 1; - atom_pair_LJ_type = (r2.LJ_type * (r2.LJ_type + 1) >> 1) + x; - - frc_abs = (-LJ_type_A[atom_pair_LJ_type] * dr_6 + LJ_type_B[atom_pair_LJ_type]) * dr_8; - beta_dr = pme_beta * dr_abs; - frc_cf_abs = beta_dr * sqrt_pi * expf(-beta_dr * beta_dr) + erfcf(beta_dr); - frc_cf_abs = frc_cf_abs * dr_2 * dr_1; - frc_cf_abs = charge_i * charge_j * frc_cf_abs; - energy_lin = energy_lin + charge_i * charge_j * erfcf(beta_dr) * dr_1; - virial_lin = virial_lin - frc_abs * dr_abs * dr_abs; - frc_abs = frc_abs - frc_cf_abs; - frc_lin.x = frc_abs * dr.x; - frc_lin.y = frc_abs * dr.y; - frc_lin.z = frc_abs * dr.z; - frc_record.x = frc_record.x + frc_lin.x; - frc_record.y = frc_record.y + frc_lin.y; - frc_record.z = frc_record.z + frc_lin.z; - atomicAdd(&frc[atom_j].x, -frc_lin.x); - atomicAdd(&frc[atom_j].y, -frc_lin.y); - atomicAdd(&frc[atom_j].z, -frc_lin.z); - } - } - atomicAdd(&frc[atom_i].x, frc_record.x); - atomicAdd(&frc[atom_i].y, frc_record.y); - atomicAdd(&frc[atom_i].z, frc_record.z); - - atomicAdd(&atom_direct_cf_energy[atom_i], energy_lin); - atomicAdd(&atom_lj_virial[atom_i], virial_lin); - } -} - -void LJ_Direct_CF_Force_With_LJ_Virial_Direct_CF_Energy( - const int atom_numbers, const float cutoff, const float pme_beta, const unsigned int *uint_crd_f, const int *LJtype, - const float *charge, const float *scaler_f, float *uint_crd_with_LJ, int *nl_atom_numbers, int *nl_atom_serial, - int *nl, const float *d_LJ_A, const float *d_LJ_B, float *frc_f, float *atom_lj_virial, float *atom_direct_cf_energy, - int max_neighbor_numbers, cudaStream_t stream) { - Reset_List<<<ceilf(static_cast<float>(3. * atom_numbers) / 128), 128, 0, stream>>>(3 * atom_numbers, frc_f, 0.); - Reset_List<<<ceilf(static_cast<float>(atom_numbers) / 128), 128, 0, stream>>>(atom_numbers, atom_direct_cf_energy, 0.); - Reset_List<<<ceilf(static_cast<float>(atom_numbers) / 128), 128, 0, stream>>>(atom_numbers, atom_lj_virial, 0.); - VECTOR *frc = reinterpret_cast<VECTOR *>(frc_f); - VECTOR *scaler = const_cast<VECTOR *>(reinterpret_cast<const VECTOR *>(scaler_f)); - NEIGHBOR_LIST *nl_a = reinterpret_cast<NEIGHBOR_LIST *>(nl); - construct_neighbor_list_kernel<<<ceilf(static_cast<float>(atom_numbers) / 128), 128, 0, stream>>>( - atom_numbers, max_neighbor_numbers, nl_atom_numbers, nl_atom_serial, nl_a); - - UINT_VECTOR_LJ_TYPE *uint_crd_with_LJ_a = reinterpret_cast<UINT_VECTOR_LJ_TYPE *>(uint_crd_with_LJ); - - UNSIGNED_INT_VECTOR *uint_crd = - const_cast<UNSIGNED_INT_VECTOR *>(reinterpret_cast<const UNSIGNED_INT_VECTOR *>(uint_crd_f)); - - Copy_Crd_To_New_Crd_Start<<<ceilf(static_cast<float>(atom_numbers) / 32), 32, 0, stream>>>( - atom_numbers, uint_crd, uint_crd_with_LJ_a, LJtype, charge); - - dim3 thread_LJ(8, 32); - LJ_Direct_CF_Force_With_LJ_Virial_Direct_CF_Energy_CUDA<<<ceilf(static_cast<float>(atom_numbers) / 8), thread_LJ, 0, - stream>>>( - atom_numbers, nl_a, uint_crd_with_LJ_a, scaler, d_LJ_A, d_LJ_B, cutoff, frc, pme_beta, TWO_DIVIDED_BY_SQRT_PI, - atom_lj_virial, atom_direct_cf_energy); - return; -} - -void LJ_Direct_CF_Force_With_LJ_Virial_Direct_CF_Energy( - const int atom_numbers, const float cutoff, const float pme_beta, const unsigned int *uint_crd_f, const int *LJtype, - const float *charge, const float *scaler_f, float *uint_crd_with_LJ, int *nl_atom_numbers, int *nl_atom_serial, - int *nl, const float *d_LJ_A, const float *d_LJ_B, float *frc_f, float *atom_lj_virial, float *atom_energy, - int max_neighbor_numbers, cudaStream_t stream); diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/lj/lj_direct_cf_force_with_lj_virial_direct_cf_energy_impl.cuh b/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/lj/lj_direct_cf_force_with_lj_virial_direct_cf_energy_impl.cuh deleted file mode 100644 index 5ad9f2f4609..00000000000 --- a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/lj/lj_direct_cf_force_with_lj_virial_direct_cf_energy_impl.cuh +++ /dev/null @@ -1,34 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -/** - * Note: - * LJForce. This is an experimental interface that is subject to change and/or deletion. 
- */ - -#ifndef MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_LJ_DIRECT_CF_FORCE_WITH_LJ_VIRIAL_DIRECT_CF_ENERGY_IMPL_H_ -#define MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_LJ_DIRECT_CF_FORCE_WITH_LJ_VIRIAL_DIRECT_CF_ENERGY_IMPL_H_ - -#include -#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/cuda_common.h" -#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/cuda_device_info.h" - -CUDA_LIB_EXPORT void LJ_Direct_CF_Force_With_LJ_Virial_Direct_CF_Energy( - const int atom_numbers, const float cutoff, const float pme_beta, const unsigned int *uint_crd_f, const int *LJtype, - const float *charge, const float *scaler_f, float *uint_crd_with_LJ, int *nl_atom_numbers, int *nl_atom_serial, - int *nl, const float *d_LJ_A, const float *d_LJ_B, float *frc_f, float *atom_lj_virial, float *atom_energy, - int max_neighbor_numbers, cudaStream_t stream); - -#endif diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/lj/lj_energy_impl.cu b/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/lj/lj_energy_impl.cu deleted file mode 100644 index 5b87d36ea75..00000000000 --- a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/lj/lj_energy_impl.cu +++ /dev/null @@ -1,102 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "plugin/device/gpu/kernel/cuda_impl/sponge/lj/lj_energy_impl.cuh" -#include "plugin/device/gpu/kernel/cuda_impl/sponge/common_sponge.cuh" - -__global__ void LJ_Energy_CUDA(const int atom_numbers, const NEIGHBOR_LIST *nl, const UINT_VECTOR_LJ_TYPE *uint_crd, - const VECTOR *boxlength, const float *LJ_type_A, const float *LJ_type_B, - const float cutoff_square, float *lj_ene) { - int atom_i = blockDim.x * blockIdx.x + threadIdx.x; - if (atom_i < atom_numbers) { - NEIGHBOR_LIST nl_i = nl[atom_i]; - int N = nl_i.atom_numbers; - int atom_j; - int int_x; - int int_y; - int int_z; - UINT_VECTOR_LJ_TYPE r1 = uint_crd[atom_i], r2; - VECTOR dr; - float dr2; - float dr_2; - float dr_4; - float dr_6; - float ene_lin = 0.; - - int x, y; - int atom_pair_LJ_type; - for (int j = threadIdx.y; j < N; j = j + blockDim.y) { - atom_j = nl_i.atom_serial[j]; - r2 = uint_crd[atom_j]; - - int_x = r2.uint_x - r1.uint_x; - int_y = r2.uint_y - r1.uint_y; - int_z = r2.uint_z - r1.uint_z; - dr.x = boxlength[0].x * int_x; - dr.y = boxlength[0].y * int_y; - dr.z = boxlength[0].z * int_z; - - dr2 = dr.x * dr.x + dr.y * dr.y + dr.z * dr.z; - if (dr2 < cutoff_square) { - dr_2 = 1. 
/ dr2; - dr_4 = dr_2 * dr_2; - dr_6 = dr_4 * dr_2; - - y = (r2.LJ_type - r1.LJ_type); - x = y >> 31; - y = (y ^ x) - x; - x = r2.LJ_type + r1.LJ_type; - r2.LJ_type = (x + y) >> 1; - x = (x - y) >> 1; - atom_pair_LJ_type = (r2.LJ_type * (r2.LJ_type + 1) >> 1) + x; - - dr_2 = (0.083333333 * LJ_type_A[atom_pair_LJ_type] * dr_6 - 0.166666666 * LJ_type_B[atom_pair_LJ_type]) * dr_6; - ene_lin = ene_lin + dr_2; - } - } - atomicAdd(&lj_ene[atom_i], ene_lin); - } -} - -void LJEnergy(const int atom_numbers, const float cutoff_square, const int *uint_crd_f, const int *LJtype, - const float *charge, const float *scaler_f, float *uint_crd_with_LJ, int *nl_atom_numbers, - int *nl_atom_serial, int *nl, const float *d_LJ_A, const float *d_LJ_B, float *d_LJ_energy_atom, - cudaStream_t stream) { - VECTOR *scaler = const_cast<VECTOR *>(reinterpret_cast<const VECTOR *>(scaler_f)); - int max_neighbor_numbers = 800; - NEIGHBOR_LIST *nl_a = reinterpret_cast<NEIGHBOR_LIST *>(nl); - construct_neighbor_list_kernel<<<ceilf(static_cast<float>(atom_numbers) / 128), 128, 0, stream>>>( - atom_numbers, max_neighbor_numbers, nl_atom_numbers, nl_atom_serial, nl_a); - - UINT_VECTOR_LJ_TYPE *uint_crd_with_LJ_a = reinterpret_cast<UINT_VECTOR_LJ_TYPE *>(uint_crd_with_LJ); - - UNSIGNED_INT_VECTOR *uint_crd = - const_cast<UNSIGNED_INT_VECTOR *>(reinterpret_cast<const UNSIGNED_INT_VECTOR *>(uint_crd_f)); - - Copy_Crd_To_New_Crd_Start<<<ceilf(static_cast<float>(atom_numbers) / 32), 32, 0, stream>>>( - atom_numbers, uint_crd, uint_crd_with_LJ_a, LJtype, charge); - - Reset_List<<<ceilf(static_cast<float>(atom_numbers) / 32), 32, 0, stream>>>(atom_numbers, d_LJ_energy_atom, 0.); - - dim3 thread_LJ(8, 32); - LJ_Energy_CUDA<<<ceilf(static_cast<float>(atom_numbers) / 8), thread_LJ, 0, stream>>>( - atom_numbers, nl_a, uint_crd_with_LJ_a, scaler, d_LJ_A, d_LJ_B, cutoff_square, d_LJ_energy_atom); - - return; -} -void LJEnergy(const int atom_numbers, const float cutoff_square, const int *uint_crd_f, const int *LJtype, - const float *charge, const float *scaler_f, float *uint_crd_with_LJ, int *nl_atom_numbers, - int *nl_atom_serial, int *nl, const float *d_LJ_A, const float *d_LJ_B, float *d_LJ_energy_atom, - cudaStream_t stream); diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/lj/lj_energy_impl.cuh b/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/lj/lj_energy_impl.cuh deleted file mode 100644 index a97a462aad8..00000000000 --- a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/lj/lj_energy_impl.cuh +++ /dev/null @@ -1,28 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_SPONGE_LJ_LJ_ENERGY_IMPL_H_ -#define MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_SPONGE_LJ_LJ_ENERGY_IMPL_H_ - -#include -#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/cuda_common.h" -#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/cuda_device_info.h" - -CUDA_LIB_EXPORT void LJEnergy(const int atom_numbers, const float cutoff_square, const int *uint_crd_f, - const int *LJtype, const float *charge, const float *scaler_f, float *uint_crd_with_LJ, - int *nl_atom_numbers, int *nl_atom_serial, int *nl, const float *d_LJ_A, - const float *d_LJ_B, float *d_LJ_energy_atom, cudaStream_t stream); -#endif diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/lj/lj_force_impl.cu b/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/lj/lj_force_impl.cu deleted file mode 100644 index 38cc77f2b3e..00000000000 --- a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/lj/lj_force_impl.cu +++ /dev/null @@ -1,117 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "plugin/device/gpu/kernel/cuda_impl/sponge/lj/lj_force_impl.cuh" -#include "plugin/device/gpu/kernel/cuda_impl/sponge/common_sponge.cuh" - -__global__ void LJ_Force_CUDA(const int atom_numbers, const NEIGHBOR_LIST *nl, const UINT_VECTOR_LJ_TYPE *uint_crd, - const VECTOR *boxlength, const float *LJ_type_A, const float *LJ_type_B, - const float cutoff_square, VECTOR *frc) { - int atom_i = blockDim.x * blockIdx.x + threadIdx.x; - if (atom_i < atom_numbers) { - NEIGHBOR_LIST nl_i = nl[atom_i]; - int N = nl_i.atom_numbers; - int B = ceilf(static_cast(N) / blockDim.y); - int atom_j; - int int_x; - int int_y; - int int_z; - UINT_VECTOR_LJ_TYPE r1 = uint_crd[atom_i], r2; - VECTOR dr; - float dr2; - float dr_2; - float dr_4; - float dr_8; - float dr_14; - float frc_abs = 0.; - VECTOR frc_lin; - VECTOR frc_record = {0., 0., 0.}; - - int x, y; - int atom_pair_LJ_type; - for (int j = threadIdx.y * B; j < (threadIdx.y + 1) * B; j = j + 1) { - if (j < N) { - atom_j = nl_i.atom_serial[j]; - r2 = uint_crd[atom_j]; - int_x = r2.uint_x - r1.uint_x; - int_y = r2.uint_y - r1.uint_y; - int_z = r2.uint_z - r1.uint_z; - dr.x = boxlength[0].x * int_x; - dr.y = boxlength[0].y * int_y; - dr.z = boxlength[0].z * int_z; - dr2 = dr.x * dr.x + dr.y * dr.y + dr.z * dr.z; - if (dr2 < cutoff_square) { - dr_2 = 1. 
/ dr2; - dr_4 = dr_2 * dr_2; - dr_8 = dr_4 * dr_4; - dr_14 = dr_8 * dr_4 * dr_2; - - y = (r2.LJ_type - r1.LJ_type); - x = y >> 31; - y = (y ^ x) - x; - x = r2.LJ_type + r1.LJ_type; - r2.LJ_type = (x + y) >> 1; - x = (x - y) >> 1; - atom_pair_LJ_type = (r2.LJ_type * (r2.LJ_type + 1) >> 1) + x; - - frc_abs = -LJ_type_A[atom_pair_LJ_type] * dr_14 + LJ_type_B[atom_pair_LJ_type] * dr_8; - frc_lin.x = frc_abs * dr.x; - frc_lin.y = frc_abs * dr.y; - frc_lin.z = frc_abs * dr.z; - - frc_record.x = frc_record.x + frc_lin.x; - frc_record.y = frc_record.y + frc_lin.y; - frc_record.z = frc_record.z + frc_lin.z; - - atomicAdd(&frc[atom_j].x, -frc_lin.x); - atomicAdd(&frc[atom_j].y, -frc_lin.y); - atomicAdd(&frc[atom_j].z, -frc_lin.z); - } - } - } - atomicAdd(&frc[atom_i].x, frc_record.x); - atomicAdd(&frc[atom_i].y, frc_record.y); - atomicAdd(&frc[atom_i].z, frc_record.z); - } -} - -void LJForce(const int atom_numbers, const float cutoff_square, const int *uint_crd_f, const int *LJtype, - const float *charge, const float *scaler_f, float *uint_crd_with_LJ, int *nl_atom_numbers, - int *nl_atom_serial, int *nl, const float *d_LJ_A, const float *d_LJ_B, float *frc_f, - cudaStream_t stream) { - Reset_List<<<ceilf(static_cast<float>(3. * atom_numbers) / 128), 128, 0, stream>>>(3 * atom_numbers, frc_f, 0.); - VECTOR *frc = reinterpret_cast<VECTOR *>(frc_f); - VECTOR *scaler = const_cast<VECTOR *>(reinterpret_cast<const VECTOR *>(scaler_f)); - int max_neighbor_numbers = 800; - NEIGHBOR_LIST *nl_a = reinterpret_cast<NEIGHBOR_LIST *>(nl); - construct_neighbor_list_kernel<<<ceilf(static_cast<float>(atom_numbers) / 128), 128, 0, stream>>>( - atom_numbers, max_neighbor_numbers, nl_atom_numbers, nl_atom_serial, nl_a); - - UINT_VECTOR_LJ_TYPE *uint_crd_with_LJ_a = reinterpret_cast<UINT_VECTOR_LJ_TYPE *>(uint_crd_with_LJ); - - UNSIGNED_INT_VECTOR *uint_crd = - const_cast<UNSIGNED_INT_VECTOR *>(reinterpret_cast<const UNSIGNED_INT_VECTOR *>(uint_crd_f)); - - Copy_Crd_To_New_Crd_Start<<<ceilf(static_cast<float>(atom_numbers) / 32), 32, 0, stream>>>( - atom_numbers, uint_crd, uint_crd_with_LJ_a, LJtype, charge); - - dim3 thread_LJ(8, 32); - LJ_Force_CUDA<<<ceilf(static_cast<float>(atom_numbers) / 8), thread_LJ, 0, stream>>>( - atom_numbers, nl_a, uint_crd_with_LJ_a, scaler, d_LJ_A, d_LJ_B, cutoff_square, frc); - return; -} -void LJForce(const int atom_numbers, const float cutoff_square, const int *uint_crd_f, const int *LJtype, - const float *charge, const float *scaler_f, float *uint_crd_with_LJ, int *nl_atom_numbers, - int *nl_atom_serial, int *nl, const float *d_LJ_A, const float *d_LJ_B, float *frc_f, cudaStream_t stream); diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/lj/lj_force_impl.cuh b/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/lj/lj_force_impl.cuh deleted file mode 100644 index 5c8462144b6..00000000000 --- a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/lj/lj_force_impl.cuh +++ /dev/null @@ -1,29 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_SPONGE_LJ_LJ_FORCE_IMPL_H_ -#define MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_SPONGE_LJ_LJ_FORCE_IMPL_H_ - -#include -#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/cuda_common.h" -#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/cuda_device_info.h" - -CUDA_LIB_EXPORT void LJForce(const int atom_numbers, const float cutoff_square, const int *uint_crd_f, - const int *LJtype, const float *charge, const float *scaler_f, float *uint_crd_with_LJ, - int *nl_atom_numbers, int *nl_atom_serial, int *nl, const float *d_LJ_A, - const float *d_LJ_B, float *frc_f, cudaStream_t stream); - -#endif diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/lj/lj_force_with_pme_direct_force_impl.cu b/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/lj/lj_force_with_pme_direct_force_impl.cu deleted file mode 100644 index 237ddea3e95..00000000000 --- a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/lj/lj_force_with_pme_direct_force_impl.cu +++ /dev/null @@ -1,133 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "plugin/device/gpu/kernel/cuda_impl/sponge/lj/lj_force_with_pme_direct_force_impl.cuh" -#include "plugin/device/gpu/kernel/cuda_impl/sponge/common_sponge.cuh" - -__global__ void LJ_Force_With_Direct_CF_CUDA(const int atom_numbers, const NEIGHBOR_LIST *nl, - const UINT_VECTOR_LJ_TYPE *uint_crd, const VECTOR *boxlength, - const float *LJ_type_A, const float *LJ_type_B, const float cutoff, - VECTOR *frc, const float pme_beta, const float sqrt_pi) { - int atom_i = blockDim.x * blockIdx.x + threadIdx.x; - if (atom_i < atom_numbers) { - NEIGHBOR_LIST nl_i = nl[atom_i]; - int N = nl_i.atom_numbers; - int atom_j; - int int_x; - int int_y; - int int_z; - UINT_VECTOR_LJ_TYPE r1 = uint_crd[atom_i], r2; - VECTOR dr; - float dr_2; - float dr_4; - float dr_8; - float dr_6; - float frc_abs = 0.; - VECTOR frc_lin; - VECTOR frc_record = {0., 0., 0.}; - - float charge_i = r1.charge; - float charge_j; - float dr_abs; - float dr_1; - float beta_dr; - float frc_cf_abs; - - int x, y; - int atom_pair_LJ_type; - for (int j = threadIdx.y; j < N; j = j + blockDim.y) { - atom_j = nl_i.atom_serial[j]; - r2 = uint_crd[atom_j]; - charge_j = r2.charge; - - int_x = r2.uint_x - r1.uint_x; - int_y = r2.uint_y - r1.uint_y; - int_z = r2.uint_z - r1.uint_z; - dr.x = boxlength[0].x * int_x; - dr.y = boxlength[0].y * int_y; - dr.z = boxlength[0].z * int_z; - dr_abs = norm3df(dr.x, dr.y, dr.z); - if (dr_abs < cutoff) { - dr_1 = 1. 
/ dr_abs; - dr_2 = dr_1 * dr_1; - dr_4 = dr_2 * dr_2; - dr_8 = dr_4 * dr_4; - dr_6 = dr_4 * dr_2; - - y = (r2.LJ_type - r1.LJ_type); - x = y >> 31; - y = (y ^ x) - x; - x = r2.LJ_type + r1.LJ_type; - r2.LJ_type = (x + y) >> 1; - x = (x - y) >> 1; - atom_pair_LJ_type = (r2.LJ_type * (r2.LJ_type + 1) >> 1) + x; - - frc_abs = (-LJ_type_A[atom_pair_LJ_type] * dr_6 + LJ_type_B[atom_pair_LJ_type]) * dr_8; - beta_dr = pme_beta * dr_abs; - frc_cf_abs = beta_dr * sqrt_pi * expf(-beta_dr * beta_dr) + erfcf(beta_dr); - frc_cf_abs = frc_cf_abs * dr_2 * dr_1; - frc_cf_abs = charge_i * charge_j * frc_cf_abs; - - frc_abs = frc_abs - frc_cf_abs; - - frc_lin.x = frc_abs * dr.x; - frc_lin.y = frc_abs * dr.y; - frc_lin.z = frc_abs * dr.z; - - frc_record.x = frc_record.x + frc_lin.x; - frc_record.y = frc_record.y + frc_lin.y; - frc_record.z = frc_record.z + frc_lin.z; - - atomicAdd(&frc[atom_j].x, -frc_lin.x); - atomicAdd(&frc[atom_j].y, -frc_lin.y); - atomicAdd(&frc[atom_j].z, -frc_lin.z); - } - } - atomicAdd(&frc[atom_i].x, frc_record.x); - atomicAdd(&frc[atom_i].y, frc_record.y); - atomicAdd(&frc[atom_i].z, frc_record.z); - } -} - -void LJForceWithPMEDirectForce(const int atom_numbers, const float cutoff, const float pme_beta, const int *uint_crd_f, - const int *LJtype, const float *charge, const float *scaler_f, float *uint_crd_with_LJ, - int *nl_atom_numbers, int *nl_atom_serial, int *nl, const float *d_LJ_A, - const float *d_LJ_B, float *frc_f, cudaStream_t stream) { - Reset_List<<(3. * atom_numbers) / 128), 128, 0, stream>>>(3 * atom_numbers, frc_f, 0.); - VECTOR *frc = reinterpret_cast(frc_f); - VECTOR *scaler = const_cast(reinterpret_cast(scaler_f)); - int max_neighbor_numbers = 800; - NEIGHBOR_LIST *nl_a = reinterpret_cast(nl); - construct_neighbor_list_kernel<<(atom_numbers) / 128), 128, 0, stream>>>( - atom_numbers, max_neighbor_numbers, nl_atom_numbers, nl_atom_serial, nl_a); - - UINT_VECTOR_LJ_TYPE *uint_crd_with_LJ_a = reinterpret_cast(uint_crd_with_LJ); - - UNSIGNED_INT_VECTOR *uint_crd = - const_cast(reinterpret_cast(uint_crd_f)); - - Copy_Crd_To_New_Crd_Start<<(atom_numbers) / 32), 32, 0, stream>>>( - atom_numbers, uint_crd, uint_crd_with_LJ_a, LJtype, charge); - - LJ_Force_With_Direct_CF_CUDA<<(atom_numbers) / 8), thread_LJ, 0, stream>>>( - atom_numbers, nl_a, uint_crd_with_LJ_a, scaler, d_LJ_A, d_LJ_B, cutoff, frc, pme_beta, TWO_DIVIDED_BY_SQRT_PI); - return; -} - -void LJForceWithPMEDirectForce(const int atom_numbers, const float cutoff, const float pme_beta, const int *uint_crd_f, - const int *LJtype, const float *charge, const float *scaler_f, float *uint_crd_with_LJ, - int *nl_atom_numbers, int *nl_atom_serial, int *nl, const float *d_LJ_A, - const float *d_LJ_B, float *frc_f, cudaStream_t stream); diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/lj/lj_force_with_pme_direct_force_impl.cuh b/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/lj/lj_force_with_pme_direct_force_impl.cuh deleted file mode 100644 index 22c603c9642..00000000000 --- a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/lj/lj_force_with_pme_direct_force_impl.cuh +++ /dev/null @@ -1,30 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_SPONGE_LJ_LJ_FORCE_WITH_PME_DIRECT_FORCE_IMPL_H_ -#define MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_SPONGE_LJ_LJ_FORCE_WITH_PME_DIRECT_FORCE_IMPL_H_ - -#include -#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/cuda_common.h" -#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/cuda_device_info.h" - -CUDA_LIB_EXPORT void LJForceWithPMEDirectForce(const int atom_numbers, const float cutoff, const float pme_beta, - const int *uint_crd_f, const int *LJtype, const float *charge, - const float *scaler_f, float *uint_crd_with_LJ, int *nl_atom_numbers, - int *nl_atom_serial, int *nl, const float *d_LJ_A, const float *d_LJ_B, - float *frc_f, cudaStream_t stream); - -#endif diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/lj/lj_pme_direct_force_with_atom_energy_impl.cu b/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/lj/lj_pme_direct_force_with_atom_energy_impl.cu deleted file mode 100644 index c94d8888e88..00000000000 --- a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/lj/lj_pme_direct_force_with_atom_energy_impl.cu +++ /dev/null @@ -1,147 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include "plugin/device/gpu/kernel/cuda_impl/sponge/lj/lj_pme_direct_force_with_atom_energy_impl.cuh" -#include "plugin/device/gpu/kernel/cuda_impl/sponge/common_sponge.cuh" - -__global__ void LJ_Direct_CF_Force_With_Atom_Energy_CUDA(const int atom_numbers, const NEIGHBOR_LIST *nl, - const UINT_VECTOR_LJ_TYPE *uint_crd, const VECTOR *boxlength, - const float *LJ_type_A, const float *LJ_type_B, - const float cutoff, VECTOR *frc, const float pme_beta, - const float sqrt_pi, float *atom_energy) { - int atom_i = blockDim.x * blockIdx.x + threadIdx.x; - if (atom_i < atom_numbers) { - NEIGHBOR_LIST nl_i = nl[atom_i]; - int N = nl_i.atom_numbers; - int atom_j; - int int_x; - int int_y; - int int_z; - UINT_VECTOR_LJ_TYPE r1 = uint_crd[atom_i], r2; - VECTOR dr; - float dr_2; - float dr_4; - float dr_8; - float dr_6; - float frc_abs = 0.; - VECTOR frc_lin; - VECTOR frc_record = {0., 0., 0.}; - - float charge_i = r1.charge; - float charge_j; - float dr_abs; - float dr_1; - float beta_dr; - float frc_cf_abs; - - float ene_lin = 0.; - float ene_lin2 = 0.; - - int x, y; - int atom_pair_LJ_type; - for (int j = threadIdx.y; j < N; j = j + blockDim.y) { - atom_j = nl_i.atom_serial[j]; - r2 = uint_crd[atom_j]; - charge_j = r2.charge; - - int_x = r2.uint_x - r1.uint_x; - int_y = r2.uint_y - r1.uint_y; - int_z = r2.uint_z - r1.uint_z; - dr.x = boxlength[0].x * int_x; - dr.y = boxlength[0].y * int_y; - dr.z = boxlength[0].z * int_z; - dr_abs = norm3df(dr.x, dr.y, dr.z); - if (dr_abs < cutoff) { - dr_1 = 1. / dr_abs; - dr_2 = dr_1 * dr_1; - dr_4 = dr_2 * dr_2; - dr_8 = dr_4 * dr_4; - dr_6 = dr_4 * dr_2; - - y = (r2.LJ_type - r1.LJ_type); - x = y >> 31; - y = (y ^ x) - x; - x = r2.LJ_type + r1.LJ_type; - r2.LJ_type = (x + y) >> 1; - x = (x - y) >> 1; - atom_pair_LJ_type = (r2.LJ_type * (r2.LJ_type + 1) >> 1) + x; - - frc_abs = (-LJ_type_A[atom_pair_LJ_type] * dr_6 + LJ_type_B[atom_pair_LJ_type]) * dr_8; - beta_dr = pme_beta * dr_abs; - frc_cf_abs = beta_dr * sqrt_pi * expf(-beta_dr * beta_dr) + erfcf(beta_dr); - frc_cf_abs = frc_cf_abs * dr_2 * dr_1; - frc_cf_abs = charge_i * charge_j * frc_cf_abs; - - ene_lin2 = ene_lin2 + charge_i * charge_j * erfcf(beta_dr) * dr_1; - ene_lin = - ene_lin + - (0.083333333 * LJ_type_A[atom_pair_LJ_type] * dr_6 - 0.166666666 * LJ_type_B[atom_pair_LJ_type]) * dr_6; - - frc_abs = frc_abs - frc_cf_abs; - - frc_lin.x = frc_abs * dr.x; - frc_lin.y = frc_abs * dr.y; - frc_lin.z = frc_abs * dr.z; - - frc_record.x = frc_record.x + frc_lin.x; - frc_record.y = frc_record.y + frc_lin.y; - frc_record.z = frc_record.z + frc_lin.z; - - atomicAdd(&frc[atom_j].x, -frc_lin.x); - atomicAdd(&frc[atom_j].y, -frc_lin.y); - atomicAdd(&frc[atom_j].z, -frc_lin.z); - } - } - atomicAdd(&frc[atom_i].x, frc_record.x); - atomicAdd(&frc[atom_i].y, frc_record.y); - atomicAdd(&frc[atom_i].z, frc_record.z); - - atomicAdd(&atom_energy[atom_i], ene_lin + ene_lin2); - } -} - -void LJDirectCFForceWithAtomEnergy(const int atom_numbers, const float cutoff, const float pme_beta, - const int *uint_crd_f, const int *LJtype, const float *charge, const float *scaler_f, - float *uint_crd_with_LJ, int *nl_atom_numbers, int *nl_atom_serial, int *nl, - const float *d_LJ_A, const float *d_LJ_B, float *frc_f, float *atom_energy, - cudaStream_t stream) { - Reset_List<<(3. 
* atom_numbers) / 128), 128, 0, stream>>>(3 * atom_numbers, frc_f, 0.); - VECTOR *frc = reinterpret_cast<VECTOR *>(frc_f); - VECTOR *scaler = const_cast<VECTOR *>(reinterpret_cast<const VECTOR *>(scaler_f)); - int max_neighbor_numbers = 800; - NEIGHBOR_LIST *nl_a = reinterpret_cast<NEIGHBOR_LIST *>(nl); - construct_neighbor_list_kernel<<<ceilf(static_cast<float>(atom_numbers) / 128), 128, 0, stream>>>( - atom_numbers, max_neighbor_numbers, nl_atom_numbers, nl_atom_serial, nl_a); - - UINT_VECTOR_LJ_TYPE *uint_crd_with_LJ_a = reinterpret_cast<UINT_VECTOR_LJ_TYPE *>(uint_crd_with_LJ); - - UNSIGNED_INT_VECTOR *uint_crd = - const_cast<UNSIGNED_INT_VECTOR *>(reinterpret_cast<const UNSIGNED_INT_VECTOR *>(uint_crd_f)); - - Copy_Crd_To_New_Crd_Start<<<ceilf(static_cast<float>(atom_numbers) / 32), 32, 0, stream>>>( - atom_numbers, uint_crd, uint_crd_with_LJ_a, LJtype, charge); - - dim3 thread_LJ(8, 32); - LJ_Direct_CF_Force_With_Atom_Energy_CUDA<<<ceilf(static_cast<float>(atom_numbers) / 8), thread_LJ, 0, stream>>>( - atom_numbers, nl_a, uint_crd_with_LJ_a, scaler, d_LJ_A, d_LJ_B, cutoff, frc, pme_beta, TWO_DIVIDED_BY_SQRT_PI, - atom_energy); - return; -} - -void LJDirectCFForceWithAtomEnergy(const int atom_numbers, const float cutoff, const float pme_beta, - const int *uint_crd_f, const int *LJtype, const float *charge, const float *scaler_f, - float *uint_crd_with_LJ, int *nl_atom_numbers, int *nl_atom_serial, int *nl, - const float *d_LJ_A, const float *d_LJ_B, float *frc_f, float *atom_energy, - cudaStream_t stream); diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/lj/lj_pme_direct_force_with_atom_energy_impl.cuh b/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/lj/lj_pme_direct_force_with_atom_energy_impl.cuh deleted file mode 100644 index e23c5de34b6..00000000000 --- a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/lj/lj_pme_direct_force_with_atom_energy_impl.cuh +++ /dev/null @@ -1,31 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_SPONGE_LJ_LJ_PME_DIRECT_FORCE_WITH_ATOM_ENERGY_IMPL_H_ -#define MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_SPONGE_LJ_LJ_PME_DIRECT_FORCE_WITH_ATOM_ENERGY_IMPL_H_ - -#include -#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/cuda_common.h" -#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/cuda_device_info.h" - -CUDA_LIB_EXPORT void LJDirectCFForceWithAtomEnergy(const int atom_numbers, const float cutoff, const float pme_beta, - const int *uint_crd_f, const int *LJtype, const float *charge, - const float *scaler_f, float *uint_crd_with_LJ, int *nl_atom_numbers, - int *nl_atom_serial, int *nl, const float *d_LJ_A, - const float *d_LJ_B, float *frc_f, float *atom_energy, - cudaStream_t stream); - -#endif diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/nb14/dihedral_14_cf_atom_energy_impl.cu b/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/nb14/dihedral_14_cf_atom_energy_impl.cu deleted file mode 100644 index 723967fda0f..00000000000 --- a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/nb14/dihedral_14_cf_atom_energy_impl.cu +++ /dev/null @@ -1,78 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include "plugin/device/gpu/kernel/cuda_impl/sponge/nb14/dihedral_14_cf_atom_energy_impl.cuh" -#include "plugin/device/gpu/kernel/cuda_impl/sponge/common_sponge.cuh" - -__global__ void Dihedral14CFAtomEnergyKernel(const int dihedral_14_numbers, const UINT_VECTOR_LJ_TYPE *uint_crd, - const VECTOR *boxlength, const int *a_14, const int *b_14, - const float *cf_scale_factor, float *ene) { - int dihedral_14_i = blockDim.x * blockIdx.x + threadIdx.x; - if (dihedral_14_i < dihedral_14_numbers) { - int atom_i = a_14[dihedral_14_i]; - int atom_j = b_14[dihedral_14_i]; - - UINT_VECTOR_LJ_TYPE r1 = uint_crd[atom_i]; - UINT_VECTOR_LJ_TYPE r2 = uint_crd[atom_j]; - - int int_x; - int int_y; - int int_z; - VECTOR dr; - float r_1; - float ene_lin = 0.; - - int_x = r2.uint_x - r1.uint_x; - int_y = r2.uint_y - r1.uint_y; - int_z = r2.uint_z - r1.uint_z; - dr.x = boxlength[0].x * int_x; - dr.y = boxlength[0].y * int_y; - dr.z = boxlength[0].z * int_z; - r_1 = rnorm3df(dr.x, dr.y, dr.z); - - ene_lin = r1.charge * r2.charge * r_1; - - ene_lin *= cf_scale_factor[dihedral_14_i]; - - atomicAdd(&ene[atom_i], ene_lin); - } -} - -void Dihedral14CFAtomEnergy(const int dihedral_14_numbers, const int atom_numbers, const int *uint_crd_f, - const int *LJtype, const float *charge, const float *boxlength_f, const int *a_14, - const int *b_14, const float *cf_scale_factor, float *ene, cudaStream_t stream) { - size_t thread_per_block = 128; - size_t block_per_grid = ceilf(static_cast<float>(atom_numbers) / 128); - UINT_VECTOR_LJ_TYPE *uint_crd_with_LJ = NULL; - Cuda_Malloc_Safely(reinterpret_cast<void **>(&uint_crd_with_LJ), sizeof(UINT_VECTOR_LJ_TYPE) * atom_numbers); - - UNSIGNED_INT_VECTOR *uint_crd = - const_cast<UNSIGNED_INT_VECTOR *>(reinterpret_cast<const UNSIGNED_INT_VECTOR *>(uint_crd_f)); - - Copy_Crd_To_New_Crd_Start<<<ceilf(static_cast<float>(atom_numbers) / 32), 32, 0, stream>>>( - atom_numbers, uint_crd, uint_crd_with_LJ, LJtype, charge); - - VECTOR *boxlength = const_cast<VECTOR *>(reinterpret_cast<const VECTOR *>(boxlength_f)); - Reset_List<<<ceilf(static_cast<float>(3. * atom_numbers) / 128), 128, 0, stream>>>(atom_numbers, ene, 0.); - Dihedral14CFAtomEnergyKernel<<<block_per_grid, thread_per_block, 0, stream>>>( - dihedral_14_numbers, uint_crd_with_LJ, boxlength, a_14, b_14, cf_scale_factor, ene); - - return; -} - -void Dihedral14CFAtomEnergy(const int dihedral_14_numbers, const int atom_numbers, const int *uint_crd_f, - const int *LJtype, const float *charge, const float *boxlength_f, const int *a_14, - const int *b_14, const float *cf_scale_factor, float *ene, cudaStream_t stream); diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/nb14/dihedral_14_cf_atom_energy_impl.cuh b/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/nb14/dihedral_14_cf_atom_energy_impl.cuh deleted file mode 100644 index eb0f80a46ec..00000000000 --- a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/nb14/dihedral_14_cf_atom_energy_impl.cuh +++ /dev/null @@ -1,27 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#ifndef MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_SPONGE_NB14_DIHEDRAL_14_CF_ATOM_ENERGY_IMPL_H -#define MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_SPONGE_NB14_DIHEDRAL_14_CF_ATOM_ENERGY_IMPL_H - -#include -#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/cuda_common.h" -#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/cuda_device_info.h" - -CUDA_LIB_EXPORT void Dihedral14CFAtomEnergy(const int dihedral_14_numbers, const int atom_numbers, - const int *uint_crd_f, const int *LJtype, const float *charge, - const float *boxlength_f, const int *a_14, const int *b_14, - const float *cf_scale_factor, float *ene, cudaStream_t stream); -#endif // MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_SPONGE_NB14_DIHEDRAL_14_CF_ENERGY_IMPL_H diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/nb14/dihedral_14_cf_energy_impl.cu b/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/nb14/dihedral_14_cf_energy_impl.cu deleted file mode 100644 index 5192821e406..00000000000 --- a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/nb14/dihedral_14_cf_energy_impl.cu +++ /dev/null @@ -1,78 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "plugin/device/gpu/kernel/cuda_impl/sponge/nb14/dihedral_14_cf_energy_impl.cuh" -#include "plugin/device/gpu/kernel/cuda_impl/sponge/common_sponge.cuh" - -__global__ void Dihedral14CFEnergyKernel(const int dihedral_14_numbers, const UINT_VECTOR_LJ_TYPE *uint_crd, - const VECTOR *boxlength, const int *a_14, const int *b_14, - const float *cf_scale_factor, float *ene) { - int dihedral_14_i = blockDim.x * blockIdx.x + threadIdx.x; - if (dihedral_14_i < dihedral_14_numbers) { - int atom_i = a_14[dihedral_14_i]; - int atom_j = b_14[dihedral_14_i]; - - UINT_VECTOR_LJ_TYPE r1 = uint_crd[atom_i]; - UINT_VECTOR_LJ_TYPE r2 = uint_crd[atom_j]; - - int int_x; - int int_y; - int int_z; - VECTOR dr; - float r_1; - float ene_lin = 0.; - - int_x = r2.uint_x - r1.uint_x; - int_y = r2.uint_y - r1.uint_y; - int_z = r2.uint_z - r1.uint_z; - dr.x = boxlength[0].x * int_x; - dr.y = boxlength[0].y * int_y; - dr.z = boxlength[0].z * int_z; - r_1 = rnorm3df(dr.x, dr.y, dr.z); - - ene_lin = r1.charge * r2.charge * r_1; - - ene_lin *= cf_scale_factor[dihedral_14_i]; - - ene[dihedral_14_i] = ene_lin; - } -} - -void Dihedral14CFEnergy(const int dihedral_14_numbers, const int atom_numbers, const int *uint_crd_f, const int *LJtype, - const float *charge, float *uint_crd_with_LJ_f, const float *boxlength_f, const int *a_14, - const int *b_14, const float *cf_scale_factor, float *ene, cudaStream_t stream) { - size_t thread_per_block = 32; - size_t block_per_grid = ceilf(static_cast(dihedral_14_numbers) / 32); - - UNSIGNED_INT_VECTOR *uint_crd = - const_cast(reinterpret_cast(uint_crd_f)); - - UINT_VECTOR_LJ_TYPE *uint_crd_with_LJ = reinterpret_cast(uint_crd_with_LJ_f); - - Copy_Crd_To_New_Crd_Start<<(atom_numbers) / 32), 32, 0, stream>>>( - atom_numbers, uint_crd, uint_crd_with_LJ, LJtype, charge); - - 
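// Copy_Crd_To_New_Crd_Start above packs each atom's quantized coordinate together with
// its LJ type and charge into one UINT_VECTOR_LJ_TYPE record, so the pair kernel below
// fetches position, type, and charge with a single load. Dihedral14CFEnergyKernel then
// evaluates the scaled 1-4 Coulomb term cf_scale_factor * q_i * q_j / r for each
// a_14/b_14 pair, one thread per pair, writing one energy value per pair into ene.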
VECTOR *boxlength = const_cast(reinterpret_cast(boxlength_f)); - - Dihedral14CFEnergyKernel<<>>( - dihedral_14_numbers, uint_crd_with_LJ, boxlength, a_14, b_14, cf_scale_factor, ene); - - return; -} - -void Dihedral14CFEnergy(const int dihedral_14_numbers, const int atom_numbers, const int *uint_crd_f, const int *LJtype, - const float *charge, float *uint_crd_with_LJ_f, const float *boxlength_f, const int *a_14, - const int *b_14, const float *cf_scale_factor, float *ene, cudaStream_t stream); diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/nb14/dihedral_14_cf_energy_impl.cuh b/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/nb14/dihedral_14_cf_energy_impl.cuh deleted file mode 100644 index bc5aea7af1d..00000000000 --- a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/nb14/dihedral_14_cf_energy_impl.cuh +++ /dev/null @@ -1,27 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -#ifndef MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_SPONGE_NB14_DIHEDRAL_14_CF_ENERGY_IMPL_H -#define MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_SPONGE_NB14_DIHEDRAL_14_CF_ENERGY_IMPL_H - -#include -#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/cuda_common.h" -#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/cuda_device_info.h" - -CUDA_LIB_EXPORT void Dihedral14CFEnergy(const int dihedral_14_numbers, const int atom_numbers, const int *uint_crd_f, - const int *LJtype, const float *charge, float *uint_crd_with_LJ_f, - const float *boxlength_f, const int *a_14, const int *b_14, - const float *cf_scale_factor, float *ene, cudaStream_t stream); -#endif // MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_SPONGE_NB14_DIHEDRAL_14_CF_ENERGY_IMPL_H diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/nb14/dihedral_14_lj_atom_energy_impl.cu b/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/nb14/dihedral_14_lj_atom_energy_impl.cu deleted file mode 100644 index 5dc0093c75f..00000000000 --- a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/nb14/dihedral_14_lj_atom_energy_impl.cu +++ /dev/null @@ -1,102 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include "plugin/device/gpu/kernel/cuda_impl/sponge/nb14/dihedral_14_lj_atom_energy_impl.cuh" -#include "plugin/device/gpu/kernel/cuda_impl/sponge/common_sponge.cuh" - -__global__ void Dihedral14LJAtomEnergyKernel(const int dihedral_14_numbers, const UINT_VECTOR_LJ_TYPE *uint_crd, - const VECTOR *boxlength, const int *a_14, const int *b_14, - const float *lj_scale_factor, const float *LJ_type_A, - const float *LJ_type_B, float *ene) { - int dihedral_14_i = blockDim.x * blockIdx.x + threadIdx.x; - if (dihedral_14_i < dihedral_14_numbers) { - int atom_i = a_14[dihedral_14_i]; - int atom_j = b_14[dihedral_14_i]; - - UINT_VECTOR_LJ_TYPE r1 = uint_crd[atom_i]; - UINT_VECTOR_LJ_TYPE r2 = uint_crd[atom_j]; - - int int_x; - int int_y; - int int_z; - VECTOR dr; - float dr2; - float dr_2; - float dr_4; - float dr_6; - float dr_12; - float ene_lin = 0.; - int x, y; - int atom_pair_LJ_type; - - int_x = r2.uint_x - r1.uint_x; - int_y = r2.uint_y - r1.uint_y; - int_z = r2.uint_z - r1.uint_z; - dr.x = boxlength[0].x * int_x; - dr.y = boxlength[0].y * int_y; - dr.z = boxlength[0].z * int_z; - dr2 = dr.x * dr.x + dr.y * dr.y + dr.z * dr.z; - - dr_2 = 1. / dr2; - dr_4 = dr_2 * dr_2; - dr_6 = dr_4 * dr_2; - dr_12 = dr_6 * dr_6; - - y = (r2.LJ_type - r1.LJ_type); - x = y >> 31; - y = (y ^ x) - x; - x = r2.LJ_type + r1.LJ_type; - r2.LJ_type = (x + y) >> 1; - x = (x - y) >> 1; - atom_pair_LJ_type = (r2.LJ_type * (r2.LJ_type + 1) >> 1) + x; - - ene_lin = 0.08333333 * LJ_type_A[atom_pair_LJ_type] * dr_12 - - 0.1666666 * LJ_type_B[atom_pair_LJ_type] * dr_6; // the LJ A and B coefficients are pre-multiplied by 12 and 6, so divide that back out here - ene_lin *= lj_scale_factor[dihedral_14_i]; - - atomicAdd(&ene[atom_i], ene_lin); - } -} - -void Dihedral14LJAtomEnergy(const int dihedral_14_numbers, const int atom_numbers, const int *uint_crd_f, - const int *LJtype, const float *charge, const float *boxlength_f, const int *a_14, - const int *b_14, const float *lj_scale_factor, const float *LJ_type_A, - const float *LJ_type_B, float *ene, cudaStream_t stream) { - size_t thread_per_block = 128; - size_t block_per_grid = ceilf(static_cast<float>(atom_numbers) / 128); - UINT_VECTOR_LJ_TYPE *uint_crd_with_LJ = NULL; - Cuda_Malloc_Safely(reinterpret_cast<void **>(&uint_crd_with_LJ), sizeof(UINT_VECTOR_LJ_TYPE) * atom_numbers); - - UNSIGNED_INT_VECTOR *uint_crd = - const_cast<UNSIGNED_INT_VECTOR *>(reinterpret_cast<const UNSIGNED_INT_VECTOR *>(uint_crd_f)); - - Copy_Crd_To_New_Crd_Start<<<ceilf(static_cast<float>(atom_numbers) / 32), 32, 0, stream>>>( - atom_numbers, uint_crd, uint_crd_with_LJ, LJtype, charge); - - VECTOR *boxlength = const_cast<VECTOR *>(reinterpret_cast<const VECTOR *>(boxlength_f)); - Reset_List<<<ceilf(static_cast<float>(3. * atom_numbers) / 128), 128, 0, stream>>>(atom_numbers, ene, 0.); - Dihedral14LJAtomEnergyKernel<<<block_per_grid, thread_per_block, 0, stream>>>( - dihedral_14_numbers, uint_crd_with_LJ, boxlength, a_14, b_14, lj_scale_factor, LJ_type_A, LJ_type_B, ene); - - cudaStreamSynchronize(stream); - - return; -} - -void Dihedral14LJAtomEnergy(const int dihedral_14_numbers, const int atom_numbers, const int *uint_crd_f, - const int *LJtype, const float *charge, const float *boxlength_f, const int *a_14, - const int *b_14, const float *lj_scale_factor, const float *LJ_type_A, - const float *LJ_type_B, float *ene, cudaStream_t stream); diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/nb14/dihedral_14_lj_atom_energy_impl.cuh b/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/nb14/dihedral_14_lj_atom_energy_impl.cuh deleted file mode 100644 index a1943aefe2f..00000000000 --- a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/nb14/dihedral_14_lj_atom_energy_impl.cuh +++ /dev/null @@ -1,28 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -#ifndef MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_SPONGE_NB14_DIHEDRAL_14_LJ_ATOM_ENERGY_IMPL_H -#define MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_SPONGE_NB14_DIHEDRAL_14_LJ_ATOM_ENERGY_IMPL_H - -#include <cuda_runtime_api.h> -#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/cuda_common.h" -#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/cuda_device_info.h" - -CUDA_LIB_EXPORT void Dihedral14LJAtomEnergy(const int dihedral_14_numbers, const int atom_numbers, - const int *uint_crd_f, const int *LJtype, const float *charge, - const float *boxlength_f, const int *a_14, const int *b_14, - const float *lj_scale_factor, const float *LJ_type_A, - const float *LJ_type_B, float *ene, cudaStream_t stream); -#endif // MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_SPONGE_NB14_DIHEDRAL_14_LJ_ATOM_ENERGY_IMPL_H diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/nb14/dihedral_14_lj_cf_force_with_atom_energy_and_virial_impl.cu b/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/nb14/dihedral_14_lj_cf_force_with_atom_energy_and_virial_impl.cu deleted file mode 100644 index 8d3ba0585eb..00000000000 --- a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/nb14/dihedral_14_lj_cf_force_with_atom_energy_and_virial_impl.cu +++ /dev/null @@ -1,139 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include "plugin/device/gpu/kernel/cuda_impl/sponge/nb14/dihedral_14_lj_cf_force_with_atom_energy_and_virial_impl.cuh" -#include "plugin/device/gpu/kernel/cuda_impl/sponge/common_sponge.cuh" - -__global__ void Dihedral14LJCFForceWithAtomEnergyAndVirialKernel( - const int dihedral_14_numbers, const UINT_VECTOR_LJ_TYPE *uint_crd, const VECTOR *scaler, const int *a_14, - const int *b_14, const float *lj_scale_factor, const float *cf_scale_factor, const float *LJ_type_A, - const float *LJ_type_B, VECTOR *frc, float *atom_energy, float *atom_virial) { - int dihedral_14_i = blockDim.x * blockIdx.x + threadIdx.x; - if (dihedral_14_i < dihedral_14_numbers) { - UINT_VECTOR_LJ_TYPE r1, r2; - VECTOR dr; - float dr_abs; - float dr2; - float dr_1; - float dr_2; - float dr_4; - float dr_8; - float dr_14; - float frc_abs = 0.; - VECTOR temp_frc; - - float ene_lin; - float ene_lin2; - - int x, y; - int atom_pair_LJ_type; - - int atom_i = a_14[dihedral_14_i]; - int atom_j = b_14[dihedral_14_i]; - - r1 = uint_crd[atom_i]; - r2 = uint_crd[atom_j]; - - dr = Get_Periodic_Displacement(r2, r1, scaler[0]); - - dr2 = dr.x * dr.x + dr.y * dr.y + dr.z * dr.z; - - dr_2 = 1.0 / dr2; - dr_4 = dr_2 * dr_2; - dr_8 = dr_4 * dr_4; - dr_14 = dr_8 * dr_4 * dr_2; - dr_abs = norm3df(dr.x, dr.y, dr.z); - dr_1 = 1. / dr_abs; - - // CF - float charge_i = r1.charge; - float charge_j = r2.charge; - float frc_cf_abs; - frc_cf_abs = cf_scale_factor[dihedral_14_i] * dr_2 * dr_1; - frc_cf_abs = -charge_i * charge_j * frc_cf_abs; - // LJ - y = (r2.LJ_type - r1.LJ_type); - x = y >> 31; - y = (y ^ x) - x; - x = r2.LJ_type + r1.LJ_type; - r2.LJ_type = (x + y) >> 1; - x = (x - y) >> 1; - atom_pair_LJ_type = (r2.LJ_type * (r2.LJ_type + 1) >> 1) + x; - - frc_abs = -LJ_type_A[atom_pair_LJ_type] * dr_14 + LJ_type_B[atom_pair_LJ_type] * dr_8; - frc_abs *= lj_scale_factor[dihedral_14_i]; - - frc_abs += frc_cf_abs; - temp_frc.x = frc_abs * dr.x; - temp_frc.y = frc_abs * dr.y; - temp_frc.z = frc_abs * dr.z; - - atomicAdd(&frc[atom_j].x, -temp_frc.x); - atomicAdd(&frc[atom_j].y, -temp_frc.y); - atomicAdd(&frc[atom_j].z, -temp_frc.z); - atomicAdd(&frc[atom_i].x, temp_frc.x); - atomicAdd(&frc[atom_i].y, temp_frc.y); - atomicAdd(&frc[atom_i].z, temp_frc.z); - - ene_lin = r1.charge * r2.charge * dr_1; - ene_lin *= cf_scale_factor[dihedral_14_i]; - ene_lin2 = 0.08333333 * LJ_type_A[atom_pair_LJ_type] * dr_4 * dr_8 - - 0.1666666 * LJ_type_B[atom_pair_LJ_type] * dr_4 * dr_2; // LJ的A,B系数已经乘以12和6因此要反乘 - ene_lin2 *= lj_scale_factor[dihedral_14_i]; - - atomicAdd(&atom_energy[atom_i], ene_lin + ene_lin2); - - atomicAdd(&atom_virial[atom_i], -temp_frc * dr); - } -} - -void Dihedral14LJCFForceWithAtomEnergyAndVirial(const int dihedral_14_numbers, const int atom_numbers, - const int *uint_crd_f, const int *LJtype, const float *charge, - float *uint_crd_with_LJ_f, const float *boxlength_f, const int *a_14, - const int *b_14, const float *lj_scale_factor, - const float *cf_scale_factor, const float *LJ_type_A, - const float *LJ_type_B, float *frc_f, float *atom_energy, - float *atom_virial, cudaStream_t stream) { - size_t thread_per_block = 128; - size_t block_per_grid = ceilf(static_cast(atom_numbers) / 128); - UNSIGNED_INT_VECTOR *uint_crd = - const_cast(reinterpret_cast(uint_crd_f)); - - UINT_VECTOR_LJ_TYPE *uint_crd_with_LJ = reinterpret_cast(uint_crd_with_LJ_f); - - Copy_Crd_To_New_Crd_Start<<(atom_numbers) / 32), 32, 0, stream>>>( - atom_numbers, uint_crd, uint_crd_with_LJ, LJtype, charge); - - Reset_List<<(3. 
* atom_numbers) / 128), 128, 0, stream>>>(3 * atom_numbers, frc_f, 0.); - Reset_List<<(3. * atom_numbers) / 128), 128, 0, stream>>>(atom_numbers, atom_energy, 0.); - Reset_List<<(3. * atom_numbers) / 128), 128, 0, stream>>>(atom_numbers, atom_virial, 0.); - VECTOR *boxlength = const_cast(reinterpret_cast(boxlength_f)); - VECTOR *frc = const_cast(reinterpret_cast(frc_f)); - - Dihedral14LJCFForceWithAtomEnergyAndVirialKernel<<>>( - dihedral_14_numbers, uint_crd_with_LJ, boxlength, a_14, b_14, lj_scale_factor, cf_scale_factor, LJ_type_A, - LJ_type_B, frc, atom_energy, atom_virial); - - return; -} - -void Dihedral14LJCFForceWithAtomEnergyAndVirial(const int dihedral_14_numbers, const int atom_numbers, - const int *uint_crd_f, const int *LJtype, const float *charge, - float *uint_crd_with_LJ_f, const float *boxlength_f, const int *a_14, - const int *b_14, const float *lj_scale_factor, - const float *cf_scale_factor, const float *LJ_type_A, - const float *LJ_type_B, float *frc_f, float *atom_energy, - float *atom_virial, cudaStream_t stream); diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/nb14/dihedral_14_lj_cf_force_with_atom_energy_and_virial_impl.cuh b/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/nb14/dihedral_14_lj_cf_force_with_atom_energy_and_virial_impl.cuh deleted file mode 100644 index 91018f1fac4..00000000000 --- a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/nb14/dihedral_14_lj_cf_force_with_atom_energy_and_virial_impl.cuh +++ /dev/null @@ -1,28 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#ifndef MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_SPONGE_NB14_DIHEDRAL_14_LJ_CF_FORCE_WITH_ATOM_ENERGY_IMPL_H -#define MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_SPONGE_NB14_DIHEDRAL_14_LJ_CF_FORCE_WITH_ATOM_ENERGY_IMPL_H - -#include -#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/cuda_common.h" -#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/cuda_device_info.h" - -CUDA_LIB_EXPORT void Dihedral14LJCFForceWithAtomEnergyAndVirial( - const int dihedral_14_numbers, const int atom_numbers, const int *uint_crd_f, const int *LJtype, const float *charge, - float *uint_crd_with_LJ_f, const float *boxlength_f, const int *a_14, const int *b_14, const float *lj_scale_factor, - const float *cf_scale_factor, const float *LJ_type_A, const float *LJ_type_B, float *frc_f, float *atom_energy, - float *atom_virial, cudaStream_t stream); -#endif // MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_SPONGE_NB14_DIHEDRAL_14_LJ_CF_FORCE_WITH_ATOM_ENERGY_IMPL_H diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/nb14/dihedral_14_lj_cf_force_with_atom_energy_impl.cu b/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/nb14/dihedral_14_lj_cf_force_with_atom_energy_impl.cu deleted file mode 100644 index 6b848960e4b..00000000000 --- a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/nb14/dihedral_14_lj_cf_force_with_atom_energy_impl.cu +++ /dev/null @@ -1,140 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "plugin/device/gpu/kernel/cuda_impl/sponge/nb14/dihedral_14_lj_cf_force_with_atom_energy_impl.cuh" -#include "plugin/device/gpu/kernel/cuda_impl/sponge/common_sponge.cuh" - -__global__ void Dihedral14LJCFForceWithAtomEnergyKernel(const int dihedral_14_numbers, - const UINT_VECTOR_LJ_TYPE *uint_crd, const VECTOR *boxlength, - const int *a_14, const int *b_14, const float *lj_scale_factor, - const float *cf_scale_factor, const float *LJ_type_A, - const float *LJ_type_B, VECTOR *frc, float *atom_energy) { - int dihedral_14_i = blockDim.x * blockIdx.x + threadIdx.x; - if (dihedral_14_i < dihedral_14_numbers) { - int int_x; - int int_y; - int int_z; - UINT_VECTOR_LJ_TYPE r1, r2; - VECTOR dr; - float dr_abs; - float dr2; - float dr_1; - float dr_2; - float dr_4; - float dr_8; - float dr_14; - float frc_abs = 0.; - VECTOR temp_frc; - - float ene_lin; - float ene_lin2; - - int x, y; - int atom_pair_LJ_type; - - int atom_i = a_14[dihedral_14_i]; - int atom_j = b_14[dihedral_14_i]; - - r1 = uint_crd[atom_i]; - r2 = uint_crd[atom_j]; - int_x = r2.uint_x - r1.uint_x; - int_y = r2.uint_y - r1.uint_y; - int_z = r2.uint_z - r1.uint_z; - dr.x = boxlength[0].x * int_x; - dr.y = boxlength[0].y * int_y; - dr.z = boxlength[0].z * int_z; - dr2 = dr.x * dr.x + dr.y * dr.y + dr.z * dr.z; - - dr_2 = 1.0 / dr2; - dr_4 = dr_2 * dr_2; - dr_8 = dr_4 * dr_4; - dr_14 = dr_8 * dr_4 * dr_2; - dr_abs = norm3df(dr.x, dr.y, dr.z); - dr_1 = 1. 
/ dr_abs; - - float charge_i = r1.charge; - float charge_j = r2.charge; - float frc_cf_abs; - frc_cf_abs = cf_scale_factor[dihedral_14_i] * dr_2 * dr_1; - frc_cf_abs = -charge_i * charge_j * frc_cf_abs; - - y = (r2.LJ_type - r1.LJ_type); - x = y >> 31; - y = (y ^ x) - x; - x = r2.LJ_type + r1.LJ_type; - r2.LJ_type = (x + y) >> 1; - x = (x - y) >> 1; - atom_pair_LJ_type = (r2.LJ_type * (r2.LJ_type + 1) >> 1) + x; - - frc_abs = -LJ_type_A[atom_pair_LJ_type] * dr_14 + LJ_type_B[atom_pair_LJ_type] * dr_8; - frc_abs *= lj_scale_factor[dihedral_14_i]; - - frc_abs += frc_cf_abs; - temp_frc.x = frc_abs * dr.x; - temp_frc.y = frc_abs * dr.y; - temp_frc.z = frc_abs * dr.z; - - atomicAdd(&frc[atom_j].x, -temp_frc.x); - atomicAdd(&frc[atom_j].y, -temp_frc.y); - atomicAdd(&frc[atom_j].z, -temp_frc.z); - atomicAdd(&frc[atom_i].x, temp_frc.x); - atomicAdd(&frc[atom_i].y, temp_frc.y); - atomicAdd(&frc[atom_i].z, temp_frc.z); - - ene_lin = r1.charge * r2.charge * dr_1; - ene_lin *= cf_scale_factor[dihedral_14_i]; - ene_lin2 = 0.08333333 * LJ_type_A[atom_pair_LJ_type] * dr_4 * dr_8 - - 0.1666666 * LJ_type_B[atom_pair_LJ_type] * dr_4 * dr_2; // LJ的A,B系数已经乘以12和6因此要反乘 - ene_lin2 *= lj_scale_factor[dihedral_14_i]; - - atomicAdd(&atom_energy[atom_i], ene_lin + ene_lin2); - } -} - -void Dihedral14LJCFForceWithAtomEnergy(const int dihedral_14_numbers, const int atom_numbers, const int *uint_crd_f, - const int *LJtype, const float *charge, float *uint_crd_with_LJ_f, - const float *boxlength_f, const int *a_14, const int *b_14, - const float *lj_scale_factor, const float *cf_scale_factor, - const float *LJ_type_A, const float *LJ_type_B, float *frc_f, float *atom_energy, - cudaStream_t stream) { - size_t thread_per_block = 128; - size_t block_per_grid = ceilf(static_cast(dihedral_14_numbers) / 128); - UNSIGNED_INT_VECTOR *uint_crd = - const_cast(reinterpret_cast(uint_crd_f)); - - UINT_VECTOR_LJ_TYPE *uint_crd_with_LJ = reinterpret_cast(uint_crd_with_LJ_f); - - Copy_Crd_To_New_Crd_Start<<(atom_numbers) / 32), 32, 0, stream>>>( - atom_numbers, uint_crd, uint_crd_with_LJ, LJtype, charge); - - Reset_List<<(3. 
* atom_numbers) / 128), 128, 0, stream>>>(3 * atom_numbers, frc_f, 0.); - Reset_List<<(atom_numbers) / 128), 128, 0, stream>>>(atom_numbers, atom_energy, 0.); - VECTOR *boxlength = const_cast(reinterpret_cast(boxlength_f)); - VECTOR *frc = const_cast(reinterpret_cast(frc_f)); - - Dihedral14LJCFForceWithAtomEnergyKernel<<>>( - dihedral_14_numbers, uint_crd_with_LJ, boxlength, a_14, b_14, lj_scale_factor, cf_scale_factor, LJ_type_A, - LJ_type_B, frc, atom_energy); - - return; -} - -void Dihedral14LJCFForceWithAtomEnergy(const int dihedral_14_numbers, const int atom_numbers, const int *uint_crd_f, - const int *LJtype, const float *charge, float *uint_crd_with_LJ_f, - const float *boxlength_f, const int *a_14, const int *b_14, - const float *lj_scale_factor, const float *cf_scale_factor, - const float *LJ_type_A, const float *LJ_type_B, float *frc_f, float *atom_energy, - cudaStream_t stream); diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/nb14/dihedral_14_lj_cf_force_with_atom_energy_impl.cuh b/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/nb14/dihedral_14_lj_cf_force_with_atom_energy_impl.cuh deleted file mode 100644 index 7d5560c78e0..00000000000 --- a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/nb14/dihedral_14_lj_cf_force_with_atom_energy_impl.cuh +++ /dev/null @@ -1,30 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -#ifndef MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_SPONGE_NB14_DIHEDRAL_14_LJ_CF_FORCE_WITH_ATOM_ENERGY_IMPL_H -#define MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_SPONGE_NB14_DIHEDRAL_14_LJ_CF_FORCE_WITH_ATOM_ENERGY_IMPL_H - -#include -#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/cuda_common.h" -#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/cuda_device_info.h" - -CUDA_LIB_EXPORT void Dihedral14LJCFForceWithAtomEnergy(const int dihedral_14_numbers, const int atom_numbers, - const int *uint_crd_f, const int *LJtype, const float *charge, - float *uint_crd_with_LJ_f, const float *boxlength_f, - const int *a_14, const int *b_14, const float *lj_scale_factor, - const float *cf_scale_factor, const float *LJ_type_A, - const float *LJ_type_B, float *frc_f, float *atom_energy, - cudaStream_t stream); -#endif // MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_SPONGE_NB14_DIHEDRAL_14_LJ_CF_FORCE_WITH_ATOM_ENERGY_IMPL_H diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/nb14/dihedral_14_lj_energy_impl.cu b/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/nb14/dihedral_14_lj_energy_impl.cu deleted file mode 100644 index 5e90f43e592..00000000000 --- a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/nb14/dihedral_14_lj_energy_impl.cu +++ /dev/null @@ -1,98 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "plugin/device/gpu/kernel/cuda_impl/sponge/nb14/dihedral_14_lj_energy_impl.cuh" -#include "plugin/device/gpu/kernel/cuda_impl/sponge/common_sponge.cuh" - -__global__ void Dihedral14LJEnergyKernel(const int dihedral_14_numbers, const UINT_VECTOR_LJ_TYPE *uint_crd, - const VECTOR *boxlength, const int *a_14, const int *b_14, - const float *lj_scale_factor, const float *LJ_type_A, const float *LJ_type_B, - float *ene) { - int dihedral_14_i = blockDim.x * blockIdx.x + threadIdx.x; - if (dihedral_14_i < dihedral_14_numbers) { - int atom_i = a_14[dihedral_14_i]; - int atom_j = b_14[dihedral_14_i]; - - UINT_VECTOR_LJ_TYPE r1 = uint_crd[atom_i]; - UINT_VECTOR_LJ_TYPE r2 = uint_crd[atom_j]; - - int int_x; - int int_y; - int int_z; - VECTOR dr; - float dr2; - float dr_2; - float dr_4; - float dr_6; - float dr_12; - float ene_lin = 0.; - int x, y; - int atom_pair_LJ_type; - - int_x = r2.uint_x - r1.uint_x; - int_y = r2.uint_y - r1.uint_y; - int_z = r2.uint_z - r1.uint_z; - dr.x = boxlength[0].x * int_x; - dr.y = boxlength[0].y * int_y; - dr.z = boxlength[0].z * int_z; - dr2 = dr.x * dr.x + dr.y * dr.y + dr.z * dr.z; - - dr_2 = 1. / dr2; - dr_4 = dr_2 * dr_2; - dr_6 = dr_4 * dr_2; - dr_12 = dr_6 * dr_6; - - y = (r2.LJ_type - r1.LJ_type); - x = y >> 31; - y = (y ^ x) - x; - x = r2.LJ_type + r1.LJ_type; - r2.LJ_type = (x + y) >> 1; - x = (x - y) >> 1; - atom_pair_LJ_type = (r2.LJ_type * (r2.LJ_type + 1) >> 1) + x; - - ene_lin = 0.08333333 * LJ_type_A[atom_pair_LJ_type] * dr_12 - - 0.1666666 * LJ_type_B[atom_pair_LJ_type] * dr_6; // LJ的A,B系数已经乘以12和6因此要反乘 - ene_lin *= lj_scale_factor[dihedral_14_i]; - - ene[dihedral_14_i] = ene_lin; - } -} - -void Dihedral14LJEnergy(const int dihedral_14_numbers, const int atom_numbers, const int *uint_crd_f, const int *LJtype, - const float *charge, float *uint_crd_with_LJ_f, const float *boxlength_f, const int *a_14, - const int *b_14, const float *lj_scale_factor, const float *LJ_type_A, const float *LJ_type_B, - float *ene, cudaStream_t stream) { - size_t thread_per_block = 32; - size_t block_per_grid = ceilf(static_cast(dihedral_14_numbers) / 32); - UNSIGNED_INT_VECTOR *uint_crd = - const_cast(reinterpret_cast(uint_crd_f)); - - UINT_VECTOR_LJ_TYPE *uint_crd_with_LJ = reinterpret_cast(uint_crd_with_LJ_f); - - Copy_Crd_To_New_Crd_Start<<(atom_numbers) / 32), 32, 0, stream>>>( - atom_numbers, uint_crd, uint_crd_with_LJ, LJtype, charge); - VECTOR *boxlength = const_cast(reinterpret_cast(boxlength_f)); - - Dihedral14LJEnergyKernel<<>>( - dihedral_14_numbers, uint_crd_with_LJ, boxlength, a_14, b_14, lj_scale_factor, LJ_type_A, LJ_type_B, ene); - - return; -} - -void Dihedral14LJEnergy(const int dihedral_14_numbers, const int atom_numbers, const int *uint_crd_f, const int *LJtype, - const float *charge, float *uint_crd_with_LJ_f, const float *boxlength_f, const int *a_14, - const int *b_14, const float *lj_scale_factor, const float *LJ_type_A, const float *LJ_type_B, - float *ene, cudaStream_t stream); diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/nb14/dihedral_14_lj_energy_impl.cuh 
b/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/nb14/dihedral_14_lj_energy_impl.cuh deleted file mode 100644 index 7202a596df3..00000000000 --- a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/nb14/dihedral_14_lj_energy_impl.cuh +++ /dev/null @@ -1,29 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -#ifndef MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_SPONGE_NB14_DIHEDRAL_14_LJ_ENERGY_IMPL_H -#define MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_SPONGE_NB14_DIHEDRAL_14_LJ_ENERGY_IMPL_H - -#include -#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/cuda_common.h" -#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/cuda_device_info.h" - -CUDA_LIB_EXPORT void Dihedral14LJEnergy(const int dihedral_14_numbers, const int atom_numbers, const int *uint_crd_f, - const int *LJtype, const float *charge, float *uint_crd_with_LJ_f, - const float *boxlength_f, const int *a_14, const int *b_14, - const float *lj_scale_factor, const float *LJ_type_A, const float *LJ_type_B, - float *ene, cudaStream_t stream); - -#endif // MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_SPONGE_NB14_DIHEDRAL_14_LJ_ENERGY_IMPL_H diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/nb14/dihedral_14_lj_force_impl.cu b/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/nb14/dihedral_14_lj_force_impl.cu deleted file mode 100644 index 4e731c62b26..00000000000 --- a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/nb14/dihedral_14_lj_force_impl.cu +++ /dev/null @@ -1,111 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include "plugin/device/gpu/kernel/cuda_impl/sponge/nb14/dihedral_14_lj_force_impl.cuh" -#include "plugin/device/gpu/kernel/cuda_impl/sponge/common_sponge.cuh" - -__global__ void Dihedral14LJForceKernel(const int dihedral_14_numbers, const UINT_VECTOR_LJ_TYPE *uint_crd, - const VECTOR *boxlength, const int *a_14, const int *b_14, - const float *lj_scale_factor, const float *LJ_type_A, const float *LJ_type_B, - VECTOR *frc) { - int dihedral_14_i = blockDim.x * blockIdx.x + threadIdx.x; - if (dihedral_14_i < dihedral_14_numbers) { - int int_x; - int int_y; - int int_z; - UINT_VECTOR_LJ_TYPE r1, r2; - VECTOR dr; - float dr2; - float dr_2; - float dr_4; - float dr_8; - float dr_14; - float frc_abs = 0.; - VECTOR temp_frc; - int x, y; - int atom_pair_LJ_type; - - int atom_i = a_14[dihedral_14_i]; - int atom_j = b_14[dihedral_14_i]; - - r1 = uint_crd[atom_i]; - r2 = uint_crd[atom_j]; - - int_x = r2.uint_x - r1.uint_x; - int_y = r2.uint_y - r1.uint_y; - int_z = r2.uint_z - r1.uint_z; - dr.x = boxlength[0].x * int_x; - dr.y = boxlength[0].y * int_y; - dr.z = boxlength[0].z * int_z; - dr2 = dr.x * dr.x + dr.y * dr.y + dr.z * dr.z; - - dr_2 = 1.0 / dr2; - dr_4 = dr_2 * dr_2; - dr_8 = dr_4 * dr_4; - dr_14 = dr_8 * dr_4 * dr_2; - - y = (r2.LJ_type - r1.LJ_type); - x = y >> 31; - y = (y ^ x) - x; - x = r2.LJ_type + r1.LJ_type; - r2.LJ_type = (x + y) >> 1; - x = (x - y) >> 1; - atom_pair_LJ_type = (r2.LJ_type * (r2.LJ_type + 1) >> 1) + x; - - frc_abs = -LJ_type_A[atom_pair_LJ_type] * dr_14 + LJ_type_B[atom_pair_LJ_type] * dr_8; - frc_abs *= lj_scale_factor[dihedral_14_i]; - temp_frc.x = frc_abs * dr.x; - temp_frc.y = frc_abs * dr.y; - temp_frc.z = frc_abs * dr.z; - - atomicAdd(&frc[atom_j].x, -temp_frc.x); - atomicAdd(&frc[atom_j].y, -temp_frc.y); - atomicAdd(&frc[atom_j].z, -temp_frc.z); - atomicAdd(&frc[atom_i].x, temp_frc.x); - atomicAdd(&frc[atom_i].y, temp_frc.y); - atomicAdd(&frc[atom_i].z, temp_frc.z); - } -} - -void Dihedral14LJForce(const int dihedral_14_numbers, const int atom_numbers, const int *uint_crd_f, const int *LJtype, - const float *charge, const float *boxlength_f, const int *a_14, const int *b_14, - const float *lj_scale_factor, const float *LJ_type_A, const float *LJ_type_B, float *frc_f, - cudaStream_t stream) { - size_t thread_per_block = 128; - size_t block_per_grid = ceilf(static_cast(atom_numbers) / 128); - UINT_VECTOR_LJ_TYPE *uint_crd_with_LJ = NULL; - Cuda_Malloc_Safely(reinterpret_cast(&uint_crd_with_LJ), sizeof(UINT_VECTOR_LJ_TYPE) * atom_numbers); - - UNSIGNED_INT_VECTOR *uint_crd = - const_cast(reinterpret_cast(uint_crd_f)); - - Copy_Crd_To_New_Crd_Start<<(atom_numbers) / 32), 32, 0, stream>>>( - atom_numbers, uint_crd, uint_crd_with_LJ, LJtype, charge); - cudaStreamSynchronize(stream); - Reset_List<<(3. 
* atom_numbers) / 128), 128, 0, stream>>>(3 * atom_numbers, frc_f, 0.); - VECTOR *boxlength = const_cast(reinterpret_cast(boxlength_f)); - VECTOR *frc = const_cast(reinterpret_cast(frc_f)); - - Dihedral14LJForceKernel<<>>( - dihedral_14_numbers, uint_crd_with_LJ, boxlength, a_14, b_14, lj_scale_factor, LJ_type_A, LJ_type_B, frc); - cudaStreamSynchronize(stream); - return; -} - -void Dihedral14LJForce(const int dihedral_14_numbers, const int atom_numbers, const int *uint_crd_f, const int *LJtype, - const float *charge, const float *boxlength_f, const int *a_14, const int *b_14, - const float *lj_scale_factor, const float *LJ_type_A, const float *LJ_type_B, float *frc_f, - cudaStream_t stream); diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/nb14/dihedral_14_lj_force_impl.cuh b/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/nb14/dihedral_14_lj_force_impl.cuh deleted file mode 100644 index 61849b1abc8..00000000000 --- a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/nb14/dihedral_14_lj_force_impl.cuh +++ /dev/null @@ -1,28 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -#ifndef MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_SPONGE_NB14_DIHEDRAL_14_LJ_FORCE_IMPL_H -#define MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_SPONGE_NB14_DIHEDRAL_14_LJ_FORCE_IMPL_H - -#include -#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/cuda_common.h" -#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/cuda_device_info.h" - -CUDA_LIB_EXPORT void Dihedral14LJForce(const int dihedral_14_numbers, const int atom_numbers, const int *uint_crd_f, - const int *LJtype, const float *charge, const float *boxlength_f, - const int *a_14, const int *b_14, const float *lj_scale_factor, - const float *LJ_type_A, const float *LJ_type_B, float *frc_f, - cudaStream_t stream); -#endif // MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_SPONGE_NB14_DIHEDRAL_14_LJ_FORCE_IMPL_H diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/nb14/dihedral_14_lj_force_with_direct_cf_impl.cu b/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/nb14/dihedral_14_lj_force_with_direct_cf_impl.cu deleted file mode 100644 index eef6d6209a8..00000000000 --- a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/nb14/dihedral_14_lj_force_with_direct_cf_impl.cu +++ /dev/null @@ -1,124 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include "plugin/device/gpu/kernel/cuda_impl/sponge/nb14/dihedral_14_lj_force_with_direct_cf_impl.cuh" -#include "plugin/device/gpu/kernel/cuda_impl/sponge/common_sponge.cuh" - -__global__ void Dihedral14LJForceWithDirectCFKernel(const int dihedral_14_numbers, const UINT_VECTOR_LJ_TYPE *uint_crd, - const VECTOR *boxlength, const int *a_14, const int *b_14, - const float *lj_scale_factor, const float *cf_scale_factor, - const float *LJ_type_A, const float *LJ_type_B, VECTOR *frc) { - int dihedral_14_i = blockDim.x * blockIdx.x + threadIdx.x; - if (dihedral_14_i < dihedral_14_numbers) { - int int_x; - int int_y; - int int_z; - UINT_VECTOR_LJ_TYPE r1, r2; - VECTOR dr; - float dr_abs; - float dr2; - float dr_1; - float dr_2; - float dr_4; - float dr_8; - float dr_14; - float frc_abs = 0.; - VECTOR temp_frc; - - int x, y; - int atom_pair_LJ_type; - - int atom_i = a_14[dihedral_14_i]; - int atom_j = b_14[dihedral_14_i]; - - r1 = uint_crd[atom_i]; - r2 = uint_crd[atom_j]; - int_x = r2.uint_x - r1.uint_x; - int_y = r2.uint_y - r1.uint_y; - int_z = r2.uint_z - r1.uint_z; - dr.x = boxlength[0].x * int_x; - dr.y = boxlength[0].y * int_y; - dr.z = boxlength[0].z * int_z; - dr2 = dr.x * dr.x + dr.y * dr.y + dr.z * dr.z; - - dr_2 = 1.0 / dr2; - dr_4 = dr_2 * dr_2; - dr_8 = dr_4 * dr_4; - dr_14 = dr_8 * dr_4 * dr_2; - dr_abs = norm3df(dr.x, dr.y, dr.z); - dr_1 = 1. / dr_abs; - - float charge_i = r1.charge; - float charge_j = r2.charge; - float frc_cf_abs; - frc_cf_abs = cf_scale_factor[dihedral_14_i] * dr_2 * dr_1; - frc_cf_abs = -charge_i * charge_j * frc_cf_abs; - // LJ - y = (r2.LJ_type - r1.LJ_type); - x = y >> 31; - y = (y ^ x) - x; - x = r2.LJ_type + r1.LJ_type; - r2.LJ_type = (x + y) >> 1; - x = (x - y) >> 1; - atom_pair_LJ_type = (r2.LJ_type * (r2.LJ_type + 1) >> 1) + x; - - frc_abs = -LJ_type_A[atom_pair_LJ_type] * dr_14 + LJ_type_B[atom_pair_LJ_type] * dr_8; - frc_abs *= lj_scale_factor[dihedral_14_i]; - - frc_abs += frc_cf_abs; - temp_frc.x = frc_abs * dr.x; - temp_frc.y = frc_abs * dr.y; - temp_frc.z = frc_abs * dr.z; - - atomicAdd(&frc[atom_j].x, -temp_frc.x); - atomicAdd(&frc[atom_j].y, -temp_frc.y); - atomicAdd(&frc[atom_j].z, -temp_frc.z); - atomicAdd(&frc[atom_i].x, temp_frc.x); - atomicAdd(&frc[atom_i].y, temp_frc.y); - atomicAdd(&frc[atom_i].z, temp_frc.z); - } -} - -void Dihedral14LJForceWithDirectCF(const int dihedral_14_numbers, const int atom_numbers, const int *uint_crd_f, - const int *LJtype, const float *charge, const float *boxlength_f, const int *a_14, - const int *b_14, const float *lj_scale_factor, const float *cf_scale_factor, - const float *LJ_type_A, const float *LJ_type_B, float *frc_f, cudaStream_t stream) { - size_t thread_per_block = 128; - size_t block_per_grid = ceilf(static_cast(atom_numbers) / 128); - UINT_VECTOR_LJ_TYPE *uint_crd_with_LJ = NULL; - Cuda_Malloc_Safely(reinterpret_cast(&uint_crd_with_LJ), sizeof(UINT_VECTOR_LJ_TYPE) * atom_numbers); - - UNSIGNED_INT_VECTOR *uint_crd = - const_cast(reinterpret_cast(uint_crd_f)); - - Copy_Crd_To_New_Crd_Start<<(atom_numbers) / 32), 32, 0, stream>>>( - atom_numbers, uint_crd, uint_crd_with_LJ, LJtype, charge); - cudaStreamSynchronize(stream); - VECTOR *boxlength = const_cast(reinterpret_cast(boxlength_f)); - Reset_List<<(3. 
* atom_numbers) / 128), 128, 0, stream>>>(3 * atom_numbers, frc_f, 0.); - VECTOR *frc = const_cast(reinterpret_cast(frc_f)); - - Dihedral14LJForceWithDirectCFKernel<<>>( - dihedral_14_numbers, uint_crd_with_LJ, boxlength, a_14, b_14, lj_scale_factor, cf_scale_factor, LJ_type_A, - LJ_type_B, frc); - - return; -} - -void Dihedral14LJForceWithDirectCF(const int dihedral_14_numbers, const int atom_numbers, const int *uint_crd_f, - const int *LJtype, const float *charge, const float *boxlength_f, const int *a_14, - const int *b_14, const float *lj_scale_factor, const float *cf_scale_factor, - const float *LJ_type_A, const float *LJ_type_B, float *frc_f, cudaStream_t stream); diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/nb14/dihedral_14_lj_force_with_direct_cf_impl.cuh b/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/nb14/dihedral_14_lj_force_with_direct_cf_impl.cuh deleted file mode 100644 index 901aa4f56cc..00000000000 --- a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/nb14/dihedral_14_lj_force_with_direct_cf_impl.cuh +++ /dev/null @@ -1,29 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -#ifndef MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_SPONGE_NB14_DIHEDRAL_14_LJ_FORCE_WITH_DIRECT_CF_IMPL_H -#define MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_SPONGE_NB14_DIHEDRAL_14_LJ_FORCE_WITH_DIRECT_CF_IMPL_H - -#include -#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/cuda_common.h" -#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/cuda_device_info.h" - -CUDA_LIB_EXPORT void Dihedral14LJForceWithDirectCF(const int dihedral_14_numbers, const int atom_numbers, - const int *uint_crd_f, const int *LJtype, const float *charge, - const float *boxlength_f, const int *a_14, const int *b_14, - const float *lj_scale_factor, const float *cf_scale_factor, - const float *LJ_type_A, const float *LJ_type_B, float *frc_f, - cudaStream_t stream); -#endif // MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_SPONGE_NB14_DIHEDRAL_14_LJ_FORCE_WITH_DIRECT_CF_IMPL_H diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/neighbor_list/neighbor_list_impl.cu b/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/neighbor_list/neighbor_list_impl.cu deleted file mode 100644 index c32a73a0264..00000000000 --- a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/neighbor_list/neighbor_list_impl.cu +++ /dev/null @@ -1,644 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/** - * Note: - * NeighborListUpdate. This is an experimental interface that is subject to change and/or deletion. - */ - -#include "plugin/device/gpu/kernel/cuda_impl/sponge/neighbor_list/neighbor_list_impl.cuh" -#include - -// common functions - -static __global__ void Copy_List(const int element_numbers, const float *origin_list, float *list) { - int i = blockDim.x * blockIdx.x + threadIdx.x; - if (i < element_numbers) { - list[i] = origin_list[i]; - } -} - -static __global__ void Crd_To_Uint_Crd(const int atom_numbers, float *scale_factor, const VECTOR *crd, - UNSIGNED_INT_VECTOR *uint_crd) { - int atom_i = blockDim.x * blockIdx.x + threadIdx.x; - if (atom_i < atom_numbers) { - INT_VECTOR tempi; - VECTOR temp = crd[atom_i]; - - temp.x *= scale_factor[0]; - temp.y *= scale_factor[1]; - temp.z *= scale_factor[2]; - - tempi.int_x = temp.x; - tempi.int_y = temp.y; - tempi.int_z = temp.z; - - uint_crd[atom_i].uint_x = (tempi.int_x << 2); - uint_crd[atom_i].uint_y = (tempi.int_y << 2); - uint_crd[atom_i].uint_z = (tempi.int_z << 2); - } -} - -static __global__ void Crd_Periodic_Map(const int atom_numbers, VECTOR *crd, const float *box_length) { - int atom_i = blockDim.x * blockIdx.x + threadIdx.x; - if (atom_i < atom_numbers) { - if (crd[atom_i].x >= 0) { - if (crd[atom_i].x < box_length[0]) { - } else { - crd[atom_i].x = crd[atom_i].x - box_length[0]; - } - } else { - crd[atom_i].x = crd[atom_i].x + box_length[0]; - } - - if (crd[atom_i].y >= 0) { - if (crd[atom_i].y < box_length[1]) { - } else { - crd[atom_i].y = crd[atom_i].y - box_length[1]; - } - } else { - crd[atom_i].y = crd[atom_i].y + box_length[1]; - } - if (crd[atom_i].z >= 0) { - if (crd[atom_i].z < box_length[2]) { - } else { - crd[atom_i].z = crd[atom_i].z - box_length[2]; - } - } else { - crd[atom_i].z = crd[atom_i].z + box_length[2]; - } - } -} - -static __global__ void Clear_Grid_Bucket(const int grid_numbers, int *atom_numbers_in_grid_bucket, - GRID_BUCKET *bucket) { - int grid_serial = blockDim.x * blockIdx.x + threadIdx.x; - if (grid_serial < grid_numbers) { - GRID_BUCKET bucket_i = bucket[grid_serial]; - for (int i = 0; i < atom_numbers_in_grid_bucket[grid_serial]; i = i + 1) { - bucket_i.atom_serial[i] = -1; - } - atom_numbers_in_grid_bucket[grid_serial] = 0; - } -} - -static __global__ void Find_Atom_In_Grid_Serial(const int atom_numbers, const float *grid_length_inverse, - const VECTOR *crd, const int *grid_N, const int gridxy, - int *atom_in_grid_serial) { - int atom_i = blockDim.x * blockIdx.x + threadIdx.x; - if (atom_i < atom_numbers) { - int Nx = static_cast(crd[atom_i].x) * grid_length_inverse[0]; - int Ny = static_cast(crd[atom_i].y) * grid_length_inverse[1]; - int Nz = static_cast(crd[atom_i].z) * grid_length_inverse[2]; - Nx = Nx & ((Nx - grid_N[0]) >> 31); - Ny = Ny & ((Ny - grid_N[1]) >> 31); - Nz = Nz & ((Nz - grid_N[2]) >> 31); - atom_in_grid_serial[atom_i] = Nz * gridxy + Ny * grid_N[0] + Nx; - } -} - -static __global__ void Put_Atom_In_Grid_Bucket(const int atom_numbers, const int *atom_in_grid_serial, - GRID_BUCKET *bucket, int *atom_numbers_in_grid_bucket) { - int atom_i = blockDim.x * blockIdx.x + threadIdx.x; - if (atom_i < atom_numbers) { - int grid_serial = atom_in_grid_serial[atom_i]; - GRID_BUCKET bucket_i = bucket[grid_serial]; - int a = atom_numbers_in_grid_bucket[grid_serial]; - atomicCAS(&bucket_i.atom_serial[a], -1, atom_i); - if (bucket_i.atom_serial[a] != atom_i) { - while 
(true) { - a = a + 1; - atomicCAS(&bucket_i.atom_serial[a], -1, atom_i); - if (bucket_i.atom_serial[a] == atom_i) { - atomicAdd(&atom_numbers_in_grid_bucket[grid_serial], 1); - break; - } - } - } else { - atomicAdd(&atom_numbers_in_grid_bucket[grid_serial], 1); - } - } -} - -static __global__ void Find_atom_neighbors(const int atom_numbers, const UNSIGNED_INT_VECTOR *uint_crd, - const float *uint_dr_to_dr_cof, const int *atom_in_grid_serial, - const GRID_POINTER *gpointer, const GRID_BUCKET *bucket, - const int *atom_numbers_in_grid_bucket, NEIGHBOR_LIST *nl, - const float cutoff_skin_square) { - int atom_i = blockDim.x * blockIdx.x + threadIdx.x; - if (atom_i < atom_numbers) { - int grid_serial = atom_in_grid_serial[atom_i]; - int grid_serial2; - int atom_numbers_in_nl_lin = 0; - int atom_j; - int int_x; - int int_y; - int int_z; - UNSIGNED_INT_VECTOR uint_crd_i = uint_crd[atom_i]; - NEIGHBOR_LIST nl_i = nl[atom_i]; - GRID_POINTER gpointer_i = gpointer[grid_serial]; - VECTOR dr; - float dr2; - for (int grid_cycle = 0; grid_cycle < 125; grid_cycle = grid_cycle + 1) { - grid_serial2 = gpointer_i.grid_serial[grid_cycle]; - GRID_BUCKET bucket_i = bucket[grid_serial2]; - for (int i = 0; i < atom_numbers_in_grid_bucket[grid_serial2]; i = i + 1) { - atom_j = bucket_i.atom_serial[i]; - if (atom_j > atom_i) { - int_x = uint_crd[atom_j].uint_x - uint_crd_i.uint_x; - int_y = uint_crd[atom_j].uint_y - uint_crd_i.uint_y; - int_z = uint_crd[atom_j].uint_z - uint_crd_i.uint_z; - dr.x = uint_dr_to_dr_cof[0] * int_x; - dr.y = uint_dr_to_dr_cof[1] * int_y; - dr.z = uint_dr_to_dr_cof[2] * int_z; - dr2 = dr.x * dr.x + dr.y * dr.y + dr.z * dr.z; - if (dr2 < cutoff_skin_square) { - nl_i.atom_serial[atom_numbers_in_nl_lin] = atom_j; - atom_numbers_in_nl_lin = atom_numbers_in_nl_lin + 1; - } - } - } - } - nl[atom_i].atom_numbers = atom_numbers_in_nl_lin; - } -} - -static __global__ void Delete_Excluded_Atoms_Serial_In_Neighbor_List(const int atom_numbers, NEIGHBOR_LIST *nl, - const int *excluded_list_start, - const int *excluded_list, - const int *excluded_atom_numbers) { - int atom_i = blockDim.x * blockIdx.x + threadIdx.x; - if (atom_i < atom_numbers) { - int excluded_number = excluded_atom_numbers[atom_i]; - if (excluded_number > 0) { - int list_start = excluded_list_start[atom_i]; - int atom_min = excluded_list[list_start]; - int list_end = list_start + excluded_number; - int atom_max = excluded_list[list_end - 1]; - NEIGHBOR_LIST nl_i = nl[atom_i]; - int atomnumbers_in_nl_lin = nl_i.atom_numbers; - int atom_j; - int excluded_atom_numbers_lin = list_end - list_start; - int excluded_atom_numbers_count = 0; - for (int i = 0; i < atomnumbers_in_nl_lin; i = i + 1) { - atom_j = nl_i.atom_serial[i]; - if (atom_j < atom_min || atom_j > atom_max) { - continue; - } else { - for (int j = list_start; j < list_end; j = j + 1) { - if (atom_j == excluded_list[j]) { - atomnumbers_in_nl_lin = atomnumbers_in_nl_lin - 1; - nl_i.atom_serial[i] = nl_i.atom_serial[atomnumbers_in_nl_lin]; - excluded_atom_numbers_count = excluded_atom_numbers_count + 1; - i = i - 1; - } - } - if (excluded_atom_numbers_count < excluded_atom_numbers_lin) { - } else { - break; - } - } - } - nl[atom_i].atom_numbers = atomnumbers_in_nl_lin; - } - } -} - -static __global__ void construct_neighbor_list_kernel(int atom_numbers, int max_neighbor_numbers, int *nl_atom_numbers, - int *nl_atom_serial, NEIGHBOR_LIST *nl) { - for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < atom_numbers; i += gridDim.x * blockDim.x) { - nl[i].atom_numbers = 
nl_atom_numbers[i]; - nl[i].atom_serial = nl_atom_serial + i * max_neighbor_numbers; - } -} - -static __global__ void copy_neighbor_list_atom_number(int atom_numbers, int max_neighbor_numbers, NEIGHBOR_LIST *nl, - int *nl_atom_numbers, int *nl_atom_serial) { - int i, j; - for (i = blockIdx.x * blockDim.x + threadIdx.x; i < atom_numbers; i += gridDim.x * blockDim.x) { - nl_atom_numbers[i] = nl[i].atom_numbers; - for (j = blockIdx.y * blockDim.y + threadIdx.y; j < max_neighbor_numbers; j += gridDim.y * blockDim.y) { - if (j < nl_atom_numbers[i]) { - nl_atom_serial[i * max_neighbor_numbers + j] = nl[i].atom_serial[j]; - } else { - nl_atom_serial[i * max_neighbor_numbers + j] = 0; - } - } - } -} - -static __global__ void Mul_half(float *src, float *dst) { - int index = threadIdx.x; - if (index < 3) { - dst[index] = src[index] * 0.5; - } -} - -static __global__ void Mul_quarter(float *src, float *dst) { - int index = threadIdx.x; - if (index < 3) { - dst[index] = src[index] * 0.25; - } -} - -// old neighbor list update functions -__global__ void Crd_To_Uint_Crd_Half(const int atom_numbers, float *scale_factor, const VECTOR *crd, - UNSIGNED_INT_VECTOR *uint_crd) { - int atom_i = blockDim.x * blockIdx.x + threadIdx.x; - if (atom_i < atom_numbers) { - uint_crd[atom_i].uint_x = crd[atom_i].x * scale_factor[0]; - uint_crd[atom_i].uint_y = crd[atom_i].y * scale_factor[1]; - uint_crd[atom_i].uint_z = crd[atom_i].z * scale_factor[2]; - uint_crd[atom_i].uint_x = uint_crd[atom_i].uint_x << 1; - uint_crd[atom_i].uint_y = uint_crd[atom_i].uint_y << 1; - uint_crd[atom_i].uint_z = uint_crd[atom_i].uint_z << 1; - } -} - -__global__ void Vector_Translation(const int vector_numbers, VECTOR *vec_list, const VECTOR translation_vec) { - int i = blockDim.x * blockIdx.x + threadIdx.x; - if (i < vector_numbers) { - vec_list[i].x = vec_list[i].x + translation_vec.x; - vec_list[i].y = vec_list[i].y + translation_vec.y; - vec_list[i].z = vec_list[i].z + translation_vec.z; - } -} - -__global__ void Vector_Translation(const int vector_numbers, VECTOR *vec_list, const VECTOR *translation_vec) { - int i = blockDim.x * blockIdx.x + threadIdx.x; - if (i < vector_numbers) { - vec_list[i].x = vec_list[i].x + translation_vec[0].x; - vec_list[i].y = vec_list[i].y + translation_vec[0].y; - vec_list[i].z = vec_list[i].z + translation_vec[0].z; - } -} - -__global__ void Is_need_refresh_neighbor_list_cuda(const int atom_numbers, const VECTOR *crd, const VECTOR *old_crd, - const float half_skin_square, int *need_refresh_flag) { - int i = blockDim.x * blockIdx.x + threadIdx.x; - if (i < atom_numbers) { - VECTOR r1 = crd[i]; - VECTOR r2 = old_crd[i]; - r1.x = r1.x - r2.x; - r1.y = r1.y - r2.y; - r1.z = r1.z - r2.z; - float r1_2 = r1.x * r1.x + r1.y * r1.y + r1.z * r1.z; - if (r1_2 > half_skin_square) { - atomicExch(&need_refresh_flag[0], 1); - } - } -} - -void Refresh_Neighbor_List_Half(int *refresh_sign, const int thread, const int atom_numbers, VECTOR *crd, - VECTOR *old_crd, UNSIGNED_INT_VECTOR *uint_crd, float *crd_to_uint_crd_cof, - float *uint_dr_to_dr_cof, int *atom_in_grid_serial, const float skin, float *box_length, - const GRID_POINTER *gpointer, GRID_BUCKET *bucket, int *atom_numbers_in_grid_bucket, - NEIGHBOR_LIST *d_nl, int *excluded_list_start, int *excluded_list, - int *excluded_numbers, float cutoff_skin_square, int grid_numbers, - float *grid_length_inverse, int *grid_N, int nxy, cudaStream_t stream) { - std::vector h_refresh_sign(1); - cudaMemcpyAsync(h_refresh_sign.data(), refresh_sign, sizeof(int), 
cudaMemcpyDeviceToHost, stream); - if (h_refresh_sign[0] == 1) { - VECTOR trans_vec = {-skin, -skin, -skin}; - Clear_Grid_Bucket<<(grid_numbers) / thread), thread, 0, stream>>>( - grid_numbers, atom_numbers_in_grid_bucket, bucket); - - Vector_Translation<<(atom_numbers) / thread), thread, 0, stream>>>(atom_numbers, crd, - trans_vec); - - Crd_Periodic_Map<<(atom_numbers) / thread), thread, 0, stream>>>(atom_numbers, crd, - box_length); - - Find_Atom_In_Grid_Serial<<(atom_numbers) / thread), thread, 0, stream>>>( - atom_numbers, grid_length_inverse, crd, grid_N, nxy, atom_in_grid_serial); - - trans_vec.x = -trans_vec.x; - trans_vec.y = -trans_vec.y; - trans_vec.z = -trans_vec.z; - - Vector_Translation<<(atom_numbers) / thread), thread, 0, stream>>>(atom_numbers, crd, - trans_vec); - - Copy_List<<(3. * atom_numbers) / thread), thread, 0, stream>>>( - 3 * atom_numbers, reinterpret_cast(crd), reinterpret_cast(old_crd)); - - Put_Atom_In_Grid_Bucket<<(atom_numbers) / thread), thread, 0, stream>>>( - atom_numbers, atom_in_grid_serial, bucket, atom_numbers_in_grid_bucket); - - Crd_To_Uint_Crd_Half<<(atom_numbers) / thread), thread, 0, stream>>>( - atom_numbers, crd_to_uint_crd_cof, crd, uint_crd); - - Find_atom_neighbors<<(atom_numbers) / thread), thread, 0, stream>>>( - atom_numbers, uint_crd, uint_dr_to_dr_cof, atom_in_grid_serial, gpointer, bucket, atom_numbers_in_grid_bucket, - d_nl, cutoff_skin_square); - - Delete_Excluded_Atoms_Serial_In_Neighbor_List<<(atom_numbers) / thread), thread, 0, - stream>>>(atom_numbers, d_nl, excluded_list_start, excluded_list, - excluded_numbers); - h_refresh_sign[0] = 0; - } -} - -void Refresh_Neighbor_List_First_Time(int *refresh_sign, const int thread, const int atom_numbers, VECTOR *crd, - VECTOR *old_crd, UNSIGNED_INT_VECTOR *uint_crd, float *crd_to_uint_crd_cof, - float *uint_dr_to_dr_cof, int *atom_in_grid_serial, const float skin, - float *box_length, const GRID_POINTER *gpointer, GRID_BUCKET *bucket, - int *atom_numbers_in_grid_bucket, NEIGHBOR_LIST *d_nl, int *excluded_list_start, - int *excluded_list, int *excluded_numbers, float cutoff_skin_square, - int grid_numbers, float *grid_length_inverse, int *grid_N, int nxy, - cudaStream_t stream) { - VECTOR trans_vec = {skin, skin, skin}; - Clear_Grid_Bucket<<(grid_numbers) / 32), 32, 0, stream>>>( - grid_numbers, atom_numbers_in_grid_bucket, bucket); - Crd_Periodic_Map<<(atom_numbers) / 32), 32, 0, stream>>>(atom_numbers, crd, box_length); - Find_Atom_In_Grid_Serial<<(atom_numbers) / 32), 32, 0, stream>>>( - atom_numbers, grid_length_inverse, crd, grid_N, nxy, atom_in_grid_serial); - Vector_Translation<<(atom_numbers) / 32), 32, 0, stream>>>(atom_numbers, crd, trans_vec); - Copy_List<<(3. 
* atom_numbers) / 32), 32, 0, stream>>>( - 3 * atom_numbers, reinterpret_cast(crd), reinterpret_cast(old_crd)); - Put_Atom_In_Grid_Bucket<<(atom_numbers) / 32), 32, 0, stream>>>( - atom_numbers, atom_in_grid_serial, bucket, atom_numbers_in_grid_bucket); - Crd_To_Uint_Crd_Half<<(atom_numbers) / 32), 32, 0, stream>>>( - atom_numbers, crd_to_uint_crd_cof, crd, uint_crd); - - Find_atom_neighbors<<(atom_numbers) / thread), thread, 0, stream>>>( - atom_numbers, uint_crd, uint_dr_to_dr_cof, atom_in_grid_serial, gpointer, bucket, atom_numbers_in_grid_bucket, d_nl, - cutoff_skin_square); - Delete_Excluded_Atoms_Serial_In_Neighbor_List<<(atom_numbers) / thread), thread, 0, - stream>>>(atom_numbers, d_nl, excluded_list_start, excluded_list, - excluded_numbers); -} - -__global__ void copy_neighbor_list_atom_number(int atom_numbers, NEIGHBOR_LIST *nl, int *nl_atom_numbers) { - for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < atom_numbers; i += gridDim.x * blockDim.x) { - nl_atom_numbers[i] = nl[i].atom_numbers; - } -} - -void ConstructNeighborListHalf(int atom_numbers, int max_neighbor_numbers, int *nl_atom_numbers, int *nl_atom_serial, - NEIGHBOR_LIST *nl, cudaStream_t stream) { - construct_neighbor_list_kernel<<(atom_numbers) / 128), 128, 0, stream>>>( - atom_numbers, max_neighbor_numbers, nl_atom_numbers, nl_atom_serial, nl); -} - -void CopyNeighborListHalf(int atom_numbers, NEIGHBOR_LIST *nl, int *nl_atom_numbers, cudaStream_t stream) { - copy_neighbor_list_atom_number<<(atom_numbers) / 128), 128, 0, stream>>>(atom_numbers, nl, - nl_atom_numbers); -} - -void Refresh_Neighbor_List_No_Check_Half(int grid_numbers, int atom_numbers, float skin, int nxy, - float cutoff_skin_square, int *grid_N, float *box_length, - int *atom_numbers_in_grid_bucket, float *grid_length_inverse, - int *atom_in_grid_serial, GRID_BUCKET *bucket, VECTOR *crd, VECTOR *old_crd, - float *crd_to_uint_crd_cof, UNSIGNED_INT_VECTOR *uint_crd, - float *uint_dr_to_dr_cof, GRID_POINTER *gpointer, NEIGHBOR_LIST *d_nl, - int *excluded_list_start, int *excluded_list, int *excluded_numbers, - cudaStream_t stream) { - VECTOR trans_vec = {-skin, -skin, -skin}; - - Clear_Grid_Bucket<<(grid_numbers) / 32), 32, 0, stream>>>( - grid_numbers, atom_numbers_in_grid_bucket, bucket); - - Vector_Translation<<(atom_numbers) / 32), 32, 0, stream>>>(atom_numbers, crd, trans_vec); - - Crd_Periodic_Map<<(atom_numbers) / 32), 32, 0, stream>>>(atom_numbers, crd, box_length); - - Find_Atom_In_Grid_Serial<<(atom_numbers) / 32), 32, 0, stream>>>( - atom_numbers, grid_length_inverse, crd, grid_N, nxy, atom_in_grid_serial); - trans_vec.x = -trans_vec.x; - trans_vec.y = -trans_vec.y; - trans_vec.z = -trans_vec.z; - Vector_Translation<<(atom_numbers) / 32), 32, 0, stream>>>(atom_numbers, crd, trans_vec); - - cudaMemcpyAsync(old_crd, crd, sizeof(VECTOR) * atom_numbers, cudaMemcpyDeviceToDevice, stream); - - Put_Atom_In_Grid_Bucket<<(atom_numbers) / 32), 32, 0, stream>>>( - atom_numbers, atom_in_grid_serial, bucket, atom_numbers_in_grid_bucket); - - Crd_To_Uint_Crd_Half<<(atom_numbers) / 32), 32, 0, stream>>>( - atom_numbers, crd_to_uint_crd_cof, crd, uint_crd); - - Find_atom_neighbors<<(atom_numbers) / 32), 32, 0, stream>>>( - atom_numbers, uint_crd, uint_dr_to_dr_cof, atom_in_grid_serial, gpointer, bucket, atom_numbers_in_grid_bucket, d_nl, - cutoff_skin_square); - - Delete_Excluded_Atoms_Serial_In_Neighbor_List<<(atom_numbers) / 32), 32, 0, stream>>>( - atom_numbers, d_nl, excluded_list_start, excluded_list, excluded_numbers); -} - -void 
NeighborListUpdate(int grid_numbers, int atom_numbers, int *d_refresh_count, int refresh_interval,
-                   int not_first_time, float skin, int nxy, float cutoff_square, float cutoff_with_skin_square,
-                   int *grid_N, float *box_length, int *atom_numbers_in_grid_bucket, float *grid_length_inverse,
-                   int *atom_in_grid_serial, GRID_BUCKET *bucket, float *crd, float *old_crd,
-                   float *crd_to_uint_crd_cof, float *half_crd_to_uint_crd_cof, unsigned int *uint_crd,
-                   float *uint_dr_to_dr_cof, GRID_POINTER *gpointer, NEIGHBOR_LIST *d_nl, int *excluded_list_start,
-                   int *excluded_list, int *excluded_numbers, float half_skin_square,
-                   int *is_need_refresh_neighbor_list, cudaStream_t stream) {
-  if (not_first_time) {
-    if (refresh_interval > 0) {
-      std::vector<int> refresh_count_list(1);
-      cudaMemcpyAsync(refresh_count_list.data(), d_refresh_count, sizeof(int), cudaMemcpyDeviceToHost, stream);
-      cudaStreamSynchronize(stream);
-      int refresh_count = refresh_count_list[0];
-
-      if (refresh_count % refresh_interval == 0) {
-        Mul_half<<<1, 3, 0, stream>>>(crd_to_uint_crd_cof, half_crd_to_uint_crd_cof);
-        Refresh_Neighbor_List_No_Check_Half(
-          grid_numbers, atom_numbers, skin, nxy, cutoff_square, grid_N, box_length, atom_numbers_in_grid_bucket,
-          grid_length_inverse, atom_in_grid_serial, bucket, reinterpret_cast<VECTOR *>(crd),
-          reinterpret_cast<VECTOR *>(old_crd), half_crd_to_uint_crd_cof,
-          reinterpret_cast<UNSIGNED_INT_VECTOR *>(uint_crd), uint_dr_to_dr_cof, gpointer, d_nl, excluded_list_start,
-          excluded_list, excluded_numbers, stream);
-      }
-      refresh_count += 1;
-      cudaMemcpyAsync(d_refresh_count, &refresh_count, sizeof(int), cudaMemcpyHostToDevice, stream);
-    } else {
-      Is_need_refresh_neighbor_list_cuda<<<ceilf(static_cast<float>(atom_numbers) / 128), 128, 0, stream>>>(
-        atom_numbers, reinterpret_cast<VECTOR *>(crd), reinterpret_cast<VECTOR *>(old_crd), half_skin_square,
-        is_need_refresh_neighbor_list);
-      Mul_half<<<1, 3, 0, stream>>>(crd_to_uint_crd_cof, half_crd_to_uint_crd_cof);
-      Refresh_Neighbor_List_Half(is_need_refresh_neighbor_list, 32, atom_numbers, reinterpret_cast<VECTOR *>(crd),
-                                 reinterpret_cast<VECTOR *>(old_crd), reinterpret_cast<UNSIGNED_INT_VECTOR *>(uint_crd),
-                                 half_crd_to_uint_crd_cof, uint_dr_to_dr_cof, atom_in_grid_serial, skin, box_length,
-                                 gpointer, bucket, atom_numbers_in_grid_bucket, d_nl, excluded_list_start,
-                                 excluded_list, excluded_numbers, cutoff_with_skin_square, grid_numbers,
-                                 grid_length_inverse, grid_N, nxy, stream);
-    }
-  } else {
-    Mul_half<<<1, 3, 0, stream>>>(crd_to_uint_crd_cof, half_crd_to_uint_crd_cof);
-    Refresh_Neighbor_List_First_Time(
-      is_need_refresh_neighbor_list, 32, atom_numbers, reinterpret_cast<VECTOR *>(crd),
-      reinterpret_cast<VECTOR *>(old_crd), reinterpret_cast<UNSIGNED_INT_VECTOR *>(uint_crd), half_crd_to_uint_crd_cof,
-      uint_dr_to_dr_cof, atom_in_grid_serial, skin, box_length, gpointer, bucket, atom_numbers_in_grid_bucket, d_nl,
-      excluded_list_start, excluded_list, excluded_numbers, cutoff_with_skin_square, grid_numbers, grid_length_inverse,
-      grid_N, nxy, stream);
-  }
-}
-
-// new neighbor list update functions
-
-__device__ __host__ VECTOR Get_Periodic_Displacement_Update(const VECTOR vec_a, const VECTOR vec_b,
-                                                            const VECTOR box_length) {
-  VECTOR dr;
-  dr.x = vec_a.x - vec_b.x;
-  dr.y = vec_a.y - vec_b.y;
-  dr.z = vec_a.z - vec_b.z;
-
-  dr.x = dr.x - floorf(dr.x / box_length.x + 0.5) * box_length.x;
-  dr.y = dr.y - floorf(dr.y / box_length.y + 0.5) * box_length.y;
-  dr.z = dr.z - floorf(dr.z / box_length.z + 0.5) * box_length.z;
-  return dr;
-}
-
-__global__ void Is_need_refresh_neighbor_list_cuda(const int atom_numbers, const VECTOR *crd, const VECTOR *old_crd,
-                                                   const VECTOR *box_length, const float half_skin_square,
-                                                   int *need_refresh_flag) {
-  int i = blockDim.x * blockIdx.x + threadIdx.x;
-  if (i < atom_numbers) {
-    VECTOR r1 = crd[i];
-    VECTOR r2 = old_crd[i];
-    r1 = Get_Periodic_Displacement_Update(r1, r2, box_length[0]);
-    float r1_2 = r1.x * r1.x + r1.y * r1.y + r1.z * r1.z;
-    if (r1_2 > half_skin_square) {
-      atomicExch(&need_refresh_flag[0], 1);
-    }
-  }
-}
-
-void Refresh_Neighbor_List(int *refresh_sign, const int thread, const int atom_numbers, VECTOR *crd, VECTOR *old_crd,
-                           UNSIGNED_INT_VECTOR *uint_crd, float *crd_to_uint_crd_cof, float *uint_dr_to_dr_cof,
-                           int *atom_in_grid_serial, const float skin, float *box_length, const GRID_POINTER *gpointer,
-                           GRID_BUCKET *bucket, int *atom_numbers_in_grid_bucket, NEIGHBOR_LIST *d_nl,
-                           int *excluded_list_start, int *excluded_list, int *excluded_numbers,
-                           float cutoff_skin_square, int grid_numbers, float *grid_length_inverse, int *grid_N, int nxy,
-                           cudaStream_t stream) {
-  std::vector<int> h_refresh_sign(1);
-  cudaMemcpyAsync(h_refresh_sign.data(), refresh_sign, sizeof(int), cudaMemcpyDeviceToHost, stream);
-  cudaStreamSynchronize(stream);  // the flag must arrive on the host before it is read below
-  if (h_refresh_sign[0] == 1) {
-    Clear_Grid_Bucket<<<ceilf(static_cast<float>(grid_numbers) / thread), thread, 0, stream>>>(
-      grid_numbers, atom_numbers_in_grid_bucket, bucket);
-
-    Crd_Periodic_Map<<<ceilf(static_cast<float>(atom_numbers) / thread), thread, 0, stream>>>(atom_numbers, crd,
-                                                                                              box_length);
-
-    Find_Atom_In_Grid_Serial<<<ceilf(static_cast<float>(atom_numbers) / thread), thread, 0, stream>>>(
-      atom_numbers, grid_length_inverse, crd, grid_N, nxy, atom_in_grid_serial);
-
-    Copy_List<<<ceilf(static_cast<float>(3. * atom_numbers) / thread), thread, 0, stream>>>(
-      3 * atom_numbers, reinterpret_cast<float *>(crd), reinterpret_cast<float *>(old_crd));
-
-    Put_Atom_In_Grid_Bucket<<<ceilf(static_cast<float>(atom_numbers) / thread), thread, 0, stream>>>(
-      atom_numbers, atom_in_grid_serial, bucket, atom_numbers_in_grid_bucket);
-
-    Crd_To_Uint_Crd<<<ceilf(static_cast<float>(atom_numbers) / thread), thread, 0, stream>>>(
-      atom_numbers, crd_to_uint_crd_cof, crd, uint_crd);
-
-    Find_atom_neighbors<<<ceilf(static_cast<float>(atom_numbers) / thread), thread, 0, stream>>>(
-      atom_numbers, uint_crd, uint_dr_to_dr_cof, atom_in_grid_serial, gpointer, bucket, atom_numbers_in_grid_bucket,
-      d_nl, cutoff_skin_square);
-
-    Delete_Excluded_Atoms_Serial_In_Neighbor_List<<<ceilf(static_cast<float>(atom_numbers) / thread), thread, 0,
-                                                    stream>>>(atom_numbers, d_nl, excluded_list_start, excluded_list,
-                                                              excluded_numbers);
-    h_refresh_sign[0] = 0;
-  }
-}
-
-void Refresh_Neighbor_List_No_Check(int grid_numbers, int atom_numbers, float skin, int nxy, float cutoff_skin_square,
-                                    int *grid_N, float *box_length, int *atom_numbers_in_grid_bucket,
-                                    float *grid_length_inverse, int *atom_in_grid_serial, GRID_BUCKET *bucket,
-                                    VECTOR *crd, VECTOR *old_crd, float *crd_to_uint_crd_cof,
-                                    UNSIGNED_INT_VECTOR *uint_crd, float *uint_dr_to_dr_cof, GRID_POINTER *gpointer,
-                                    NEIGHBOR_LIST *d_nl, int *excluded_list_start, int *excluded_list,
-                                    int *excluded_numbers, cudaStream_t stream) {
-  Clear_Grid_Bucket<<<ceilf(static_cast<float>(grid_numbers) / 32), 32, 0, stream>>>(
-    grid_numbers, atom_numbers_in_grid_bucket, bucket);
-
-  Crd_Periodic_Map<<<ceilf(static_cast<float>(atom_numbers) / 32), 32, 0, stream>>>(atom_numbers, crd, box_length);
-
-  Find_Atom_In_Grid_Serial<<<ceilf(static_cast<float>(atom_numbers) / 32), 32, 0, stream>>>(
-    atom_numbers, grid_length_inverse, crd, grid_N, nxy, atom_in_grid_serial);
-
-  cudaMemcpyAsync(old_crd, crd, sizeof(VECTOR) * atom_numbers, cudaMemcpyDeviceToDevice, stream);
-
-  Put_Atom_In_Grid_Bucket<<<ceilf(static_cast<float>(atom_numbers) / 32), 32, 0, stream>>>(
-    atom_numbers, atom_in_grid_serial, bucket, atom_numbers_in_grid_bucket);
-
-  Crd_To_Uint_Crd<<<ceilf(static_cast<float>(atom_numbers) / 32), 32, 0, stream>>>(atom_numbers, crd_to_uint_crd_cof,
-                                                                                   crd, uint_crd);
-
-  Find_atom_neighbors<<<ceilf(static_cast<float>(atom_numbers) / 32), 32, 0, stream>>>(
-    atom_numbers, uint_crd, uint_dr_to_dr_cof, atom_in_grid_serial, gpointer, bucket, atom_numbers_in_grid_bucket, d_nl,
-    cutoff_skin_square);
-
-  Delete_Excluded_Atoms_Serial_In_Neighbor_List<<<ceilf(static_cast<float>(atom_numbers) / 32), 32, 0, stream>>>(
-    atom_numbers, d_nl, excluded_list_start, excluded_list, excluded_numbers);
-}
-
-void CopyNeighborList(int atom_numbers, int max_neighbor_numbers, NEIGHBOR_LIST *nl, int *nl_atom_numbers,
-                      int *nl_atom_serial, cudaStream_t stream) {
-  copy_neighbor_list_atom_number<<<ceilf(static_cast<float>(atom_numbers) / 128), 128, 0, stream>>>(
-    atom_numbers, max_neighbor_numbers, nl, nl_atom_numbers, nl_atom_serial);
-}
-
-void ConstructNeighborList(int atom_numbers, int max_neighbor_numbers, int *nl_atom_numbers, int *nl_atom_serial,
-                           NEIGHBOR_LIST *nl, cudaStream_t stream) {
-  construct_neighbor_list_kernel<<<ceilf(static_cast<float>(atom_numbers) / 128), 128, 0, stream>>>(
-    atom_numbers, max_neighbor_numbers, nl_atom_numbers, nl_atom_serial, nl);
-}
-
-int refresh_count = 0;
-
-void NeighborListRefresh(int grid_numbers, int atom_numbers, int *d_refresh_count, int refresh_interval,
-                         int not_first_time, float skin, int nxy, float cutoff_square, float cutoff_with_skin_square,
-                         int *grid_N, float *box_length, int *atom_numbers_in_grid_bucket, float *grid_length_inverse,
-                         int *atom_in_grid_serial, GRID_BUCKET *bucket, float *crd, float *old_crd,
-                         float *crd_to_uint_crd_cof, float *half_crd_to_uint_crd_cof, unsigned int *uint_crd,
-                         float *uint_dr_to_dr_cof, GRID_POINTER *gpointer, NEIGHBOR_LIST *d_nl,
-                         int *excluded_list_start, int *excluded_list, int *excluded_numbers, float half_skin_square,
-                         int *is_need_refresh_neighbor_list, int forced_update, int forced_check, cudaStream_t stream) {
-  if (forced_update) {
-    Mul_quarter<<<1, 3, 0, stream>>>(crd_to_uint_crd_cof, half_crd_to_uint_crd_cof);
-    Refresh_Neighbor_List_No_Check(
-      grid_numbers, atom_numbers, skin, nxy, cutoff_square, grid_N, box_length, atom_numbers_in_grid_bucket,
-      grid_length_inverse, atom_in_grid_serial, bucket, reinterpret_cast<VECTOR *>(crd),
-      reinterpret_cast<VECTOR *>(old_crd), half_crd_to_uint_crd_cof, reinterpret_cast<UNSIGNED_INT_VECTOR *>(uint_crd),
-      uint_dr_to_dr_cof, gpointer, d_nl, excluded_list_start, excluded_list, excluded_numbers, stream);
-  } else if (refresh_interval > 0 && !forced_check) {
-    if (refresh_count % refresh_interval == 0) {
-      Mul_quarter<<<1, 3, 0, stream>>>(crd_to_uint_crd_cof, half_crd_to_uint_crd_cof);
-      Refresh_Neighbor_List_No_Check(grid_numbers, atom_numbers, skin, nxy, cutoff_square, grid_N, box_length,
-                                     atom_numbers_in_grid_bucket, grid_length_inverse, atom_in_grid_serial, bucket,
-                                     reinterpret_cast<VECTOR *>(crd), reinterpret_cast<VECTOR *>(old_crd),
-                                     half_crd_to_uint_crd_cof, reinterpret_cast<UNSIGNED_INT_VECTOR *>(uint_crd),
-                                     uint_dr_to_dr_cof, gpointer, d_nl, excluded_list_start, excluded_list,
-                                     excluded_numbers, stream);
-    }
-    refresh_count += 1;
-  } else {
-    Is_need_refresh_neighbor_list_cuda<<<ceilf(static_cast<float>(atom_numbers) / 128), 128, 0, stream>>>(
-      atom_numbers, reinterpret_cast<VECTOR *>(crd), reinterpret_cast<VECTOR *>(old_crd),
-      reinterpret_cast<VECTOR *>(box_length), half_skin_square, is_need_refresh_neighbor_list);
-    Mul_quarter<<<1, 3, 0, stream>>>(crd_to_uint_crd_cof, half_crd_to_uint_crd_cof);
-    Refresh_Neighbor_List(is_need_refresh_neighbor_list, 32, atom_numbers, reinterpret_cast<VECTOR *>(crd),
-                          reinterpret_cast<VECTOR *>(old_crd), reinterpret_cast<UNSIGNED_INT_VECTOR *>(uint_crd),
-                          half_crd_to_uint_crd_cof, uint_dr_to_dr_cof, atom_in_grid_serial, skin, box_length, gpointer,
-                          bucket, atom_numbers_in_grid_bucket, d_nl, excluded_list_start, excluded_list,
-                          excluded_numbers, cutoff_with_skin_square, grid_numbers, grid_length_inverse, grid_N, nxy,
-                          stream);
-  }
-}
diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/neighbor_list/neighbor_list_impl.cuh b/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/neighbor_list/neighbor_list_impl.cuh
deleted file mode 100644
index f5204e8de84..00000000000
--- a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/neighbor_list/neighbor_list_impl.cuh
+++ /dev/null
@@ -1,82 +0,0 @@
-/**
- * Copyright 2021 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-/**
- * Note:
- * NeighborListUpdate. This is an experimental interface that is subject to change and/or deletion.
- */
-
-#ifndef MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_SPONGE_NEIGHBOR_LIST_IMPL_H_
-#define MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_SPONGE_NEIGHBOR_LIST_IMPL_H_
-#include <cuda_runtime_api.h>
-#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/cuda_common.h"
-#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/cuda_device_info.h"
-
-struct VECTOR {
-  float x;
-  float y;
-  float z;
-};
-struct INT_VECTOR {
-  int int_x;
-  int int_y;
-  int int_z;
-};
-struct UNSIGNED_INT_VECTOR {
-  unsigned int uint_x;
-  unsigned int uint_y;
-  unsigned int uint_z;
-};
-struct NEIGHBOR_LIST {
-  int atom_numbers;
-  int *atom_serial;
-};
-struct GRID_BUCKET {
-  int *atom_serial;
-};
-struct GRID_POINTER {
-  int *grid_serial;
-};
-
-CUDA_LIB_EXPORT void ConstructNeighborList(int grid_numbers, int max_neighbor_numbers, int *nl_atom_numbers,
-                                           int *nl_atom_serial, NEIGHBOR_LIST *nl, cudaStream_t stream);
-
-CUDA_LIB_EXPORT void CopyNeighborList(int atom_numbers, int max_neighbor_numbers, NEIGHBOR_LIST *nl,
-                                      int *nl_atom_numbers, int *nl_atom_serial, cudaStream_t stream);
-
-CUDA_LIB_EXPORT void NeighborListRefresh(
-  int grid_numbers, int atom_numbers, int *d_refresh_count, int refresh_interval, int not_first_time, float skin,
-  int nxy, float cutoff_square, float cutoff_with_skin_square, int *grid_N, float *box_length,
-  int *atom_numbers_in_grid_bucket, float *grid_length_inverse, int *atom_in_grid_serial, GRID_BUCKET *bucket,
-  float *crd, float *old_crd, float *crd_to_uint_crd_cof, float *half_crd_to_uint_crd_cof, unsigned int *uint_crd,
-  float *uint_dr_to_dr_cof, GRID_POINTER *gpointer, NEIGHBOR_LIST *d_nl, int *excluded_list_start, int *excluded_list,
-  int *excluded_numbers, float half_skin_square, int *is_need_refresh_neighbor_list, int forced_update,
-  int forced_check, cudaStream_t stream);
-
-CUDA_LIB_EXPORT void ConstructNeighborListHalf(int grid_numbers, int max_neighbor_numbers, int *nl_atom_numbers,
-                                               int *nl_atom_serial, NEIGHBOR_LIST *nl, cudaStream_t stream);
-
-CUDA_LIB_EXPORT void CopyNeighborListHalf(int atom_numbers, NEIGHBOR_LIST *nl, int *nl_atom_numbers,
-                                          cudaStream_t stream);
-
-CUDA_LIB_EXPORT void NeighborListUpdate(
-  int grid_numbers, int atom_numbers, int *d_refresh_count, int refresh_interval, int not_first_time, float skin,
-  int nxy, float cutoff_square, float cutoff_with_skin_square, int *grid_N, float *box_length,
-  int *atom_numbers_in_grid_bucket, float *grid_length_inverse, int *atom_in_grid_serial, GRID_BUCKET *bucket,
-  float *crd, float *old_crd, float *crd_to_uint_crd_cof, float *half_crd_to_uint_crd_cof, unsigned int *uint_crd,
-  float *uint_dr_to_dr_cof, GRID_POINTER *gpointer, NEIGHBOR_LIST *d_nl, int *excluded_list_start, int *excluded_list,
-  int *excluded_numbers, float half_skin_square, int *is_need_refresh_neighbor_list, cudaStream_t stream);
-
-#endif
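A note for readers tracing the removed refresh logic: the list is built out to cutoff + skin, and NeighborListRefresh only rebuilds the cell buckets once some atom has drifted more than half the skin since the last rebuild. A minimal standalone restatement of that criterion (hypothetical names, float3 instead of VECTOR, no MindSpore headers; the removed kernel additionally applies the periodic minimum-image correction before taking the squared norm):

#include <cuda_runtime_api.h>

// One flag for the whole system: set to 1 as soon as any atom has moved
// farther than skin/2 from the position recorded at the last rebuild.
__global__ void NeedsRefresh(int atom_numbers, const float3 *crd, const float3 *old_crd,
                             float half_skin_square, int *need_refresh_flag) {
  int i = blockDim.x * blockIdx.x + threadIdx.x;
  if (i < atom_numbers) {
    float dx = crd[i].x - old_crd[i].x;
    float dy = crd[i].y - old_crd[i].y;
    float dz = crd[i].z - old_crd[i].z;
    if (dx * dx + dy * dy + dz * dz > half_skin_square) {
      atomicExch(need_refresh_flag, 1);
    }
  }
}

Building to cutoff + skin is what makes this test sufficient: two atoms can only enter each other's cutoff sphere after at least one of them has covered half the skin.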
diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/nvtit/md_iteration_gradient_descent_impl.cu b/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/nvtit/md_iteration_gradient_descent_impl.cu
deleted file mode 100644
index fc2102e52c0..00000000000
--- a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/nvtit/md_iteration_gradient_descent_impl.cu
+++ /dev/null
@@ -1,41 +0,0 @@
-/**
- * Copyright 2021 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "plugin/device/gpu/kernel/cuda_impl/sponge/nvtit/md_iteration_gradient_descent_impl.cuh"
-#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/util.cuh"
-#include "plugin/device/gpu/kernel/cuda_impl/sponge/common_sponge.cuh"
-
-__global__ void MD_Iteration_Gradient_Descent(const int atom_numbers, VECTOR *crd, VECTOR *frc,
-                                              const float learning_rate) {
-  int i = blockDim.x * blockIdx.x + threadIdx.x;
-  if (i < atom_numbers) {
-    crd[i].x = crd[i].x + learning_rate * frc[i].x;
-    crd[i].y = crd[i].y + learning_rate * frc[i].y;
-    crd[i].z = crd[i].z + learning_rate * frc[i].z;
-
-    frc[i].x = 0.;
-    frc[i].y = 0.;
-    frc[i].z = 0.;
-  }
-}
-
-void MDIterationGradientDescent(const int atom_numbers, float *crd, float *frc, const float learning_rate,
-                                cudaStream_t stream) {
-  VECTOR *d_crd = reinterpret_cast<VECTOR *>(crd);
-  VECTOR *d_frc = reinterpret_cast<VECTOR *>(frc);
-  MD_Iteration_Gradient_Descent<<<ceilf(static_cast<float>(atom_numbers) / 128), 128, 0, stream>>>(
-    atom_numbers, d_crd, d_frc, learning_rate);
-}
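The kernel above is plain steepest descent on the potential energy surface: the force array holds F = -grad E, so adding a force-proportional term moves the system downhill, and the force buffer is zeroed so the next evaluation can accumulate into it. In the kernel's notation (crd = x, frc = F, learning_rate = eta):

\[
x_{t+1} = x_t + \eta\, F(x_t), \qquad F(x_t) = -\nabla E(x_t),
\]
\[
E(x_{t+1}) \approx E(x_t) - \eta\,\lVert \nabla E(x_t) \rVert^2 \le E(x_t) \quad \text{for sufficiently small } \eta .
\]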
diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/nvtit/md_iteration_gradient_descent_impl.cuh b/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/nvtit/md_iteration_gradient_descent_impl.cuh
deleted file mode 100644
index 8299fc491ca..00000000000
--- a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/nvtit/md_iteration_gradient_descent_impl.cuh
+++ /dev/null
@@ -1,26 +0,0 @@
-/**
- * Copyright 2021 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_SPONGE_MD_ITERATION_GRADIENT_DESCENT_IMPL_H_
-#define MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_SPONGE_MD_ITERATION_GRADIENT_DESCENT_IMPL_H_
-
-#include <cuda_runtime_api.h>
-#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/cuda_common.h"
-#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/cuda_device_info.h"
-CUDA_LIB_EXPORT void MDIterationGradientDescent(const int atom_numbers, float *crd, float *frc,
-                                                const float learning_rate, cudaStream_t stream);
-
-#endif
diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/nvtit/md_iteration_leap_frog_impl.cu b/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/nvtit/md_iteration_leap_frog_impl.cu
deleted file mode 100644
index 8d00fc222e1..00000000000
--- a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/nvtit/md_iteration_leap_frog_impl.cu
+++ /dev/null
@@ -1,54 +0,0 @@
-/**
- * Copyright 2021 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-/**
- * Note:
- * MDIterationLeapFrog. This is an experimental interface that is subject to change and/or deletion.
- */
-
-#include "plugin/device/gpu/kernel/cuda_impl/sponge/nvtit/md_iteration_leap_frog_impl.cuh"
-#include "plugin/device/gpu/kernel/cuda_impl/sponge/common_sponge.cuh"
-
-__global__ void MD_Iteration_Leap_Frog(const int atom_numbers, VECTOR *vel, VECTOR *crd, VECTOR *frc, VECTOR *acc,
-                                       const float *inverse_mass, const float dt) {
-  int i = blockDim.x * blockIdx.x + threadIdx.x;
-  if (i < atom_numbers) {
-    acc[i].x = inverse_mass[i] * frc[i].x;
-    acc[i].y = inverse_mass[i] * frc[i].y;
-    acc[i].z = inverse_mass[i] * frc[i].z;
-
-    vel[i].x = vel[i].x + dt * acc[i].x;
-    vel[i].y = vel[i].y + dt * acc[i].y;
-    vel[i].z = vel[i].z + dt * acc[i].z;
-
-    crd[i].x = crd[i].x + dt * vel[i].x;
-    crd[i].y = crd[i].y + dt * vel[i].y;
-    crd[i].z = crd[i].z + dt * vel[i].z;
-
-    frc[i].x = 0.;
-    frc[i].y = 0.;
-    frc[i].z = 0.;
-  }
-}
-
-void MDIterationLeapFrog(const int atom_numbers, float *vel, float *crd, float *frc, float *acc,
-                         const float *inverse_mass, const float dt, cudaStream_t stream) {
-  VECTOR *d_vel = reinterpret_cast<VECTOR *>(vel);
-  VECTOR *d_crd = reinterpret_cast<VECTOR *>(crd);
-  VECTOR *d_frc = reinterpret_cast<VECTOR *>(frc);
-  VECTOR *d_acc = reinterpret_cast<VECTOR *>(acc);
-  MD_Iteration_Leap_Frog<<<ceilf(static_cast<float>(atom_numbers) / 128), 128, 0, stream>>>(
-    atom_numbers, d_vel, d_crd, d_frc, d_acc, inverse_mass, dt);
-}
diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/nvtit/md_iteration_leap_frog_impl.cuh b/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/nvtit/md_iteration_leap_frog_impl.cuh
deleted file mode 100644
index 61d149060b7..00000000000
--- a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/nvtit/md_iteration_leap_frog_impl.cuh
+++ /dev/null
@@ -1,31 +0,0 @@
-/**
- * Copyright 2021 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-/**
- * Note:
- * MDIterationLeapFrog. This is an experimental interface that is subject to change and/or deletion.
- */
-
-#ifndef MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_SPONGE_NVTIT_MD_ITERATION_LEAP_FROG_IMPL_H
-#define MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_SPONGE_NVTIT_MD_ITERATION_LEAP_FROG_IMPL_H
-
-#include <cuda_runtime_api.h>
-#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/cuda_common.h"
-#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/cuda_device_info.h"
-
-CUDA_LIB_EXPORT void MDIterationLeapFrog(const int atom_numbers, float *vel, float *crd, float *frc, float *acc,
-                                         const float *inverse_mass, const float dt, cudaStream_t stream);
-
-#endif  // MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_SPONGE_NVTIT_MD_ITERATION_LEAP_FROG_IMPL_H
diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/nvtit/md_iteration_leap_frog_liujian_gpu_impl.cu b/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/nvtit/md_iteration_leap_frog_liujian_gpu_impl.cu
deleted file mode 100644
index a2a636726b8..00000000000
--- a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/nvtit/md_iteration_leap_frog_liujian_gpu_impl.cu
+++ /dev/null
@@ -1,67 +0,0 @@
-/**
- * Copyright 2021 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "plugin/device/gpu/kernel/cuda_impl/sponge/nvtit/md_iteration_leap_frog_liujian_gpu_impl.cuh"
-#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/util.cuh"
-#include "plugin/device/gpu/kernel/cuda_impl/sponge/common_sponge.cuh"
-
-__global__ void MD_Iteration_Leap_Frog_With_LiuJian_kernel(const int atom_numbers, const float half_dt, const float dt,
-                                                           const float exp_gamma, float *inverse_mass,
-                                                           float *sqrt_mass_inverse, VECTOR *vel, VECTOR *crd,
-                                                           VECTOR *frc, VECTOR *acc, VECTOR *random_frc,
-                                                           VECTOR *output) {
-  int i = blockDim.x * blockIdx.x + threadIdx.x;
-
-  if (i < atom_numbers) {
-    acc[i].x = inverse_mass[i] * frc[i].x;
-    acc[i].y = inverse_mass[i] * frc[i].y;
-    acc[i].z = inverse_mass[i] * frc[i].z;
-
-    vel[i].x = vel[i].x + dt * acc[i].x;
-    vel[i].y = vel[i].y + dt * acc[i].y;
-    vel[i].z = vel[i].z + dt * acc[i].z;
-
-    output[i].x = crd[i].x + half_dt * vel[i].x;
-    output[i].y = crd[i].y + half_dt * vel[i].y;
-    output[i].z = crd[i].z + half_dt * vel[i].z;
-
-    vel[i].x = exp_gamma * vel[i].x + sqrt_mass_inverse[i] * random_frc[i].x;
-    vel[i].y = exp_gamma * vel[i].y + sqrt_mass_inverse[i] * random_frc[i].y;
-    vel[i].z = exp_gamma * vel[i].z + sqrt_mass_inverse[i] * random_frc[i].z;
-
-    output[i].x = output[i].x + half_dt * vel[i].x;
-    output[i].y = output[i].y + half_dt * vel[i].y;
-    output[i].z = output[i].z + half_dt * vel[i].z;
-  }
-}
-
-void MD_Iteration_Leap_Frog_With_LiuJian(const int atom_numbers, const float half_dt, const float dt,
-                                         const float exp_gamma, int float4_numbers, float *inverse_mass,
-                                         float *sqrt_mass_inverse, float *vel, float *crd, float *frc, float *acc,
-                                         curandStatePhilox4_32_10_t *rand_state, float *rand_frc, float *output,
-                                         cudaStream_t stream) {
-  Rand_Normal<<<ceilf(static_cast<float>(float4_numbers) / 32.), 32, 0, stream>>>(
-    float4_numbers, rand_state, reinterpret_cast<float4 *>(rand_frc));
-  VECTOR *d_vel = reinterpret_cast<VECTOR *>(vel);
-  VECTOR *d_crd = reinterpret_cast<VECTOR *>(crd);
-  VECTOR *d_frc = reinterpret_cast<VECTOR *>(frc);
-  VECTOR *d_acc = reinterpret_cast<VECTOR *>(acc);
-  VECTOR *d_rand_frc = reinterpret_cast<VECTOR *>(rand_frc);
-  VECTOR *d_out = reinterpret_cast<VECTOR *>(output);
-  MD_Iteration_Leap_Frog_With_LiuJian_kernel<<<ceilf(static_cast<float>(atom_numbers) / 32), 32, 0, stream>>>(
-    atom_numbers, half_dt, dt, exp_gamma, inverse_mass, sqrt_mass_inverse, d_vel, d_crd, d_frc, d_acc, d_rand_frc,
-    d_out);
-}
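For orientation, the Liu-Jian ("middle scheme") kernel above wraps the friction-plus-noise velocity update between two half-step coordinate updates. Written out, with exp_gamma = e^(-gamma dt) and with the noise prefactor folded into sqrt_mass_inverse (this reading of the buffers is an assumption, stated here rather than taken from the removed code):

\[
\begin{aligned}
a_i &= m_i^{-1} F_i, \qquad & v_i &\leftarrow v_i + \Delta t\, a_i, \\
x_i &\leftarrow x_i + \tfrac{\Delta t}{2}\, v_i, \qquad & v_i &\leftarrow e^{-\gamma \Delta t}\, v_i + \sqrt{\tfrac{k_B T\,\bigl(1 - e^{-2\gamma \Delta t}\bigr)}{m_i}}\; \xi_i, \\
x_i &\leftarrow x_i + \tfrac{\Delta t}{2}\, v_i, \qquad & \xi_i &\sim \mathcal{N}(0, 1).
\end{aligned}
\]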
diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/nvtit/md_iteration_leap_frog_liujian_gpu_impl.cuh b/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/nvtit/md_iteration_leap_frog_liujian_gpu_impl.cuh
deleted file mode 100644
index 77e42a3f2d6..00000000000
--- a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/nvtit/md_iteration_leap_frog_liujian_gpu_impl.cuh
+++ /dev/null
@@ -1,29 +0,0 @@
-/**
- * Copyright 2021 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_SPONGE_MD_ITERATION_LEAP_FROG_LIUJIAN_GPU_IMPL_H_
-#define MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_SPONGE_MD_ITERATION_LEAP_FROG_LIUJIAN_GPU_IMPL_H_
-
-#include <curand_kernel.h>
-#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/cuda_common.h"
-#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/cuda_device_info.h"
-CUDA_LIB_EXPORT void MD_Iteration_Leap_Frog_With_LiuJian(const int atom_numbers, const float half_dt, const float dt,
-                                                         const float exp_gamma, int float4_numbers,
-                                                         float *inverse_mass, float *sqrt_mass_inverse, float *vel,
-                                                         float *crd, float *frc, float *acc,
-                                                         curandStatePhilox4_32_10_t *rand_state, float *rand_frc,
-                                                         float *output, cudaStream_t stream);
-
-#endif  // MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_SPONGE_MD_ITERATION_LEAP_FROG_LIUJIAN_GPU_IMPL_H_
diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/nvtit/md_iteration_leap_frog_liujian_with_max_vel_impl.cu b/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/nvtit/md_iteration_leap_frog_liujian_with_max_vel_impl.cu
deleted file mode 100644
index b8c53e91acf..00000000000
--- a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/nvtit/md_iteration_leap_frog_liujian_with_max_vel_impl.cu
+++ /dev/null
@@ -1,80 +0,0 @@
-/**
- * Copyright 2021 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "plugin/device/gpu/kernel/cuda_impl/sponge/nvtit/md_iteration_leap_frog_liujian_with_max_vel_impl.cuh"
-#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/util.cuh"
-#include "plugin/device/gpu/kernel/cuda_impl/sponge/common_sponge.cuh"
-
-__global__ void MD_Iteration_Leap_Frog_With_LiuJian_With_Max_Velocity(
-  const int atom_numbers, const float half_dt, const float dt, const float exp_gamma, const float *inverse_mass,
-  const float *sqrt_mass_inverse, VECTOR *vel, VECTOR *crd, VECTOR *frc, VECTOR *acc, VECTOR *random_frc,
-  VECTOR *output, const float max_vel) {
-  int i = blockDim.x * blockIdx.x + threadIdx.x;
-  float abs_vel;
-  if (i < atom_numbers) {
-    acc[i].x = inverse_mass[i] * frc[i].x;
-    acc[i].y = inverse_mass[i] * frc[i].y;
-    acc[i].z = inverse_mass[i] * frc[i].z;
-
-    vel[i].x = vel[i].x + dt * acc[i].x;
-    vel[i].y = vel[i].y + dt * acc[i].y;
-    vel[i].z = vel[i].z + dt * acc[i].z;
-
-    abs_vel = norm3df(vel[i].x, vel[i].y, vel[i].z);
-    if (abs_vel >= max_vel) {  // clamp |v| to max_vel, keeping the direction
-      abs_vel = max_vel / abs_vel;
-      vel[i].x = abs_vel * vel[i].x;
-      vel[i].y = abs_vel * vel[i].y;
-      vel[i].z = abs_vel * vel[i].z;
-    }
-
-    output[i].x = crd[i].x + half_dt * vel[i].x;
-    output[i].y = crd[i].y + half_dt * vel[i].y;
-    output[i].z = crd[i].z + half_dt * vel[i].z;
-
-    vel[i].x = exp_gamma * vel[i].x + sqrt_mass_inverse[i] * random_frc[i].x;
-    vel[i].y = exp_gamma * vel[i].y + sqrt_mass_inverse[i] * random_frc[i].y;
-    vel[i].z = exp_gamma * vel[i].z + sqrt_mass_inverse[i] * random_frc[i].z;
-
-    output[i].x = output[i].x + half_dt * vel[i].x;
-    output[i].y = output[i].y + half_dt * vel[i].y;
-    output[i].z = output[i].z + half_dt * vel[i].z;
-
-    frc[i].x = 0.;
-    frc[i].y = 0.;
-    frc[i].z = 0.;
-  }
-}
-
-void MD_Iteration_Leap_Frog_With_LiuJian_With_Max_Vel(const int atom_numbers, const float half_dt, const float dt,
-                                                      const float exp_gamma, int float4_numbers, float *inverse_mass,
-                                                      float *sqrt_mass_inverse, float *vel, float *crd, float *frc,
-                                                      float *acc, curandStatePhilox4_32_10_t *rand_state,
-                                                      float *rand_frc, float *output, const float max_vel,
-                                                      cudaStream_t stream) {
-  Rand_Normal<<<ceilf(static_cast<float>(float4_numbers) / 32.), 32, 0, stream>>>(
-    float4_numbers, rand_state, reinterpret_cast<float4 *>(rand_frc));
-  VECTOR *d_vel = reinterpret_cast<VECTOR *>(vel);
-  VECTOR *d_crd = reinterpret_cast<VECTOR *>(crd);
-  VECTOR *d_frc = reinterpret_cast<VECTOR *>(frc);
-  VECTOR *d_acc = reinterpret_cast<VECTOR *>(acc);
-  VECTOR *d_rand_frc = reinterpret_cast<VECTOR *>(rand_frc);
-  VECTOR *d_out = reinterpret_cast<VECTOR *>(output);
-  MD_Iteration_Leap_Frog_With_LiuJian_With_Max_Velocity<<<ceilf(static_cast<float>(atom_numbers) / 32), 32, 0,
-                                                          stream>>>(atom_numbers, half_dt, dt, exp_gamma, inverse_mass,
-                                                                    sqrt_mass_inverse, d_vel, d_crd, d_frc, d_acc,
-                                                                    d_rand_frc, d_out, max_vel);
-}
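The *_with_max_vel variants differ from the plain integrators in exactly one step: the updated velocity is rescaled to max_vel whenever its norm exceeds it, preserving direction. A host-side restatement of the clamp that norm3df implements in the kernel above:

#include <cmath>

// Rescale v to length max_vel when it is longer, leaving its direction
// unchanged; velocities under the limit pass through untouched.
inline void ClampVelocity(float v[3], float max_vel) {
  float n = std::sqrt(v[0] * v[0] + v[1] * v[1] + v[2] * v[2]);
  if (n > max_vel) {
    float s = max_vel / n;
    v[0] *= s;
    v[1] *= s;
    v[2] *= s;
  }
}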
diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/nvtit/md_iteration_leap_frog_liujian_with_max_vel_impl.cuh b/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/nvtit/md_iteration_leap_frog_liujian_with_max_vel_impl.cuh
deleted file mode 100644
index 7872f70cfd8..00000000000
--- a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/nvtit/md_iteration_leap_frog_liujian_with_max_vel_impl.cuh
+++ /dev/null
@@ -1,28 +0,0 @@
-/**
- * Copyright 2021 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_SPONGE_MD_ITERATION_LEAP_FROG_LIUJIAN_WITH_MAX_VEL_IMPL_H_
-#define MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_SPONGE_MD_ITERATION_LEAP_FROG_LIUJIAN_WITH_MAX_VEL_IMPL_H_
-
-#include <curand_kernel.h>
-#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/cuda_common.h"
-#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/cuda_device_info.h"
-CUDA_LIB_EXPORT void MD_Iteration_Leap_Frog_With_LiuJian_With_Max_Vel(
-  const int atom_numbers, const float half_dt, const float dt, const float exp_gamma, int float4_numbers,
-  float *inverse_mass, float *sqrt_mass_inverse, float *vel, float *crd, float *frc, float *acc,
-  curandStatePhilox4_32_10_t *rand_state, float *rand_frc, float *output, const float max_vel, cudaStream_t stream);
-
-#endif  // MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_SPONGE_MD_ITERATION_LEAP_FROG_LIUJIAN_WITH_MAX_VEL_IMPL_H_
diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/nvtit/md_iteration_leap_frog_with_max_vel_impl.cu b/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/nvtit/md_iteration_leap_frog_with_max_vel_impl.cu
deleted file mode 100644
index 60d96b1c008..00000000000
--- a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/nvtit/md_iteration_leap_frog_with_max_vel_impl.cu
+++ /dev/null
@@ -1,44 +0,0 @@
-/**
- * Copyright 2021 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "plugin/device/gpu/kernel/cuda_impl/sponge/nvtit/md_iteration_leap_frog_with_max_vel_impl.cuh"
-#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/util.cuh"
-#include "plugin/device/gpu/kernel/cuda_impl/sponge/common_sponge.cuh"
-
-__global__ void MD_Iteration_Leap_Frog_With_Max_Velocity(const int atom_numbers, VECTOR *vel, VECTOR *crd, VECTOR *frc,
-                                                         VECTOR *acc, const float *inverse_mass, const float dt,
-                                                         const float max_velocity) {
-  int i = blockDim.x * blockIdx.x + threadIdx.x;
-  if (i < atom_numbers) {
-    VECTOR acc_i = inverse_mass[i] * frc[i];
-    VECTOR vel_i = vel[i] + dt * acc_i;
-    vel_i = Make_Vector_Not_Exceed_Value(vel_i, max_velocity);
-    vel[i] = vel_i;
-    crd[i] = crd[i] + dt * vel_i;
-    frc[i] = {0.0f, 0.0f, 0.0f};
-  }
-}
-
-void MDIterationLeapFrogWithMaxVelocity(const int atom_numbers, float *vel, float *crd, float *frc, float *acc,
-                                        const float *inverse_mass, const float dt, const float max_velocity,
-                                        cudaStream_t stream) {
-  VECTOR *d_vel = reinterpret_cast<VECTOR *>(vel);
-  VECTOR *d_crd = reinterpret_cast<VECTOR *>(crd);
-  VECTOR *d_frc = reinterpret_cast<VECTOR *>(frc);
-  VECTOR *d_acc = reinterpret_cast<VECTOR *>(acc);
-  MD_Iteration_Leap_Frog_With_Max_Velocity<<<ceilf(static_cast<float>(atom_numbers) / 128), 128, 0, stream>>>(
-    atom_numbers, d_vel, d_crd, d_frc, d_acc, inverse_mass, dt, max_velocity);
-}
diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/nvtit/md_iteration_leap_frog_with_max_vel_impl.cuh b/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/nvtit/md_iteration_leap_frog_with_max_vel_impl.cuh
deleted file mode 100644
index 3bdb3dd60dc..00000000000
--- a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/nvtit/md_iteration_leap_frog_with_max_vel_impl.cuh
+++ /dev/null
@@ -1,27 +0,0 @@
-/**
- * Copyright 2021 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_SPONGE_MD_ITERATION_LEAP_FROG_WITH_MAX_VEL_IMPL_H_
-#define MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_SPONGE_MD_ITERATION_LEAP_FROG_WITH_MAX_VEL_IMPL_H_
-
-#include <cuda_runtime_api.h>
-#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/cuda_common.h"
-#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/cuda_device_info.h"
-CUDA_LIB_EXPORT void MDIterationLeapFrogWithMaxVelocity(const int atom_numbers, float *vel, float *crd, float *frc,
-                                                        float *acc, const float *inverse_mass, const float dt,
-                                                        const float max_velocity, cudaStream_t stream);
-
-#endif  // MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_SPONGE_MD_ITERATION_LEAP_FROG_WITH_MAX_VEL_IMPL_H_
diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/nvtit/md_iteration_setup_random_state_gpu_impl.cu b/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/nvtit/md_iteration_setup_random_state_gpu_impl.cu
deleted file mode 100644
index 54138754bd4..00000000000
--- a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/nvtit/md_iteration_setup_random_state_gpu_impl.cu
+++ /dev/null
@@ -1,28 +0,0 @@
-/**
- * Copyright 2021 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "plugin/device/gpu/kernel/cuda_impl/sponge/nvtit/md_iteration_setup_random_state_gpu_impl.cuh"
-#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/util.cuh"
-#include "plugin/device/gpu/kernel/cuda_impl/sponge/common_sponge.cuh"
-
-void MD_Iteration_Setup_Random_State(int float4_numbers, curandStatePhilox4_32_10_t *rand_state, int seed,
-                                     cudaStream_t stream) {
-  Setup_Rand_Normal_Kernel<<<ceilf(static_cast<float>(float4_numbers) / 32.), 32, 0, stream>>>(float4_numbers,
-                                                                                               rand_state, seed);
-}
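The setup routine above seeds one Philox4_32_10 counter-based generator per float4 of noise, which is what lets the Rand_Normal launches in the integrators draw four normal variates per state in a single call. A self-contained sketch of that pairing (assumed kernel names and shapes, not the removed implementation):

#include <curand_kernel.h>

// One generator state per float4 of output; curand_init with a distinct
// subsequence per thread yields independent streams from a single seed.
__global__ void SetupPhilox(int float4_numbers, curandStatePhilox4_32_10_t *state, int seed) {
  int i = blockDim.x * blockIdx.x + threadIdx.x;
  if (i < float4_numbers) curand_init(seed, i, 0, &state[i]);
}

// Each thread draws four standard normal variates in one call.
__global__ void DrawNormal4(int float4_numbers, curandStatePhilox4_32_10_t *state, float4 *out) {
  int i = blockDim.x * blockIdx.x + threadIdx.x;
  if (i < float4_numbers) out[i] = curand_normal4(&state[i]);
}

Because Philox is counter-based, the state is tiny and seeding is cheap, which suits the per-step noise buffers the Liu-Jian integrators consume.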
diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/nvtit/md_iteration_setup_random_state_gpu_impl.cuh b/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/nvtit/md_iteration_setup_random_state_gpu_impl.cuh
deleted file mode 100644
index 8fda94fc3a7..00000000000
--- a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/nvtit/md_iteration_setup_random_state_gpu_impl.cuh
+++ /dev/null
@@ -1,25 +0,0 @@
-/**
- * Copyright 2021 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-#ifndef MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_SPONGE_MD_ITERATION_SETUP_RANDOM_STATE_GPU_IMPL_H_
-#define MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_SPONGE_MD_ITERATION_SETUP_RANDOM_STATE_GPU_IMPL_H_
-
-#include <curand_kernel.h>
-#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/cuda_common.h"
-#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/cuda_device_info.h"
-
-CUDA_LIB_EXPORT void MD_Iteration_Setup_Random_State(int float4_numbers, curandStatePhilox4_32_10_t *rand_state,
-                                                     int seed, cudaStream_t stream);
-#endif
diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/pme/fft_3d_impl.cu b/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/pme/fft_3d_impl.cu
deleted file mode 100644
index d3910756359..00000000000
--- a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/pme/fft_3d_impl.cu
+++ /dev/null
@@ -1,27 +0,0 @@
-/**
- * Copyright 2021 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-#include "plugin/device/gpu/kernel/cuda_impl/sponge/pme/fft_3d_impl.cuh"
-#include "plugin/device/gpu/kernel/cuda_impl/sponge/pme/pme_common.cuh"
-
-template <typename T>
-void FFT3D(int Nfft, T *input_tensor, Complex<T> *output_tensor, const cufftHandle &FFT_plan_r2c,
-           cudaStream_t stream) {
-  cufftExecR2C(FFT_plan_r2c, input_tensor, reinterpret_cast<cufftComplex *>(output_tensor));
-  return;
-}
-
-template CUDA_LIB_EXPORT void FFT3D<float>(int Nfft, float *input_tensor, Complex<float> *output_tensor,
-                                           const cufftHandle &FFT_plan_r2c, cudaStream_t stream);
diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/pme/fft_3d_impl.cuh b/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/pme/fft_3d_impl.cuh
deleted file mode 100644
index 6df893b71ea..00000000000
--- a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/pme/fft_3d_impl.cuh
+++ /dev/null
@@ -1,28 +0,0 @@
-/**
- * Copyright 2021 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-#ifndef MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_SPONGE_PME_FFT_3D_IMPL_H_
-#define MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_SPONGE_PME_FFT_3D_IMPL_H_
-
-#include <cufft.h>
-#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/complex.h"
-#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/cuda_common.h"
-#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/cuda_device_info.h"
-
-template <typename T>
-CUDA_LIB_EXPORT void FFT3D(int Nfft, T *input_tensor, Complex<T> *output_tensor, const cufftHandle &FFT_plan_r2c,
-                           cudaStream_t stream);
-
-#endif
diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/pme/ifft_3d_impl.cu b/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/pme/ifft_3d_impl.cu
deleted file mode 100644
index 6c6880f1e6a..00000000000
--- a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/pme/ifft_3d_impl.cu
+++ /dev/null
@@ -1,28 +0,0 @@
-/**
- * Copyright 2021 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-#include "plugin/device/gpu/kernel/cuda_impl/sponge/pme/ifft_3d_impl.cuh"
-#include "plugin/device/gpu/kernel/cuda_impl/sponge/pme/pme_common.cuh"
-
-template <typename T>
-void IFFT3D(int Nfft, Complex<T> *input_tensor, T *output_tensor, const cufftHandle &FFT_plan_c2r,
-            cudaStream_t stream) {
-  cufftExecC2R(FFT_plan_c2r, reinterpret_cast<cufftComplex *>(input_tensor), output_tensor);
-  return;
-}
-
-template CUDA_LIB_EXPORT void IFFT3D<float>(int Nfft, Complex<float> *input_tensor, float *output_tensor,
                                            const cufftHandle &FFT_plan_c2r, cudaStream_t stream);
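FFT3D and IFFT3D are thin wrappers over pre-built cuFFT plans. Two properties matter to callers and are easy to miss: the R2C transform stores only fftz/2+1 complex columns per (x, y) line because of Hermitian symmetry, and the C2R inverse is unnormalized. A hedged usage sketch (plan handling assumed here, not taken from the removed code):

#include <cufft.h>

// Round trip on an fftx*ffty*fftz real grid. After ExecC2R the data come
// back multiplied by the number of real grid points, so rescale by 1/N.
void RoundTrip3D(int fftx, int ffty, int fftz, float *d_real, cufftComplex *d_cplx, cudaStream_t stream) {
  cufftHandle plan_r2c, plan_c2r;
  cufftPlan3d(&plan_r2c, fftx, ffty, fftz, CUFFT_R2C);
  cufftPlan3d(&plan_c2r, fftx, ffty, fftz, CUFFT_C2R);
  cufftSetStream(plan_r2c, stream);
  cufftSetStream(plan_c2r, stream);
  cufftExecR2C(plan_r2c, d_real, d_cplx);  // d_cplx holds fftx*ffty*(fftz/2+1) values
  cufftExecC2R(plan_c2r, d_cplx, d_real);  // unnormalized: scaled by fftx*ffty*fftz
  cufftDestroy(plan_r2c);
  cufftDestroy(plan_c2r);
}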
diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/pme/ifft_3d_impl.cuh b/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/pme/ifft_3d_impl.cuh
deleted file mode 100644
index 0d740e97a6b..00000000000
--- a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/pme/ifft_3d_impl.cuh
+++ /dev/null
@@ -1,28 +0,0 @@
-/**
- * Copyright 2021 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-#ifndef MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_SPONGE_PME_IFFT_3D_IMPL_H_
-#define MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_SPONGE_PME_IFFT_3D_IMPL_H_
-
-#include <cufft.h>
-#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/complex.h"
-#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/cuda_common.h"
-#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/cuda_device_info.h"
-
-template <typename T>
-CUDA_LIB_EXPORT void IFFT3D(int Nfft, Complex<T> *input_tensor, T *output_tensor, const cufftHandle &FFT_plan_c2r,
-                            cudaStream_t stream);
-
-#endif
diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/pme/pme_batched_fft_2d_impl.cu b/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/pme/pme_batched_fft_2d_impl.cu
deleted file mode 100644
index 5f7dc4281ae..00000000000
--- a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/pme/pme_batched_fft_2d_impl.cu
+++ /dev/null
@@ -1,29 +0,0 @@
-/**
- * Copyright 2021 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-#include "plugin/device/gpu/kernel/cuda_impl/sponge/pme/pme_batched_fft_2d_impl.cuh"
-#include "plugin/device/gpu/kernel/cuda_impl/sponge/pme/pme_common.cuh"
-
-template <typename T>
-void PMEBatchedFFT2D(Complex<T> *input_tensor, Complex<T> *output_tensor, const cufftHandle &FFT_plan_c2c,
-                     int direction, cudaStream_t stream) {
-  cufftExecC2C(FFT_plan_c2c, reinterpret_cast<cufftComplex *>(input_tensor),
-               reinterpret_cast<cufftComplex *>(output_tensor), direction);
-  return;
-}
-
-template CUDA_LIB_EXPORT void PMEBatchedFFT2D<float>(Complex<float> *input_tensor, Complex<float> *output_tensor,
-                                                     const cufftHandle &FFT_plan_c2c, int direction,
-                                                     cudaStream_t stream);
diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/pme/pme_batched_fft_2d_impl.cuh b/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/pme/pme_batched_fft_2d_impl.cuh
deleted file mode 100644
index 71f1aa919d5..00000000000
--- a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/pme/pme_batched_fft_2d_impl.cuh
+++ /dev/null
@@ -1,28 +0,0 @@
-/**
- * Copyright 2021 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-#ifndef MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_SPONGE_PME_PME_BATCHED_FFT_2D_IMPL_H_
-#define MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_SPONGE_PME_PME_BATCHED_FFT_2D_IMPL_H_
-
-#include <cufft.h>
-#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/complex.h"
-#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/cuda_common.h"
-#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/cuda_device_info.h"
-
-template <typename T>
-CUDA_LIB_EXPORT void PMEBatchedFFT2D(Complex<T> *input_tensor, Complex<T> *output_tensor,
-                                     const cufftHandle &FFT_plan_c2c, int direction, cudaStream_t stream);
-
-#endif
diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/pme/pme_common.cuh b/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/pme/pme_common.cuh
deleted file mode 100644
index 58e17a73bdd..00000000000
--- a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/pme/pme_common.cuh
+++ /dev/null
@@ -1,357 +0,0 @@
-/**
- * Copyright 2021 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-/**
- * Note:
- * PME_Common. This is an experimental interface that is subject to change and/or deletion.
- */
-
-#ifndef MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_SPONGE_PME_PME_COMMON_H_
-#define MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_SPONGE_PME_PME_COMMON_H_
-#include "plugin/device/gpu/kernel/cuda_impl/sponge/common_sponge.cuh"
-__constant__ float PME_Ma[4] = {1.0 / 6.0, -0.5, 0.5, -1.0 / 6.0};
-__constant__ float PME_Mb[4] = {0, 0.5, -1, 0.5};
-__constant__ float PME_Mc[4] = {0, 0.5, 0, -0.5};
-__constant__ float PME_Md[4] = {0, 1.0 / 6.0, 4.0 / 6.0, 1.0 / 6.0};
-__constant__ float PME_dMa[4] = {0.5, -1.5, 1.5, -0.5};
-__constant__ float PME_dMb[4] = {0, 1, -2, 1};
-__constant__ float PME_dMc[4] = {0, 0.5, 0, -0.5};
-#define PI 3.1415926
-const float periodic_factor_inverse = 2.3283064365387e-10;
-static dim3 thread_PME;
-
-const float cutoff = 10.0;
-const float tolerance = 0.00001;
-
-static float M_(float u, int n) {
-  if (n == 2) {
-    if (u > 2 || u < 0) return 0;
-    return 1 - fabsf(u - 1);
-  } else {
-    return u / (n - 1) * M_(u, n - 1) + (n - u) / (n - 1) * M_(u - 1, n - 1);
-  }
-}
-
-static float Get_Beta(float cutoff, float tolerance) {
-  float beta, low, high, tempf;
-  int ilow, ihigh;
-
-  high = 1.0;
-  ihigh = 1;
-
-  while (1) {
-    tempf = erfc(high * cutoff) / cutoff;
-    if (tempf <= tolerance) break;
-    high *= 2;
-    ihigh++;
-  }
-
-  ihigh += 50;
-  low = 0.0;
-  for (ilow = 1; ilow < ihigh; ilow++) {
-    beta = (low + high) / 2;
-    tempf = erfc(beta * cutoff) / cutoff;
-    if (tempf >= tolerance)
-      low = beta;
-    else
-      high = beta;
-  }
-  return beta;
-}
-
-static cufftComplex expc(cufftComplex z) {
-  cufftComplex res;
-  float t = expf(z.x);
-  sincosf(z.y, &res.y, &res.x);
-  res.x *= t;
-  res.y *= t;
-  return res;
-}
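Get_Beta above chooses the Ewald splitting parameter by doubling an upper bound and then bisecting for a fixed 50-plus iterations; in effect it approximates the smallest beta at which the truncated direct-space term falls below the requested tolerance:

\[
\beta \;\approx\; \min\Bigl\{\, b > 0 \;:\; \frac{\operatorname{erfc}(b\, r_c)}{r_c} \le \varepsilon \Bigr\},
\qquad r_c = \texttt{cutoff}, \quad \varepsilon = \texttt{tolerance}.
\]

Larger beta pushes more of the Coulomb sum into the reciprocal-space mesh (more FFT work), while smaller beta fattens the erfc tail at the cutoff and with it the truncation error; the bisection balances the two at the user's tolerance. The M_ recursion next to it is the standard cardinal B-spline used for charge spreading, and getb below accumulates its Fourier-space moduli.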
-static float getb(int k, int NFFT, int B_order) {
-  cufftComplex tempc, tempc2, res;
-  float tempf;
-  tempc2.x = 0;
-  tempc2.y = 0;
-
-  tempc.x = 0;
-  tempc.y = 2 * (B_order - 1) * PI * k / NFFT;
-  res = expc(tempc);
-
-  for (int kk = 0; kk < (B_order - 1); kk++) {
-    tempc.x = 0;
-    tempc.y = 2 * PI * k / NFFT * kk;
-    tempc = expc(tempc);
-    tempf = M_(kk + 1, B_order);
-    tempc2.x += tempf * tempc.x;
-    tempc2.y += tempf * tempc.y;
-  }
-  res = cuCdivf(res, tempc2);
-  return res.x * res.x + res.y * res.y;
-}
-
-__global__ static void device_add(float *ene, float *factor, float *charge_sum) {
-  ene[0] += factor[0] * charge_sum[0] * charge_sum[0];
-}
-
-__global__ static void PME_Atom_Near(const UNSIGNED_INT_VECTOR *uint_crd, int *PME_atom_near, const int PME_Nin,
-                                     const float periodic_factor_inverse_x, const float periodic_factor_inverse_y,
-                                     const float periodic_factor_inverse_z, const int atom_numbers, const int fftx,
-                                     const int ffty, const int fftz, const UNSIGNED_INT_VECTOR *PME_kxyz,
-                                     UNSIGNED_INT_VECTOR *PME_uxyz, VECTOR *PME_frxyz) {
-  int atom = blockDim.x * blockIdx.x + threadIdx.x;
-  if (atom < atom_numbers) {
-    UNSIGNED_INT_VECTOR *temp_uxyz = &PME_uxyz[atom];
-    int k, tempux, tempuy, tempuz;
-    float tempf;
-    tempf = static_cast<float>(uint_crd[atom].uint_x) * periodic_factor_inverse_x;
-    tempux = static_cast<int>(tempf);
-    PME_frxyz[atom].x = tempf - tempux;
-
-    tempf = static_cast<float>(uint_crd[atom].uint_y) * periodic_factor_inverse_y;
-    tempuy = static_cast<int>(tempf);
-    PME_frxyz[atom].y = tempf - tempuy;
-
-    tempf = static_cast<float>(uint_crd[atom].uint_z) * periodic_factor_inverse_z;
-    tempuz = static_cast<int>(tempf);
-    PME_frxyz[atom].z = tempf - tempuz;
-
-    if (tempux != (*temp_uxyz).uint_x || tempuy != (*temp_uxyz).uint_y || tempuz != (*temp_uxyz).uint_z) {
-      (*temp_uxyz).uint_x = tempux;
-      (*temp_uxyz).uint_y = tempuy;
-      (*temp_uxyz).uint_z = tempuz;
-      int *temp_near = PME_atom_near + atom * 64;
-      int kx, ky, kz;
-      for (k = 0; k < 64; k++) {
-        UNSIGNED_INT_VECTOR temp_kxyz = PME_kxyz[k];
-        kx = tempux - temp_kxyz.uint_x;
-        if (kx < 0) kx += fftx;
-        if (kx > fftx) kx -= fftx;
-        ky = tempuy - temp_kxyz.uint_y;
-        if (ky < 0) ky += ffty;
-        if (ky > ffty) ky -= ffty;
-        kz = tempuz - temp_kxyz.uint_z;
-        if (kz < 0) kz += fftz;
-        if (kz > fftz) kz -= fftz;
-        temp_near[k] = kx * PME_Nin + ky * fftz + kz;
-      }
-    }
-  }
-}
-
-__global__ static void PME_Q_Spread(int *PME_atom_near, const float *charge, const VECTOR *PME_frxyz, float *PME_Q,
-                                    const UNSIGNED_INT_VECTOR *PME_kxyz, const int atom_numbers) {
-  int atom = blockDim.x * blockIdx.x + threadIdx.x;
-
-  if (atom < atom_numbers) {
-    int k;
-    float tempf, tempQ, tempf2;
-
-    int *temp_near = PME_atom_near + atom * 64;
-    VECTOR temp_frxyz = PME_frxyz[atom];
-    float tempcharge = charge[atom];
-
-    UNSIGNED_INT_VECTOR temp_kxyz;
-    unsigned int kx;
-
-    for (k = threadIdx.y; k < 64; k = k + blockDim.y) {
-      temp_kxyz = PME_kxyz[k];
-      kx = temp_kxyz.uint_x;
-      tempf = (temp_frxyz.x);
-      tempf2 = tempf * tempf;
-      tempf = PME_Ma[kx] * tempf * tempf2 + PME_Mb[kx] * tempf2 + PME_Mc[kx] * tempf + PME_Md[kx];
-
-      tempQ = tempcharge * tempf;
-
-      kx = temp_kxyz.uint_y;
-      tempf = (temp_frxyz.y);
-      tempf2 = tempf * tempf;
-      tempf = PME_Ma[kx] * tempf * tempf2 + PME_Mb[kx] * tempf2 + PME_Mc[kx] * tempf + PME_Md[kx];
-
-      tempQ = tempQ * tempf;
-
-      kx = temp_kxyz.uint_z;
-      tempf = (temp_frxyz.z);
-      tempf2 = tempf * tempf;
-      tempf = PME_Ma[kx] * tempf * tempf2 + PME_Mb[kx] * tempf2 + PME_Mc[kx] * tempf + PME_Md[kx];
-      tempQ = tempQ * tempf;
-
-      atomicAdd(&PME_Q[temp_near[k]], tempQ);
-    }
-  }
-}
-
-__global__ static void PME_Direct_Energy(const int atom_numbers, const NEIGHBOR_LIST *nl,
-                                         const UNSIGNED_INT_VECTOR *uint_crd, const VECTOR *boxlength,
-                                         const float *charge, const float beta, const float cutoff_square,
-                                         float *direct_ene) {
-  int atom_i = blockDim.x * blockIdx.x + threadIdx.x;
-  if (atom_i < atom_numbers) {
-    NEIGHBOR_LIST nl_i = nl[atom_i];
-    int N = nl_i.atom_numbers;
-    int atom_j;
-    int int_x;
-    int int_y;
-    int int_z;
-    UNSIGNED_INT_VECTOR r1 = uint_crd[atom_i], r2;
-    VECTOR dr;
-    float dr2;
-    float dr_abs;
-    // float dr_inverse;
-    float ene_temp;
-    float charge_i = charge[atom_i];
-    float ene_lin = 0.;
-
-    // int x, y;
-    // int atom_pair_LJ_type;
-    for (int j = threadIdx.y; j < N; j = j + blockDim.y) {
-      atom_j = nl_i.atom_serial[j];
-      r2 = uint_crd[atom_j];
-
-      int_x = r2.uint_x - r1.uint_x;
-      int_y = r2.uint_y - r1.uint_y;
-      int_z = r2.uint_z - r1.uint_z;
-      dr.x = boxlength[0].x * int_x;
-      dr.y = boxlength[0].y * int_y;
-      dr.z = boxlength[0].z * int_z;
-
-      dr2 = dr.x * dr.x + dr.y * dr.y + dr.z * dr.z;
-      if (dr2 < cutoff_square) {
-        dr_abs = norm3df(dr.x, dr.y, dr.z);
-        ene_temp = charge_i * charge[atom_j] * erfcf(beta * dr_abs) / dr_abs;
-        ene_lin = ene_lin + ene_temp;
-      }
-    }
-    atomicAdd(direct_ene, ene_lin);
-  }
-}
-
-__global__ static void PME_Direct_Atom_Energy(const int atom_numbers, const NEIGHBOR_LIST *nl,
-                                              const UNSIGNED_INT_VECTOR *uint_crd, const VECTOR *boxlength,
-                                              const float *charge, const float beta, const float cutoff_square,
-                                              float *direct_ene) {
-  int atom_i = blockDim.x * blockIdx.x + threadIdx.x;
-  if (atom_i < atom_numbers) {
-    NEIGHBOR_LIST nl_i = nl[atom_i];
-    int N = nl_i.atom_numbers;
-    int atom_j;
-    int int_x;
-    int int_y;
-    int int_z;
-    UNSIGNED_INT_VECTOR r1 = uint_crd[atom_i], r2;
-    VECTOR dr;
-    float dr2;
-    float dr_abs;
-    // float dr_inverse;
-    float ene_temp;
-    float charge_i = charge[atom_i];
-    float ene_lin = 0.;
-
-    for (int j = threadIdx.y; j < N; j = j + blockDim.y) {
-      atom_j = nl_i.atom_serial[j];
-      r2 = uint_crd[atom_j];
-
-      int_x = r2.uint_x - r1.uint_x;
-      int_y = r2.uint_y - r1.uint_y;
-      int_z = r2.uint_z - r1.uint_z;
-      dr.x = boxlength[0].x * int_x;
-      dr.y = boxlength[0].y * int_y;
-      dr.z = boxlength[0].z * int_z;
-
-      dr2 = dr.x * dr.x + dr.y * dr.y + dr.z * dr.z;
-      if (dr2 < cutoff_square) {
-        dr_abs = norm3df(dr.x, dr.y, dr.z);
-        ene_temp = charge_i * charge[atom_j] * erfcf(beta * dr_abs) / dr_abs;
-        ene_lin = ene_lin + ene_temp;
-      }
-    }
-    atomicAdd(&direct_ene[atom_i], ene_lin);
-  }
-}
-
-__global__ static void PME_Energy_Product(const int element_number, const float *list1, const float *list2,
-                                          float *sum) {
-  if (threadIdx.x == 0) {
-    sum[0] = 0.;
-  }
-  __syncthreads();
-  float lin = 0.0;
-  for (int i = threadIdx.x; i < element_number; i = i + blockDim.x) {
-    lin = lin + list1[i] * list2[i];
-  }
-  atomicAdd(sum, lin);
-}
-
-__global__ static void PME_BCFQ(cufftComplex *PME_FQ, float *PME_BC, int PME_Nfft) {
-  int index = blockDim.x * blockIdx.x + threadIdx.x;
-  if (index < PME_Nfft) {
-    float tempf = PME_BC[index];
-    cufftComplex tempc = PME_FQ[index];
-    PME_FQ[index].x = tempc.x * tempf;
-    PME_FQ[index].y = tempc.y * tempf;
-  }
-}
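The two direct-energy kernels above and the exclusion correction just below implement the real-space pieces of the Ewald/PME decomposition. In formulas, with beta the splitting parameter and r_c the cutoff:

\[
E_{\text{direct}} = \sum_{(i,j)\,:\, r_{ij} < r_c} q_i q_j\, \frac{\operatorname{erfc}(\beta r_{ij})}{r_{ij}},
\qquad
E_{\text{correction}} = -\sum_{(i,j)\,\in\,\text{excluded}} q_i q_j\, \frac{\operatorname{erf}(\beta r_{ij})}{r_{ij}}.
\]

The correction removes the reciprocal-space contribution of bonded (excluded) pairs, which the smooth mesh sum cannot skip on its own; erf + erfc = 1 is what makes this bookkeeping exact.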
-__global__ static void PME_Excluded_Energy_Correction(const int atom_numbers, const UNSIGNED_INT_VECTOR *uint_crd,
-                                                      const VECTOR *scaler, const float *charge, const float pme_beta,
-                                                      const float sqrt_pi, const int *excluded_list_start,
-                                                      const int *excluded_list, const int *excluded_atom_numbers,
-                                                      float *ene) {
-  int atom_i = blockDim.x * blockIdx.x + threadIdx.x;
-  if (atom_i < atom_numbers) {
-    int excluded_number = excluded_atom_numbers[atom_i];
-    if (excluded_number > 0) {
-      int list_start = excluded_list_start[atom_i];
-      // int atom_min = excluded_list[list_start];
-      int list_end = list_start + excluded_number;
-      int atom_j;
-      int int_x;
-      int int_y;
-      int int_z;
-
-      float charge_i = charge[atom_i];
-      float charge_j;
-      float dr_abs;
-      float beta_dr;
-
-      UNSIGNED_INT_VECTOR r1 = uint_crd[atom_i], r2;
-      VECTOR dr;
-      float dr2;
-
-      float ene_lin = 0.;
-
-      for (int i = list_start; i < list_end; i = i + 1) {
-        atom_j = excluded_list[i];
-        r2 = uint_crd[atom_j];
-        charge_j = charge[atom_j];
-
-        int_x = r2.uint_x - r1.uint_x;
-        int_y = r2.uint_y - r1.uint_y;
-        int_z = r2.uint_z - r1.uint_z;
-        dr.x = scaler[0].x * int_x;
-        dr.y = scaler[0].y * int_y;
-        dr.z = scaler[0].z * int_z;
-        dr2 = dr.x * dr.x + dr.y * dr.y + dr.z * dr.z;
-
-        dr_abs = sqrtf(dr2);
-        beta_dr = pme_beta * dr_abs;
-
-        ene_lin -= charge_i * charge_j * erff(beta_dr) / dr_abs;
-      }
-      atomicAdd(ene, ene_lin);
-    }
-  }
-}
-#endif
diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/pme/pme_energy_impl.cu b/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/pme/pme_energy_impl.cu
deleted file mode 100644
index 73ab1b89d03..00000000000
--- a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/pme/pme_energy_impl.cu
+++ /dev/null
@@ -1,85 +0,0 @@
-/**
- * Copyright 2021 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-/**
- * Note:
- * PMEEnergy. This is an experimental interface that is subject to change and/or deletion.
- */
-#include "plugin/device/gpu/kernel/cuda_impl/sponge/pme/pme_energy_impl.cuh"
-#include "plugin/device/gpu/kernel/cuda_impl/sponge/pme/pme_common.cuh"
-#include "plugin/device/gpu/kernel/cuda_impl/sponge/common_sponge.cuh"
-
-__global__ void PME_Energy_Reciprocal(const int element_number, const cufftComplex *FQ, const float *BC, float *sum) {
-  if (threadIdx.x == 0) {
-    sum[0] = 0.;
-  }
-  __syncthreads();
-  float lin = 0.0;
-  cufftComplex FQ_i;
-  for (int i = threadIdx.x; i < element_number; i = i + blockDim.x) {
-    FQ_i = FQ[i];
-    lin = lin + (FQ_i.x * FQ_i.x + FQ_i.y * FQ_i.y) * BC[i];
-  }
-  atomicAdd(sum, lin);
-}
-
-void PMEEnergy(int fftx, int ffty, int fftz, int atom_numbers, float beta, float *PME_BC, int *pme_uxyz,
-               float *pme_frxyz, float *PME_Q, float *pme_fq, int *PME_atom_near, int *pme_kxyz, const int *uint_crd_f,
-               const float *charge, int *nl_atom_numbers, int *nl_atom_serial, int *nl, const float *scaler_f,
-               const int *excluded_list_start, const int *excluded_list, const int *excluded_atom_numbers,
-               float *d_reciprocal_ene, float *d_self_ene, float *d_direct_ene, float *d_correction_ene,
-               dim3 thread_PME, int PME_Nin, int PME_Nfft, int PME_Nall, const cufftHandle &PME_plan_r2c,
-               const cufftHandle &PME_plan_c2r, cudaStream_t stream) {
-  UNSIGNED_INT_VECTOR *uint_crd =
-    const_cast<UNSIGNED_INT_VECTOR *>(reinterpret_cast<const UNSIGNED_INT_VECTOR *>(uint_crd_f));
-  VECTOR *scaler = const_cast<VECTOR *>(reinterpret_cast<const VECTOR *>(scaler_f));
-  int max_neighbor_numbers = 800;
-  NEIGHBOR_LIST *nl_a = reinterpret_cast<NEIGHBOR_LIST *>(nl);
-  construct_neighbor_list_kernel<<<ceilf(static_cast<float>(atom_numbers) / 128), 128, 0, stream>>>(
-    atom_numbers, max_neighbor_numbers, nl_atom_numbers, nl_atom_serial, nl_a);
-
-  UNSIGNED_INT_VECTOR *PME_uxyz = reinterpret_cast<UNSIGNED_INT_VECTOR *>(pme_uxyz);
-  UNSIGNED_INT_VECTOR *PME_kxyz = reinterpret_cast<UNSIGNED_INT_VECTOR *>(pme_kxyz);
-  VECTOR *PME_frxyz = reinterpret_cast<VECTOR *>(pme_frxyz);
-  cufftComplex *PME_FQ = reinterpret_cast<cufftComplex *>(pme_fq);
-
-  Reset_List<<<3 * atom_numbers / 32 + 1, 32, 0, stream>>>(3 * atom_numbers, reinterpret_cast<int *>(PME_uxyz),
-                                                           1 << 30);
-  PME_Atom_Near<<<atom_numbers / thread_PME.x + 1, thread_PME, 0, stream>>>(
-    uint_crd, PME_atom_near, PME_Nin, periodic_factor_inverse * fftx, periodic_factor_inverse * ffty,
-    periodic_factor_inverse * fftz, atom_numbers, fftx, ffty, fftz, PME_kxyz, PME_uxyz, PME_frxyz);
-
-  Reset_List<<<PME_Nall / 1024 + 1, 1024, 0, stream>>>(PME_Nall, PME_Q, 0);
-
-  PME_Q_Spread<<<atom_numbers / thread_PME.x + 1, thread_PME, 0, stream>>>(PME_atom_near, charge, PME_frxyz, PME_Q,
-                                                                           PME_kxyz, atom_numbers);
-
-  cufftExecR2C(PME_plan_r2c, reinterpret_cast<float *>(PME_Q), reinterpret_cast<cufftComplex *>(PME_FQ));
-
-  PME_Energy_Reciprocal<<<1, 1024, 0, stream>>>(PME_Nfft, PME_FQ, PME_BC, d_reciprocal_ene);
-
-  PME_Energy_Product<<<1, 1024, 0, stream>>>(atom_numbers, charge, charge, d_self_ene);
-  Scale_List<<<1, 1, 0, stream>>>(1, d_self_ene, -beta / sqrtf(PI));
-
-  Reset_List<<<1, 1, 0, stream>>>(1, d_direct_ene, 0.0);
-  PME_Direct_Energy<<<atom_numbers / thread_PME.x + 1, thread_PME, 0, stream>>>(
-    atom_numbers, nl_a, uint_crd, scaler, charge, beta, cutoff * cutoff, d_direct_ene);
-
-  Reset_List<<<1, 1, 0, stream>>>(1, d_correction_ene, 0.0);
-  PME_Excluded_Energy_Correction<<<atom_numbers / 32 + 1, 32, 0, stream>>>(
-    atom_numbers, uint_crd, scaler, charge, beta, sqrtf(PI), excluded_list_start, excluded_list, excluded_atom_numbers,
-    d_correction_ene);
-  return;
-}
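PMEEnergy assembles the four Ewald terms that the caller reads back separately; their sum is the total electrostatic energy, and the self term matches the Scale_List factor of -beta/sqrt(pi) applied to the squared-charge sum:

\[
E = E_{\text{reciprocal}} + E_{\text{self}} + E_{\text{direct}} + E_{\text{correction}},
\qquad
E_{\text{self}} = -\frac{\beta}{\sqrt{\pi}} \sum_i q_i^2 .
\]

PMEEnergyUpdate below is the variant for systems whose net charge is not zero or changes over time: via Sum_Of_List and device_add it folds an additional neutralizing-background term, proportional to the square of the total charge, into the self energy.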
Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -#ifndef MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_SPONGE_PME_PME_ENERGY_IMPL_H_ -#define MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_SPONGE_PME_PME_ENERGY_IMPL_H_ - -#include -#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/cuda_common.h" -#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/cuda_device_info.h" - -CUDA_LIB_EXPORT void PMEEnergy(int fftx, int ffty, int fftz, int atom_numbers, float beta, float *PME_BC, int *pme_uxyz, - float *pme_frxyz, float *PME_Q, float *pme_fq, int *PME_atom_near, int *pme_kxyz, - const int *uint_crd_f, const float *charge, int *nl_atom_numbers, int *nl_atom_serial, - int *nl, const float *scaler_f, const int *excluded_list_start, const int *excluded_list, - const int *excluded_atom_numbers, float *d_reciprocal_ene, float *d_self_ene, - float *d_direct_ene, float *d_correction_ene, dim3 thread_PME, int PME_Nin, int PME_Nfft, - int PME_Nall, const cufftHandle &PME_plan_r2c, const cufftHandle &PME_plan_c2r, - cudaStream_t stream); - -#endif diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/pme/pme_energy_update_impl.cu b/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/pme/pme_energy_update_impl.cu deleted file mode 100644 index 4e9b3fa91d5..00000000000 --- a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/pme/pme_energy_update_impl.cu +++ /dev/null @@ -1,90 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -/** - * Note: - * PMEEnergyUpdate. This is an experimental interface that is subject to change and/or deletion. 
- */ -#include "plugin/device/gpu/kernel/cuda_impl/sponge/pme/pme_energy_update_impl.cuh" -#include "plugin/device/gpu/kernel/cuda_impl/sponge/pme/pme_common.cuh" -#include "plugin/device/gpu/kernel/cuda_impl/sponge/common_sponge.cuh" - -__global__ void PME_Energy_Reciprocal_update(const int element_number, const cufftComplex *FQ, const float *BC, - float *sum) { - if (threadIdx.x == 0) { - sum[0] = 0.; - } - __syncthreads(); - float lin = 0.0; - cufftComplex FQ_i; - for (int i = threadIdx.x; i < element_number; i = i + blockDim.x) { - FQ_i = FQ[i]; - lin = lin + (FQ_i.x * FQ_i.x + FQ_i.y * FQ_i.y) * BC[i]; - } - atomicAdd(sum, lin); -} - -void PMEEnergyUpdate(int fftx, int ffty, int fftz, int atom_numbers, float beta, float *PME_BC, int *pme_uxyz, - float *pme_frxyz, float *PME_Q, float *pme_fq, int *PME_atom_near, int *pme_kxyz, - const int *uint_crd_f, const float *charge, int *nl_atom_numbers, int *nl_atom_serial, int *nl, - const float *scaler_f, const int *excluded_list_start, const int *excluded_list, - const int *excluded_atom_numbers, float *d_reciprocal_ene, float *d_self_ene, float *d_direct_ene, - float *d_correction_ene, dim3 thread_PME, int PME_Nin, int PME_Nfft, int PME_Nall, - const cufftHandle &PME_plan_r2c, const cufftHandle &PME_plan_c2r, float *neutralizing_factor, - float *charge_sum, int max_neighbor_numbers, cudaStream_t stream) { - UNSIGNED_INT_VECTOR *uint_crd = - const_cast(reinterpret_cast(uint_crd_f)); - VECTOR *scaler = const_cast(reinterpret_cast(scaler_f)); - // int max_neighbor_numbers = 800; - NEIGHBOR_LIST *nl_a = reinterpret_cast(nl); - construct_neighbor_list_kernel<<(atom_numbers) / 128), 128, 0, stream>>>( - atom_numbers, max_neighbor_numbers, nl_atom_numbers, nl_atom_serial, nl_a); - - UNSIGNED_INT_VECTOR *PME_uxyz = reinterpret_cast(pme_uxyz); - UNSIGNED_INT_VECTOR *PME_kxyz = reinterpret_cast(pme_kxyz); - VECTOR *PME_frxyz = reinterpret_cast(pme_frxyz); - cufftComplex *PME_FQ = reinterpret_cast(pme_fq); - - Reset_List<<<3 * atom_numbers / 32 + 1, 32, 0, stream>>>(3 * atom_numbers, reinterpret_cast(PME_uxyz), - 1 << 30); - PME_Atom_Near<<>>( - uint_crd, PME_atom_near, PME_Nin, periodic_factor_inverse * fftx, periodic_factor_inverse * ffty, - periodic_factor_inverse * fftz, atom_numbers, fftx, ffty, fftz, PME_kxyz, PME_uxyz, PME_frxyz); - - Reset_List<<>>(PME_Nall, PME_Q, 0); - - PME_Q_Spread<<>>(PME_atom_near, charge, PME_frxyz, PME_Q, - PME_kxyz, atom_numbers); - - cufftExecR2C(PME_plan_r2c, reinterpret_cast(PME_Q), reinterpret_cast(PME_FQ)); - - PME_Energy_Reciprocal_update<<<1, 1024, 0, stream>>>(PME_Nfft, PME_FQ, PME_BC, d_reciprocal_ene); - - PME_Energy_Product<<<1, 1024, 0, stream>>>(atom_numbers, charge, charge, d_self_ene); - Scale_List<<<1, 1, 0, stream>>>(1, d_self_ene, -beta / sqrtf(PI)); - - Sum_Of_List<<<1, 1024>>>(atom_numbers, charge, charge_sum); - device_add<<<1, 1>>>(d_self_ene, neutralizing_factor, charge_sum); - - Reset_List<<<1, 1, 0, stream>>>(1, d_direct_ene, 0.0); - PME_Direct_Energy<<>>( - atom_numbers, nl_a, uint_crd, scaler, charge, beta, cutoff * cutoff, d_direct_ene); - - Reset_List<<<1, 1, 0, stream>>>(1, d_correction_ene, 0.0); - PME_Excluded_Energy_Correction<<>>( - atom_numbers, uint_crd, scaler, charge, beta, sqrtf(PI), excluded_list_start, excluded_list, excluded_atom_numbers, - d_correction_ene); - return; -} diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/pme/pme_energy_update_impl.cuh b/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/pme/pme_energy_update_impl.cuh deleted file 
mode 100644 index 3a26a7eccc4..00000000000 --- a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/pme/pme_energy_update_impl.cuh +++ /dev/null @@ -1,34 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -#ifndef MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_SPONGE_PME_PME_ENERGY_UPDATE_IMPL_H_ -#define MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_SPONGE_PME_PME_ENERGY_UPDATE_IMPL_H_ - -#include -#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/cuda_common.h" -#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/cuda_device_info.h" - -CUDA_LIB_EXPORT void PMEEnergyUpdate(int fftx, int ffty, int fftz, int atom_numbers, float beta, float *PME_BC, - int *pme_uxyz, float *pme_frxyz, float *PME_Q, float *pme_fq, int *PME_atom_near, - int *pme_kxyz, const int *uint_crd_f, const float *charge, int *nl_atom_numbers, - int *nl_atom_serial, int *nl, const float *scaler_f, - const int *excluded_list_start, const int *excluded_list, - const int *excluded_atom_numbers, float *d_reciprocal_ene, float *d_self_ene, - float *d_direct_ene, float *d_correction_ene, dim3 thread_PME, int PME_Nin, - int PME_Nfft, int PME_Nall, const cufftHandle &PME_plan_r2c, - const cufftHandle &PME_plan_c2r, float *neutralizing_factor, float *charge_sum, - int max_neighbor_numbers, cudaStream_t stream); - -#endif diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/pme/pme_excluded_force_impl.cu b/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/pme/pme_excluded_force_impl.cu deleted file mode 100644 index 02885a6777c..00000000000 --- a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/pme/pme_excluded_force_impl.cu +++ /dev/null @@ -1,103 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
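PMEEnergyUpdate differs from PMEEnergy mainly by the two extra launches above: Sum_Of_List reduces the charges into charge_sum, and device_add then folds a neutralizing term into the self energy, which is how a box with nonzero net charge is handled under PME. Note also that those two launches omit the stream argument, so they run on the default stream rather than the caller's. Neither helper is defined in this hunk; assuming device_add applies the usual uniform-background correction E_self += f * (sum of q)^2, a sketch of its presumed body:

// Assumed semantics of device_add (definition outside this hunk): fold the
// neutralizing-plasma term for a net-charged box into the self energy.
__global__ void AddNeutralizingTermSketch(float *d_self_ene, const float *factor, const float *charge_sum) {
  d_self_ene[0] += factor[0] * charge_sum[0] * charge_sum[0];
}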
- */ -#include "plugin/device/gpu/kernel/cuda_impl/sponge/pme/pme_excluded_force_impl.cuh" -#include "plugin/device/gpu/kernel/cuda_impl/sponge/pme/pme_common.cuh" - -__global__ void PME_Excluded_Force_Correction(const int atom_numbers, const UNSIGNED_INT_VECTOR *uint_crd, - const VECTOR *sacler, const float *charge, const float pme_beta, - const float sqrt_pi, const int *excluded_list_start, - const int *excluded_list, const int *excluded_atom_numbers, VECTOR *frc) { - int atom_i = blockDim.x * blockIdx.x + threadIdx.x; - if (atom_i < atom_numbers) { - int excluded_numbers = excluded_atom_numbers[atom_i]; - if (excluded_numbers > 0) { - int list_start = excluded_list_start[atom_i]; - // int atom_min = excluded_list[list_start]; - int list_end = list_start + excluded_numbers; - int atom_j; - int int_x; - int int_y; - int int_z; - - float charge_i = charge[atom_i]; - float charge_j; - float dr_abs; - float beta_dr; - - UNSIGNED_INT_VECTOR r1 = uint_crd[atom_i], r2; - VECTOR dr; - float dr2; - - float frc_abs = 0.; - VECTOR frc_lin; - VECTOR frc_record = {0., 0., 0.}; - - for (int i = list_start; i < list_end; i = i + 1) { - atom_j = excluded_list[i]; - r2 = uint_crd[atom_j]; - charge_j = charge[atom_j]; - - int_x = r2.uint_x - r1.uint_x; - int_y = r2.uint_y - r1.uint_y; - int_z = r2.uint_z - r1.uint_z; - dr.x = sacler[0].x * int_x; - dr.y = sacler[0].y * int_y; - dr.z = sacler[0].z * int_z; - dr2 = dr.x * dr.x + dr.y * dr.y + dr.z * dr.z; - - dr_abs = sqrtf(dr2); - beta_dr = pme_beta * dr_abs; - // sqrt_pi= 2/sqrt(3.141592654); - frc_abs = beta_dr * sqrt_pi * expf(-beta_dr * beta_dr) + erfcf(beta_dr); - frc_abs = (frc_abs - 1.) / dr2 / dr_abs; - frc_abs = -charge_i * charge_j * frc_abs; - frc_lin.x = frc_abs * dr.x; - frc_lin.y = frc_abs * dr.y; - frc_lin.z = frc_abs * dr.z; - - frc_record.x = frc_record.x + frc_lin.x; - frc_record.y = frc_record.y + frc_lin.y; - frc_record.z = frc_record.z + frc_lin.z; - - atomicAdd(&frc[atom_j].x, -frc_lin.x); - atomicAdd(&frc[atom_j].y, -frc_lin.y); - atomicAdd(&frc[atom_j].z, -frc_lin.z); - } // atom_j cycle - atomicAdd(&frc[atom_i].x, frc_record.x); - atomicAdd(&frc[atom_i].y, frc_record.y); - atomicAdd(&frc[atom_i].z, frc_record.z); - } // if need excluded - } -} - -void PMEExcludedForce(const int atom_numbers, const float pme_beta, const int *uint_crd_f, const float *sacler_f, - const float *charge, const int *excluded_list_start, const int *excluded_list, - const int *excluded_atom_numbers, float *frc_f, cudaStream_t stream) { - Reset_List<<(3. 
* atom_numbers) / 128), 128, 0, stream>>>(3 * atom_numbers, frc_f, 0.); - UNSIGNED_INT_VECTOR *uint_crd = - const_cast(reinterpret_cast(uint_crd_f)); - VECTOR *frc = reinterpret_cast(frc_f); - VECTOR *sacler = const_cast(reinterpret_cast(sacler_f)); - - PME_Excluded_Force_Correction<<(atom_numbers) / 128), 128, 0, stream>>>( - atom_numbers, uint_crd, sacler, charge, pme_beta, TWO_DIVIDED_BY_SQRT_PI, excluded_list_start, excluded_list, - excluded_atom_numbers, frc); - return; -} - -void PMEExcludedForce(const int atom_numbers, const float pme_beta, const int *uint_crd_f, const float *sacler_f, - const float *charge, const int *excluded_list_start, const int *excluded_list, - const int *excluded_atom_numbers, float *frc_f, cudaStream_t stream); diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/pme/pme_excluded_force_impl.cuh b/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/pme/pme_excluded_force_impl.cuh deleted file mode 100644 index bd08b11c1d8..00000000000 --- a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/pme/pme_excluded_force_impl.cuh +++ /dev/null @@ -1,28 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -#ifndef MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_SPONGE_PME_PME_EXCLUDED_FORCE_IMPL_H_ -#define MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_SPONGE_PME_PME_EXCLUDED_FORCE_IMPL_H_ - -#include -#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/cuda_common.h" -#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/cuda_device_info.h" - -CUDA_LIB_EXPORT void PMEExcludedForce(const int atom_numbers, const float pme_beta, const int *uint_crd_f, - const float *sacler_f, const float *charge, const int *excluded_list_start, - const int *excluded_list, const int *excluded_atom_numbers, float *frc_f, - cudaStream_t stream); - -#endif diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/pme/pme_fft_1d_impl.cu b/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/pme/pme_fft_1d_impl.cu deleted file mode 100644 index 289b1d08ddd..00000000000 --- a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/pme/pme_fft_1d_impl.cu +++ /dev/null @@ -1,29 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
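The correction kernel above subtracts, for each excluded pair, the real-space Ewald force the direct sum would otherwise double-count (the misspelled sacler is preserved from the source). Written out, with beta the Ewald parameter and r the pair distance, the scalar factor applied to dr is f(r) = -q_i * q_j * [(2*beta*r/sqrt(pi)) * exp(-beta^2 r^2) + erfc(beta*r) - 1] / r^3, which matches the frc_abs lines since sqrt_pi in the kernel is 2/sqrt(pi), as its inline comment notes. A device-side restatement with illustrative names:

// Per-pair scalar from PME_Excluded_Force_Correction; beta_dr = beta * r,
// two_over_sqrt_pi = 2/sqrt(pi). Multiply the result by dr for the force vector.
__device__ float ExcludedForceScalar(float qi, float qj, float r2, float beta_dr, float two_over_sqrt_pi) {
  float r = sqrtf(r2);
  float s = beta_dr * two_over_sqrt_pi * expf(-beta_dr * beta_dr) + erfcf(beta_dr);
  return -qi * qj * (s - 1.f) / (r2 * r);
}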
- */ -#include "plugin/device/gpu/kernel/cuda_impl/sponge/pme/pme_fft_1d_impl.cuh" -#include "plugin/device/gpu/kernel/cuda_impl/sponge/pme/pme_common.cuh" - -template -void PMEFFT1D(int Nfft, Complex *input_tensor, Complex *output_tensor, const cufftHandle &FFT_plan_c2c, - cudaStream_t stream) { - cufftExecC2C(FFT_plan_c2c, reinterpret_cast(input_tensor), - reinterpret_cast(output_tensor), CUFFT_FORWARD); - return; -} - -template CUDA_LIB_EXPORT -void PMEFFT1D(int Nfft, Complex *input_tensor, Complex *output_tensor, - const cufftHandle &FFT_plan_c2c, cudaStream_t stream); diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/pme/pme_fft_1d_impl.cuh b/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/pme/pme_fft_1d_impl.cuh deleted file mode 100644 index 0ecde226b81..00000000000 --- a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/pme/pme_fft_1d_impl.cuh +++ /dev/null @@ -1,27 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -#ifndef MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_SPONGE_PME_PME_FFT_1D_IMPL_H_ -#define MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_SPONGE_PME_PME_FFT_1D_IMPL_H_ - -#include -#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/complex.h" -#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/cuda_common.h" - -template -CUDA_LIB_EXPORT void PMEFFT1D(int Nfft, Complex *input_tensor, Complex *output_tensor, - const cufftHandle &FFT_plan_c2c, cudaStream_t stream); - -#endif diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/pme/pme_fft_2d_impl.cu b/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/pme/pme_fft_2d_impl.cu deleted file mode 100644 index f06d37a2429..00000000000 --- a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/pme/pme_fft_2d_impl.cu +++ /dev/null @@ -1,29 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#include "plugin/device/gpu/kernel/cuda_impl/sponge/pme/pme_fft_2d_impl.cuh" -#include "plugin/device/gpu/kernel/cuda_impl/sponge/pme/pme_common.cuh" - -template -void PMEFFT2D(int Nfft, Complex *input_tensor, Complex *output_tensor, - const cufftHandle &FFT_plan_c2c, cudaStream_t stream) { - cufftExecC2C(FFT_plan_c2c, reinterpret_cast(input_tensor), - reinterpret_cast(output_tensor), CUFFT_FORWARD); - return; -} - -template CUDA_LIB_EXPORT -void PMEFFT2D(int Nfft, Complex *input_tensor, Complex *output_tensor, - const cufftHandle &FFT_plan_c2c, cudaStream_t stream); diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/pme/pme_fft_2d_impl.cuh b/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/pme/pme_fft_2d_impl.cuh deleted file mode 100644 index 3218a1ac46e..00000000000 --- a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/pme/pme_fft_2d_impl.cuh +++ /dev/null @@ -1,27 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -#ifndef MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_SPONGE_PME_PME_FFT_2D_IMPL_H_ -#define MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_SPONGE_PME_PME_FFT_2D_IMPL_H_ - -#include -#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/complex.h" -#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/cuda_common.h" - -template -CUDA_LIB_EXPORT void PMEFFT2D(int Nfft, Complex *input_tensor, Complex *output_tensor, - const cufftHandle &FFT_plan_c2c, cudaStream_t stream); - -#endif diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/pme/pme_ifft_1d_impl.cu b/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/pme/pme_ifft_1d_impl.cu deleted file mode 100644 index a73ca562b3f..00000000000 --- a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/pme/pme_ifft_1d_impl.cu +++ /dev/null @@ -1,29 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#include "plugin/device/gpu/kernel/cuda_impl/sponge/pme/pme_ifft_1d_impl.cuh" -#include "plugin/device/gpu/kernel/cuda_impl/sponge/pme/pme_common.cuh" - -template -void PMEIFFT1D(int Nfft, Complex *input_tensor, Complex *output_tensor, const cufftHandle &FFT_plan_c2c, - cudaStream_t stream) { - cufftExecC2C(FFT_plan_c2c, reinterpret_cast(input_tensor), - reinterpret_cast(output_tensor), CUFFT_INVERSE); - return; -} - -template CUDA_LIB_EXPORT -void PMEIFFT1D(int Nfft, Complex *input_tensor, Complex *output_tensor, - const cufftHandle &FFT_plan_c2c, cudaStream_t stream); diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/pme/pme_ifft_1d_impl.cuh b/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/pme/pme_ifft_1d_impl.cuh deleted file mode 100644 index 524195b0d7c..00000000000 --- a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/pme/pme_ifft_1d_impl.cuh +++ /dev/null @@ -1,27 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -#ifndef MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_SPONGE_PME_PME_IFFT_1D_IMPL_H_ -#define MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_SPONGE_PME_PME_IFFT_1D_IMPL_H_ - -#include -#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/complex.h" -#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/cuda_common.h" - -template -CUDA_LIB_EXPORT void PMEIFFT1D(int Nfft, Complex *input_tensor, Complex *output_tensor, - const cufftHandle &FFT_plan_c2c, cudaStream_t stream); - -#endif diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/pme/pme_ifft_2d_impl.cu b/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/pme/pme_ifft_2d_impl.cu deleted file mode 100644 index 0ecdb5bf886..00000000000 --- a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/pme/pme_ifft_2d_impl.cu +++ /dev/null @@ -1,29 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#include "plugin/device/gpu/kernel/cuda_impl/sponge/pme/pme_ifft_2d_impl.cuh" -#include "plugin/device/gpu/kernel/cuda_impl/sponge/pme/pme_common.cuh" - -template -void PMEIFFT2D(int Nfft, Complex *input_tensor, Complex *output_tensor, const cufftHandle &FFT_plan_c2c, - cudaStream_t stream) { - cufftExecC2C(FFT_plan_c2c, reinterpret_cast(input_tensor), - reinterpret_cast(output_tensor), CUFFT_INVERSE); - return; -} - -template CUDA_LIB_EXPORT -void PMEIFFT2D(int Nfft, Complex *input_tensor, Complex *output_tensor, - const cufftHandle &FFT_plan_c2c, cudaStream_t stream); diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/pme/pme_ifft_2d_impl.cuh b/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/pme/pme_ifft_2d_impl.cuh deleted file mode 100644 index 4d3e7f44a7e..00000000000 --- a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/pme/pme_ifft_2d_impl.cuh +++ /dev/null @@ -1,27 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -#ifndef MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_SPONGE_PME_PME_IFFT_2D_IMPL_H_ -#define MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_SPONGE_PME_PME_IFFT_2D_IMPL_H_ - -#include -#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/complex.h" -#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/cuda_common.h" - -template -CUDA_LIB_EXPORT void PMEIFFT2D(int Nfft, Complex *input_tensor, Complex *output_tensor, - const cufftHandle &FFT_plan_c2c, cudaStream_t stream); - -#endif diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/pme/pme_irfft_2d_impl.cu b/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/pme/pme_irfft_2d_impl.cu deleted file mode 100644 index 38121dc53c2..00000000000 --- a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/pme/pme_irfft_2d_impl.cu +++ /dev/null @@ -1,28 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#include "plugin/device/gpu/kernel/cuda_impl/sponge/pme/pme_irfft_2d_impl.cuh" -#include "plugin/device/gpu/kernel/cuda_impl/sponge/pme/pme_common.cuh" - -template -void PMEIRFFT2D(int Nfft, Complex *input_tensor, T *output_tensor, const cufftHandle &FFT_plan_c2r, - cudaStream_t stream) { - cufftExecC2R(FFT_plan_c2r, reinterpret_cast(input_tensor), output_tensor); - return; -} - -template CUDA_LIB_EXPORT -void PMEIRFFT2D(int Nfft, Complex *input_tensor, float *output_tensor, - const cufftHandle &FFT_plan_c2r, cudaStream_t stream); diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/pme/pme_irfft_2d_impl.cuh b/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/pme/pme_irfft_2d_impl.cuh deleted file mode 100644 index 3610537d68d..00000000000 --- a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/pme/pme_irfft_2d_impl.cuh +++ /dev/null @@ -1,27 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -#ifndef MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_SPONGE_PME_PME_IRFFT_2D_IMPL_H_ -#define MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_SPONGE_PME_PME_IRFFT_2D_IMPL_H_ - -#include -#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/complex.h" -#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/cuda_common.h" - -template -CUDA_LIB_EXPORT void PMEIRFFT2D(int Nfft, Complex *input_tensor, T *output_tensor, const cufftHandle &FFT_plan_c2r, - cudaStream_t stream); - -#endif diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/pme/pme_reciprocal_force_impl.cu b/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/pme/pme_reciprocal_force_impl.cu deleted file mode 100644 index c8971f2fff8..00000000000 --- a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/pme/pme_reciprocal_force_impl.cu +++ /dev/null @@ -1,107 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -/** - * Note: - * PMEReciprocalForce. This is an experimental interface that is subject to change and/or deletion. 
- */ -#include "plugin/device/gpu/kernel/cuda_impl/sponge/pme/pme_reciprocal_force_impl.cuh" -#include "plugin/device/gpu/kernel/cuda_impl/sponge/pme/pme_common.cuh" - -__global__ void PME_Final(int *PME_atom_near, const float *charge, const float *PME_Q, VECTOR *force, - const VECTOR *PME_frxyz, const UNSIGNED_INT_VECTOR *PME_kxyz, - const _VECTOR PME_inverse_box_vector, const int atom_numbers) { - int atom = blockDim.x * blockIdx.x + threadIdx.x; - if (atom < atom_numbers) { - int k, kx; - float tempdQx, tempdQy, tempdQz, tempdx, tempdy, tempdz, tempx, tempy, tempz, tempdQf; - float tempf, tempf2; - float temp_charge = charge[atom]; - int *temp_near = PME_atom_near + atom * 64; - UNSIGNED_INT_VECTOR temp_kxyz; - VECTOR temp_frxyz = PME_frxyz[atom]; - for (k = threadIdx.y; k < 64; k = k + blockDim.y) { - temp_kxyz = PME_kxyz[k]; - tempdQf = -PME_Q[temp_near[k]] * temp_charge; - - kx = temp_kxyz.uint_x; - tempf = (temp_frxyz.x); - tempf2 = tempf * tempf; - tempx = PME_Ma[kx] * tempf * tempf2 + PME_Mb[kx] * tempf2 + PME_Mc[kx] * tempf + PME_Md[kx]; - tempdx = PME_dMa[kx] * tempf2 + PME_dMb[kx] * tempf + PME_dMc[kx]; - - kx = temp_kxyz.uint_y; - tempf = (temp_frxyz.y); - tempf2 = tempf * tempf; - tempy = PME_Ma[kx] * tempf * tempf2 + PME_Mb[kx] * tempf2 + PME_Mc[kx] * tempf + PME_Md[kx]; - tempdy = PME_dMa[kx] * tempf2 + PME_dMb[kx] * tempf + PME_dMc[kx]; - - kx = temp_kxyz.uint_z; - tempf = (temp_frxyz.z); - tempf2 = tempf * tempf; - tempz = PME_Ma[kx] * tempf * tempf2 + PME_Mb[kx] * tempf2 + PME_Mc[kx] * tempf + PME_Md[kx]; - tempdz = PME_dMa[kx] * tempf2 + PME_dMb[kx] * tempf + PME_dMc[kx]; - - tempdQx = tempdx * tempy * tempz * PME_inverse_box_vector.x; - tempdQy = tempdy * tempx * tempz * PME_inverse_box_vector.y; - tempdQz = tempdz * tempx * tempy * PME_inverse_box_vector.z; - - atomicAdd(&force[atom].x, tempdQf * tempdQx); - atomicAdd(&force[atom].y, tempdQf * tempdQy); - atomicAdd(&force[atom].z, tempdQf * tempdQz); - } - } -} - -void PMEReciprocalForce(int fftx, int ffty, int fftz, int atom_numbers, float beta, float *PME_BC, int *pme_uxyz, - float *pme_frxyz, float *PME_Q, float *pme_fq, int *PME_atom_near, int *pme_kxyz, - const int *uint_crd_f, const float *charge, float *force, int PME_Nin, int PME_Nall, - int PME_Nfft, const cufftHandle &PME_plan_r2c, const cufftHandle &PME_plan_c2r, - const _VECTOR &PME_inverse_box_vector, cudaStream_t stream) { - Reset_List<<(3. * atom_numbers) / 128), 128, 0, stream>>>(3 * atom_numbers, force, 0.); - UNSIGNED_INT_VECTOR *uint_crd = - const_cast(reinterpret_cast(uint_crd_f)); - UNSIGNED_INT_VECTOR *PME_uxyz = reinterpret_cast(pme_uxyz); - UNSIGNED_INT_VECTOR *PME_kxyz = reinterpret_cast(pme_kxyz); - Reset_List<<<3 * atom_numbers / 32 + 1, 32, 0, stream>>>(3 * atom_numbers, reinterpret_cast(PME_uxyz), - 1 << 30); - - VECTOR *PME_frxyz = reinterpret_cast(pme_frxyz); - VECTOR *frc = reinterpret_cast(force); - - cufftComplex *PME_FQ = reinterpret_cast(pme_fq); - - // initial end - Reset_List<<(3. 
* atom_numbers) / 128), 128, 0, stream>>>( - 3 * atom_numbers, reinterpret_cast(frc), 0.); - - PME_Atom_Near<<>>( - uint_crd, PME_atom_near, PME_Nin, periodic_factor_inverse * fftx, periodic_factor_inverse * ffty, - periodic_factor_inverse * fftz, atom_numbers, fftx, ffty, fftz, PME_kxyz, PME_uxyz, PME_frxyz); - - Reset_List<<>>(PME_Nall, PME_Q, 0); - - PME_Q_Spread<<>>(PME_atom_near, charge, PME_frxyz, PME_Q, - PME_kxyz, atom_numbers); - - cufftExecR2C(PME_plan_r2c, reinterpret_cast(PME_Q), reinterpret_cast(PME_FQ)); - PME_BCFQ<<>>(PME_FQ, PME_BC, PME_Nfft); - - cufftExecC2R(PME_plan_c2r, reinterpret_cast(PME_FQ), reinterpret_cast(PME_Q)); - - PME_Final<<>>(PME_atom_near, charge, PME_Q, frc, PME_frxyz, - PME_kxyz, PME_inverse_box_vector, atom_numbers); - return; -} diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/pme/pme_reciprocal_force_impl.cuh b/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/pme/pme_reciprocal_force_impl.cuh deleted file mode 100644 index 5faa797bdef..00000000000 --- a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/pme/pme_reciprocal_force_impl.cuh +++ /dev/null @@ -1,34 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -#ifndef MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_SPONGE_PME_PME_RECIPROCAL_FORCE_IMPL_H_ -#define MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_SPONGE_PME_PME_RECIPROCAL_FORCE_IMPL_H_ - -#include -#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/cuda_common.h" - -struct _VECTOR { - float x; - float y; - float z; -}; -CUDA_LIB_EXPORT void PMEReciprocalForce(int fftx, int ffty, int fftz, int atom_numbers, float beta, float *PME_BC, - int *pme_uxyz, float *pme_frxyz, float *PME_Q, float *pme_fq, - int *PME_atom_near, int *pme_kxyz, const int *uint_crd_f, const float *charge, - float *force, int PME_Nin, int PME_Nall, int PME_Nfft, - const cufftHandle &PME_plan_r2c, const cufftHandle &PME_plan_c2r, - const _VECTOR &PME_inverse_box_vector, cudaStream_t stream); - -#endif diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/pme/pme_rfft_2d_impl.cu b/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/pme/pme_rfft_2d_impl.cu deleted file mode 100644 index 58eee14dfc5..00000000000 --- a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/pme/pme_rfft_2d_impl.cu +++ /dev/null @@ -1,28 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
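PME_Final above evaluates, per axis, a cubic polynomial and its derivative from the constant tables PME_Ma/PME_Mb/PME_Mc/PME_Md and PME_dMa/PME_dMb/PME_dMc (the cubic spline pieces used by smooth-PME interpolation, presumably defined in pme_common.cuh, which this patch also deletes), then assembles the gradient of the spread charge. The per-axis step is a direct cubic evaluation at the fractional coordinate t; extracted as a helper with illustrative names:

// One axis of PME_Final: value and derivative of the k-th spline piece at
// fractional offset t. Table residency (__constant__ vs global) is assumed.
__device__ void SplinePiece(int k, float t, const float *Ma, const float *Mb, const float *Mc, const float *Md,
                            const float *dMa, const float *dMb, const float *dMc, float *val, float *dval) {
  float t2 = t * t;
  *val = Ma[k] * t * t2 + Mb[k] * t2 + Mc[k] * t + Md[k];  // cubic piece
  *dval = dMa[k] * t2 + dMb[k] * t + dMc[k];               // its derivative
}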
- */ -#include "plugin/device/gpu/kernel/cuda_impl/sponge/pme/pme_rfft_2d_impl.cuh" -#include "plugin/device/gpu/kernel/cuda_impl/sponge/pme/pme_common.cuh" - -template -void PMERFFT2D(int Nfft, T *input_tensor, Complex *output_tensor, - const cufftHandle &FFT_plan_r2c, cudaStream_t stream) { - cufftExecR2C(FFT_plan_r2c, input_tensor, reinterpret_cast(output_tensor)); - return; -} - -template CUDA_LIB_EXPORT -void PMERFFT2D(int Nfft, float *input_tensor, Complex *output_tensor, - const cufftHandle &FFT_plan_r2c, cudaStream_t stream); diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/pme/pme_rfft_2d_impl.cuh b/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/pme/pme_rfft_2d_impl.cuh deleted file mode 100644 index f6648bb33e2..00000000000 --- a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/pme/pme_rfft_2d_impl.cuh +++ /dev/null @@ -1,28 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -#ifndef MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_SPONGE_PME_PME_RFFT_2D_IMPL_H_ -#define MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_SPONGE_PME_PME_RFFT_2D_IMPL_H_ - -#include -#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/complex.h" -#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/cuda_common.h" -#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/cuda_device_info.h" - -template -CUDA_LIB_EXPORT void PMERFFT2D(int Nfft, T *input_tensor, Complex *output_tensor, const cufftHandle &FFT_plan_r2c, - cudaStream_t stream); - -#endif diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/restrain/restrain_energy_impl.cu b/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/restrain/restrain_energy_impl.cu deleted file mode 100644 index 546f1a96f72..00000000000 --- a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/restrain/restrain_energy_impl.cu +++ /dev/null @@ -1,45 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include "plugin/device/gpu/kernel/cuda_impl/sponge/restrain/restrain_energy_impl.cuh" -#include "plugin/device/gpu/kernel/cuda_impl/sponge/common_sponge.cuh" -#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/util.cuh" - -__global__ void restrain_energy_kernel(const int restrain_numbers, const int *restrain_list, const VECTOR *crd, - const VECTOR *crd_ref, const float weight, const VECTOR boxlength, float *ene) { - int i = blockDim.x * blockIdx.x + threadIdx.x; - if (i < restrain_numbers) { - int atom_i = restrain_list[i]; - VECTOR dr = Get_Periodic_Displacement(crd_ref[atom_i], crd[atom_i], boxlength); - ene[i] = weight * dr * dr; - } -} - -void restrainenergy(int restrain_numbers, int atom_numbers, float weight, const int *restrain_list, const float *crd_f, - const float *crd_ref_f, const float *boxlength_f, float *ene, cudaStream_t stream) { - Reset_List<<(restrain_numbers) / 128), 128, 0, stream>>>(restrain_numbers, ene, 0.); - size_t thread_per_block = 128; - size_t block_per_grid = ceilf(static_cast(restrain_numbers) / 128); - const VECTOR *crd = reinterpret_cast(crd_f); - const VECTOR *crd_ref = reinterpret_cast(crd_ref_f); - const VECTOR *boxlength = reinterpret_cast(boxlength_f); - restrain_energy_kernel<<>>(restrain_numbers, restrain_list, crd, crd_ref, - weight, boxlength[0], ene); - return; -} - -void restrainenergy(int restrain_numbers, int atom_numbers, float weight, const int *restrain_list, const float *crd_f, - const float *crd_ref, const float *boxlength_f, float *ene, cudaStream_t stream); diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/restrain/restrain_energy_impl.cuh b/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/restrain/restrain_energy_impl.cuh deleted file mode 100644 index 6402e158c1d..00000000000 --- a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/restrain/restrain_energy_impl.cuh +++ /dev/null @@ -1,27 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_SPONGE_RESTRAIN_RESTRAIN_ENERGY_IMPL_H_ -#define MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_SPONGE_RESTRAIN_RESTRAIN_ENERGY_IMPL_H_ - -#include -#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/cuda_common.h" -#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/cuda_device_info.h" - -CUDA_LIB_EXPORT void restrainenergy(int restrain_numbers, int atom_numbers, float weight, const int *restrain_list, - const float *crd_f, const float *crd_ref, const float *boxlength_f, float *ene, - cudaStream_t stream); -#endif  // MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_SPONGE_RESTRAIN_RESTRAIN_ENERGY_IMPL_H_ diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/restrain/restrain_force_atom_energy_virial_impl.cu b/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/restrain/restrain_force_atom_energy_virial_impl.cu deleted file mode 100644 index 2e7766babcc..00000000000 --- a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/restrain/restrain_force_atom_energy_virial_impl.cu +++ /dev/null @@ -1,59 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "plugin/device/gpu/kernel/cuda_impl/sponge/restrain/restrain_force_atom_energy_virial_impl.cuh" -#include "plugin/device/gpu/kernel/cuda_impl/sponge/common_sponge.cuh" - -__global__ void restrain_force_with_atom_energy_and_virial(int restrain_numbers, const int *restrain_list, - const VECTOR *crd, const VECTOR *crd_ref, const float weight, - const VECTOR *boxlength, float *atom_energy, - float *atom_virial, VECTOR *frc) { - int i = blockDim.x * blockIdx.x + threadIdx.x; - if (i < restrain_numbers) { - int atom_i = restrain_list[i]; - VECTOR dr = Get_Periodic_Displacement(crd_ref[atom_i], crd[atom_i], boxlength[0]); - - VECTOR temp_force = 2 * weight * dr; - float virial = temp_force * dr; - - atom_energy[atom_i] += 0.5 * virial; - atom_virial[atom_i] -= virial; - frc[atom_i] = frc[atom_i] + temp_force; - } -} - -void restrainforcewithatomenergyandvirial(int restrain_numbers, int atom_numbers, const int *restrain_list, - const float *crd_f, const float *crd_ref_f, const float weight, - const float *boxlength_f, float *atom_ene, float *atom_virial, float *frc_f, - cudaStream_t stream) { - Reset_List<<<ceilf(static_cast<float>(3. * atom_numbers) / 128), 128, 0, stream>>>(3 * atom_numbers, frc_f, 0.); - Reset_List<<<ceilf(static_cast<float>(atom_numbers) / 128), 128, 0, stream>>>(atom_numbers, atom_ene, 0.); - Reset_List<<<ceilf(static_cast<float>(atom_numbers) / 128), 128, 0, stream>>>(atom_numbers, atom_virial, 0.); - size_t thread_per_block = 128; - size_t block_per_grid = ceilf(static_cast<float>(restrain_numbers) / 128); - const VECTOR *crd = reinterpret_cast<const VECTOR *>(crd_f); - const VECTOR *crd_ref = reinterpret_cast<const VECTOR *>(crd_ref_f); - const VECTOR *boxlength = reinterpret_cast<const VECTOR *>(boxlength_f); - VECTOR *frc = const_cast<VECTOR *>(reinterpret_cast<const VECTOR *>(frc_f)); - restrain_force_with_atom_energy_and_virial<<<block_per_grid, thread_per_block, 0, stream>>>( - restrain_numbers, restrain_list, crd, crd_ref, weight, boxlength, atom_ene, atom_virial, frc); - return; -} - -void restrainforcewithatomenergyandvirial(int restrain_numbers, int atom_numbers, const int *restrain_list, - const float *crd_f, const float *crd_ref_f, const float weight, - const float *boxlength_f, float *atom_ene, float *atom_virial, float *frc_f, - cudaStream_t stream); diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/restrain/restrain_force_atom_energy_virial_impl.cuh b/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/restrain/restrain_force_atom_energy_virial_impl.cuh deleted file mode 100644 index 099bd7435a3..00000000000 --- a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/restrain/restrain_force_atom_energy_virial_impl.cuh +++ /dev/null @@ -1,29 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_SPONGE_RESTRAIN_RESTRAIN_FORCE_ATOM_ENERGY_VIRIAL_IMPL_H_ -#define MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_SPONGE_RESTRAIN_RESTRAIN_FORCE_ATOM_ENERGY_VIRIAL_IMPL_H_ - -#include -#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/cuda_common.h" -#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/cuda_device_info.h" - -CUDA_LIB_EXPORT void restrainforcewithatomenergyandvirial(int restrain_numbers, int atom_numbers, - const int *restrain_list, const float *crd_f, - const float *crd_ref_f, const float weight, - const float *boxlength_f, float *atom_ene, float *atom_virial, - float *frc_f, cudaStream_t stream); -#endif  // MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_SPONGE_RESTRAIN_RESTRAIN_FORCE_ATOM_ENERGY_VIRIAL_IMPL_H_ diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/restrain/restrain_force_impl.cu b/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/restrain/restrain_force_impl.cu deleted file mode 100644 index cff6a302b80..00000000000 --- a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/restrain/restrain_force_impl.cu +++ /dev/null @@ -1,56 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License.
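The kernel above keeps its three outputs consistent by construction; taking dr = crd_ref - crd (the restoring direction), the bookkeeping reads:

// E_i      = w * |dr|^2             harmonic restraint energy
// F_i      = -dE_i/d(crd_i) = 2w dr temp_force in the kernel
// W_i      = F_i . dr = 2w |dr|^2   virial = force dotted with displacement
// atom_energy[atom_i] += 0.5 * W_i  which equals E_i
// atom_virial[atom_i] -= W_i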
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "plugin/device/gpu/kernel/cuda_impl/sponge/restrain/restrain_force_impl.cuh" -#include "plugin/device/gpu/kernel/cuda_impl/sponge/common_sponge.cuh" -#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/util.cuh" - -__global__ void restrainforcekernel(int restrain_numbers, const int *restrain_list, const UNSIGNED_INT_VECTOR *uint_crd, - const UNSIGNED_INT_VECTOR *uint_crd_ref, const float factor, const VECTOR *scaler, - VECTOR *frc) { - int i = blockDim.x * blockIdx.x + threadIdx.x; - if (i < restrain_numbers) { - int atom_i = restrain_list[i]; - VECTOR dr = Get_Periodic_Displacement(uint_crd_ref[atom_i], uint_crd[atom_i], scaler[0]); - - atomicAdd(&frc[atom_i].x, factor * dr.x); - atomicAdd(&frc[atom_i].y, factor * dr.y); - atomicAdd(&frc[atom_i].z, factor * dr.z); - } -} - -void restrainforce(int restrain_numbers, int atom_numbers, const int *restrain_list, const int *uint_crd_f, - const int *uint_crd_ref_f, const float factor, const float *scaler_f, float *frc_f, - cudaStream_t stream) { - Reset_List<<<ceilf(static_cast<float>(3. * atom_numbers) / 128), 128, 0, stream>>>(3 * atom_numbers, frc_f, 0.); - size_t thread_per_block = 128; - size_t block_per_grid = ceilf(static_cast<float>(restrain_numbers) / 128); - UNSIGNED_INT_VECTOR *uint_crd = - const_cast<UNSIGNED_INT_VECTOR *>(reinterpret_cast<const UNSIGNED_INT_VECTOR *>(uint_crd_f)); - - UNSIGNED_INT_VECTOR *uint_crd_ref = - const_cast<UNSIGNED_INT_VECTOR *>(reinterpret_cast<const UNSIGNED_INT_VECTOR *>(uint_crd_ref_f)); - - VECTOR *scaler = const_cast<VECTOR *>(reinterpret_cast<const VECTOR *>(scaler_f)); - VECTOR *frc = const_cast<VECTOR *>(reinterpret_cast<const VECTOR *>(frc_f)); - restrainforcekernel<<<block_per_grid, thread_per_block, 0, stream>>>(restrain_numbers, restrain_list, uint_crd, - uint_crd_ref, factor, scaler, frc); - return; -} - -void restrainforce(int restrain_numbers, int atom_numbers, const int *restrain_list, const int *uint_crd_f, - const int *uint_crd_ref, const float factor, const float *scaler_f, float *frc_f, - cudaStream_t stream); diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/restrain/restrain_force_impl.cuh b/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/restrain/restrain_force_impl.cuh deleted file mode 100644 index f1cd09b0298..00000000000 --- a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/restrain/restrain_force_impl.cuh +++ /dev/null @@ -1,27 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License.
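restrainforcekernel works directly on the unsigned fixed-point coordinates: subtracting two uint coordinates and reinterpreting the difference as a signed int yields the minimum-image separation for free (the uint wrap-around is the periodic boundary), and the scaler converts it back to length units. The overload assumed here mirrors what Constrain_Force_Cycle_Kernel below spells out inline:

// Presumed uint overload of Get_Periodic_Displacement (cf. the explicit
// arithmetic inside Constrain_Force_Cycle_Kernel below).
__device__ __forceinline__ VECTOR GetPeriodicDisplacement(const UNSIGNED_INT_VECTOR a,
                                                          const UNSIGNED_INT_VECTOR b, const VECTOR scaler) {
  VECTOR dr;
  dr.x = static_cast<int>(a.uint_x - b.uint_x) * scaler.x;  // uint wrap == minimum image
  dr.y = static_cast<int>(a.uint_y - b.uint_y) * scaler.y;
  dr.z = static_cast<int>(a.uint_z - b.uint_z) * scaler.z;
  return dr;
}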
- */ - -#ifndef MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_SPONGE_RESTRAIN_RESTAIN_FORCE_IMPL_H_ -#define MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_SPONGE_RESTRAIN_RESTAIN_FORCE_IMPL_H_ - -#include -#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/cuda_common.h" -#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/cuda_device_info.h" - -CUDA_LIB_EXPORT void restrainforce(int restrain_numbers, int atom_numbers, const int *restrain_list, - const int *uint_crd_f, const int *uint_crd_ref, const float factor, - const float *scaler_f, float *frc_f, cudaStream_t stream); -#endif // MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_SPONGE_RESTRAIN_RESTAIN_FORCE_IMPL_H_ diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/simple_constrain/constrain_force_cycle_impl.cu b/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/simple_constrain/constrain_force_cycle_impl.cu deleted file mode 100644 index 49445cfe81a..00000000000 --- a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/simple_constrain/constrain_force_cycle_impl.cu +++ /dev/null @@ -1,82 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -/** - * Note: - * Constrain_Force_Cycle. This is an experimental interface that is subject to change and/or deletion. - */ - -#include "plugin/device/gpu/kernel/cuda_impl/sponge/common_sponge.cuh" -#include "plugin/device/gpu/kernel/cuda_impl/sponge/simple_constrain/constrain_force_cycle_impl.cuh" - -__global__ void Constrain_Force_Cycle_Kernel(const int constrain_pair_numbers, const UNSIGNED_INT_VECTOR *uint_crd, - const VECTOR *scaler, const CONSTRAIN_PAIR *constrain_pair, - const VECTOR *pair_dr, VECTOR *test_frc) { - int pair_i = blockDim.x * blockIdx.x + threadIdx.x; - if (pair_i < constrain_pair_numbers) { - CONSTRAIN_PAIR cp = constrain_pair[pair_i]; - float r_1; - VECTOR dr; - float frc_abs; - VECTOR frc_lin; - - dr.x = (static_cast(uint_crd[cp.atom_i_serial].uint_x - uint_crd[cp.atom_j_serial].uint_x)) * scaler[0].x; - dr.y = (static_cast(uint_crd[cp.atom_i_serial].uint_y - uint_crd[cp.atom_j_serial].uint_y)) * scaler[0].y; - dr.z = (static_cast(uint_crd[cp.atom_i_serial].uint_z - uint_crd[cp.atom_j_serial].uint_z)) * scaler[0].z; - r_1 = rnorm3df(dr.x, dr.y, dr.z); - frc_abs = (1. 
- cp.constant_r * r_1) * cp.constrain_k; - - frc_lin.x = frc_abs * pair_dr[pair_i].x; - frc_lin.y = frc_abs * pair_dr[pair_i].y; - frc_lin.z = frc_abs * pair_dr[pair_i].z; - - atomicAdd(&test_frc[cp.atom_j_serial].x, frc_lin.x); - atomicAdd(&test_frc[cp.atom_j_serial].y, frc_lin.y); - atomicAdd(&test_frc[cp.atom_j_serial].z, frc_lin.z); - - atomicAdd(&test_frc[cp.atom_i_serial].x, -frc_lin.x); - atomicAdd(&test_frc[cp.atom_i_serial].y, -frc_lin.y); - atomicAdd(&test_frc[cp.atom_i_serial].z, -frc_lin.z); - } -} - -void Constrain_Force_Cycle(int atom_numbers, int constrain_pair_numbers, const unsigned int *uint_crd_f, - const float *scaler_f, float *constrain_pair_f, const float *pair_dr_f, - const int *atom_i_serials, const int *atom_j_serials, const float *constant_rs, - const float *constrain_ks, float *test_frc_f, cudaStream_t stream) { - Reset_List<<<ceilf(static_cast<float>(3 * atom_numbers) / 128), 128, 0, stream>>>(3 * atom_numbers, test_frc_f, 0.); - size_t thread_per_block = 128; - size_t block_per_grid = ceilf(static_cast<float>(atom_numbers) / 128); - const UNSIGNED_INT_VECTOR *uint_crd = reinterpret_cast<const UNSIGNED_INT_VECTOR *>(uint_crd_f); - const VECTOR *scaler = reinterpret_cast<const VECTOR *>(scaler_f); - const VECTOR *pair_dr = reinterpret_cast<const VECTOR *>(pair_dr_f); - - VECTOR *test_frc = reinterpret_cast<VECTOR *>(test_frc_f); - - CONSTRAIN_PAIR *constrain_pair = reinterpret_cast<CONSTRAIN_PAIR *>(constrain_pair_f); - - construct_constrain_pair<<<ceilf(static_cast<float>(constrain_pair_numbers) / 128), 128, 0, stream>>>( - constrain_pair_numbers, atom_i_serials, atom_j_serials, constant_rs, constrain_ks, constrain_pair); - - Constrain_Force_Cycle_Kernel<<<block_per_grid, thread_per_block, 0, stream>>>( - constrain_pair_numbers, uint_crd, scaler, constrain_pair, pair_dr, test_frc); - - return; -} - -void Constrain_Force_Cycle(int atom_numbers, int constrain_pair_numbers, const unsigned int *uint_crd_f, - const float *scaler_f, float *constrain_pair_f, const float *pair_dr_f, - const int *atom_i_serials, const int *atom_j_serials, const float *constant_rs, - const float *constrain_ks, float *test_frc_f, cudaStream_t stream); diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/simple_constrain/constrain_force_cycle_impl.cuh b/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/simple_constrain/constrain_force_cycle_impl.cuh deleted file mode 100644 index 502e6e65823..00000000000 --- a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/simple_constrain/constrain_force_cycle_impl.cuh +++ /dev/null @@ -1,34 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -/** - * Note: - * Constrain_Force_Cycle. This is an experimental interface that is subject to change and/or deletion.
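Constrain_Force_Cycle first packs the four per-pair input arrays into CONSTRAIN_PAIR records via construct_constrain_pair, whose definition lives in common_sponge.cuh and is not part of this hunk. Its evident shape, reconstructed from the call site and the fields the kernels read (details may differ from the real definition):

__global__ void ConstructConstrainPairSketch(const int pair_numbers, const int *atom_i_serials,
                                             const int *atom_j_serials, const float *constant_rs,
                                             const float *constrain_ks, CONSTRAIN_PAIR *pair) {
  int i = blockDim.x * blockIdx.x + threadIdx.x;
  if (i < pair_numbers) {
    pair[i].atom_i_serial = atom_i_serials[i];  // endpoints of the constrained bond
    pair[i].atom_j_serial = atom_j_serials[i];
    pair[i].constant_r = constant_rs[i];        // target bond length
    pair[i].constrain_k = constrain_ks[i];      // per-pair stiffness/mass factor
  }
}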
diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/simple_constrain/constrain_force_cycle_impl.cuh b/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/simple_constrain/constrain_force_cycle_impl.cuh
deleted file mode 100644
index 502e6e65823..00000000000
--- a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/simple_constrain/constrain_force_cycle_impl.cuh
+++ /dev/null
@@ -1,34 +0,0 @@
-/**
- * Copyright 2021 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-/**
- * Note:
- * Constrain_Force_Cycle. This is an experimental interface that is subject to change and/or deletion.
- */
-
-#ifndef MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_SPONGE_SIMPLE_CONSTRAIN_CONSTRAIN_FORCE_CYCLE_IMPL_H_
-#define MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_SPONGE_SIMPLE_CONSTRAIN_CONSTRAIN_FORCE_CYCLE_IMPL_H_
-
-#include
-#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/cuda_common.h"
-#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/cuda_device_info.h"
-
-CUDA_LIB_EXPORT void Constrain_Force_Cycle(int atom_numbers, int constrain_pair_numbers, const unsigned int *uint_crd_f,
-                                           const float *scaler_f, float *constrain_pair_f, const float *pair_dr_f,
-                                           const int *atom_i_serials, const int *atom_j_serials,
-                                           const float *constant_rs, const float *constrain_ks, float *test_frc_f,
-                                           cudaStream_t stream);
-
-#endif
diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/simple_constrain/constrain_force_cycle_with_virial_impl.cu b/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/simple_constrain/constrain_force_cycle_with_virial_impl.cu
deleted file mode 100644
index 4530efed91c..00000000000
--- a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/simple_constrain/constrain_force_cycle_with_virial_impl.cu
+++ /dev/null
@@ -1,78 +0,0 @@
-/**
- * Copyright 2021 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-/**
- * Note:
- * ConstrainForceCycleVirial. This is an experimental interface that is subject to change and/or deletion.
- */
-
-#include "plugin/device/gpu/kernel/cuda_impl/sponge/common_sponge.cuh"
-#include "plugin/device/gpu/kernel/cuda_impl/sponge/simple_constrain/constrain_force_cycle_with_virial_impl.cuh"
-
-__global__ void Constrain_Force_Cycle_With_Virial(int constrain_pair_numbers, const UNSIGNED_INT_VECTOR *uint_crd,
-                                                  const VECTOR *scaler, CONSTRAIN_PAIR *constrain_pair,
-                                                  const VECTOR *pair_dr, VECTOR *test_frc, float *d_atom_virial) {
-  int pair_i = blockDim.x * blockIdx.x + threadIdx.x;
-  if (pair_i < constrain_pair_numbers) {
-    CONSTRAIN_PAIR cp = constrain_pair[pair_i];
-    VECTOR dr0 = pair_dr[pair_i];
-    VECTOR dr = Get_Periodic_Displacement(uint_crd[cp.atom_i_serial], uint_crd[cp.atom_j_serial], scaler[0]);
-    float r_1 = rnorm3df(dr.x, dr.y, dr.z);
-    float frc_abs = (1. - cp.constant_r * r_1) * cp.constrain_k;
-    VECTOR frc_lin = frc_abs * dr0;
-    d_atom_virial[pair_i] -= frc_lin * dr0;
-
-    atomicAdd(&test_frc[cp.atom_j_serial].x, frc_lin.x);
-    atomicAdd(&test_frc[cp.atom_j_serial].y, frc_lin.y);
-    atomicAdd(&test_frc[cp.atom_j_serial].z, frc_lin.z);
-
-    atomicAdd(&test_frc[cp.atom_i_serial].x, -frc_lin.x);
-    atomicAdd(&test_frc[cp.atom_i_serial].y, -frc_lin.y);
-    atomicAdd(&test_frc[cp.atom_i_serial].z, -frc_lin.z);
-  }
-}
-
-void Constrain_Force_Cycle_With_Virial(int atom_numbers, int constrain_pair_numbers, const unsigned int *uint_crd_f,
-                                       const float *scaler_f, float *constrain_pair_f, const float *pair_dr_f,
-                                       const int *atom_i_serials, const int *atom_j_serials, const float *constant_rs,
-                                       const float *constrain_ks, float *test_frc_f, float *d_atom_virial,
-                                       cudaStream_t stream) {
-  Reset_List<<<ceilf(static_cast<float>(3 * atom_numbers) / 128), 128, 0, stream>>>(3 * atom_numbers, test_frc_f, 0.);
-  Reset_List<<<ceilf(static_cast<float>(constrain_pair_numbers) / 128), 128, 0, stream>>>(constrain_pair_numbers,
-                                                                                          d_atom_virial, 0.);
-  size_t thread_per_block = 128;
-  size_t block_per_grid = ceilf(static_cast<float>(atom_numbers) / 128);
-  const UNSIGNED_INT_VECTOR *uint_crd = reinterpret_cast<const UNSIGNED_INT_VECTOR *>(uint_crd_f);
-  const VECTOR *scaler = reinterpret_cast<const VECTOR *>(scaler_f);
-  const VECTOR *pair_dr = reinterpret_cast<const VECTOR *>(pair_dr_f);
-
-  VECTOR *test_frc = reinterpret_cast<VECTOR *>(test_frc_f);
-
-  CONSTRAIN_PAIR *constrain_pair = reinterpret_cast<CONSTRAIN_PAIR *>(constrain_pair_f);
-
-  construct_constrain_pair<<<ceilf(static_cast<float>(constrain_pair_numbers) / 128), 128, 0, stream>>>(
-    constrain_pair_numbers, atom_i_serials, atom_j_serials, constant_rs, constrain_ks, constrain_pair);
-
-  Constrain_Force_Cycle_With_Virial<<<block_per_grid, thread_per_block, 0, stream>>>(
-    constrain_pair_numbers, uint_crd, scaler, constrain_pair, pair_dr, test_frc, d_atom_virial);
-
-  return;
-}
-
-void Constrain_Force_Cycle_With_Virial(int atom_numbers, int constrain_pair_numbers, const unsigned int *uint_crd_f,
-                                       const float *scaler_f, float *constrain_pair_f, const float *pair_dr_f,
-                                       const int *atom_i_serials, const int *atom_j_serials, const float *constant_rs,
-                                       const float *constrain_ks, float *test_frc_f, float *d_atom_virial,
-                                       cudaStream_t stream);
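[Editor's note on the file removed above.] Each pair kernel banks its virial contribution before scattering forces: `d_atom_virial[pair_i] -= frc_lin * dr0` is a dot product (the VECTOR operator* from common_sponge.cuh), so summing the per-pair entries gives the constraint part of the scalar virial. A framework-free sketch of the same bookkeeping, with illustrative names rather than the removed API:

struct Vec3 { float x, y, z; };

__device__ __forceinline__ float Dot(Vec3 a, Vec3 b) { return a.x * b.x + a.y * b.y + a.z * b.z; }

__global__ void PairVirial(int pair_numbers, const Vec3 *pair_frc, const Vec3 *pair_dr, float *pair_virial) {
  int i = blockDim.x * blockIdx.x + threadIdx.x;
  if (i < pair_numbers) {
    // Mirrors d_atom_virial[pair_i] -= frc_lin * dr0 in the removed kernel.
    pair_virial[i] -= Dot(pair_frc[i], pair_dr[i]);
  }
}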
diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/simple_constrain/constrain_force_cycle_with_virial_impl.cuh b/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/simple_constrain/constrain_force_cycle_with_virial_impl.cuh
deleted file mode 100644
index bbc9064d1e9..00000000000
--- a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/simple_constrain/constrain_force_cycle_with_virial_impl.cuh
+++ /dev/null
@@ -1,34 +0,0 @@
-/**
- * Copyright 2021 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-/**
- * Note:
- * ConstrainForceCycleVirial. This is an experimental interface that is subject to change and/or deletion.
- */
-
-#ifndef MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_SPONGE_SIMPLE_CONSTRAIN_CONSTRAIN_FORCE_CYCLE_WITH_VIRIAL_IMPL_H_
-#define MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_SPONGE_SIMPLE_CONSTRAIN_CONSTRAIN_FORCE_CYCLE_WITH_VIRIAL_IMPL_H_
-
-#include
-#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/cuda_common.h"
-#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/cuda_device_info.h"
-
-CUDA_LIB_EXPORT void Constrain_Force_Cycle_With_Virial(int atom_numbers, int constrain_pair_numbers,
-                                                       const unsigned int *uint_crd_f, const float *scaler_f,
-                                                       float *constrain_pair_f, const float *pair_dr_f,
-                                                       const int *atom_i_serials, const int *atom_j_serials,
-                                                       const float *constant_rs, const float *constrain_ks,
-                                                       float *test_frc_f, float *d_atom_virial, cudaStream_t stream);
-#endif
diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/simple_constrain/constrain_force_virial_impl.cu b/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/simple_constrain/constrain_force_virial_impl.cu
deleted file mode 100644
index 904270580c2..00000000000
--- a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/simple_constrain/constrain_force_virial_impl.cu
+++ /dev/null
@@ -1,180 +0,0 @@
-/**
- * Copyright 2021 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-/**
- * Note:
- * ConstrainForceCycleVirial. This is an experimental interface that is subject to change and/or deletion.
- */
-
-#include "plugin/device/gpu/kernel/cuda_impl/sponge/common_sponge.cuh"
-#include "plugin/device/gpu/kernel/cuda_impl/sponge/simple_constrain/constrain_force_virial_impl.cuh"
-
-__global__ void constrain_force_cycle_with_virial_update_kernel(int constrain_pair_numbers,
-                                                                const UNSIGNED_INT_VECTOR *uint_crd,
-                                                                const VECTOR *scaler, CONSTRAIN_PAIR *constrain_pair,
-                                                                const VECTOR *pair_dr, VECTOR *test_frc,
-                                                                float *d_atom_virial) {
-  int pair_i = blockDim.x * blockIdx.x + threadIdx.x;
-  if (pair_i < constrain_pair_numbers) {
-    CONSTRAIN_PAIR cp = constrain_pair[pair_i];
-    VECTOR dr0 = pair_dr[pair_i];
-    VECTOR dr = Get_Periodic_Displacement(uint_crd[cp.atom_i_serial], uint_crd[cp.atom_j_serial], scaler[0]);
-    float r_1 = rnorm3df(dr.x, dr.y, dr.z);
-    float frc_abs = (1. - cp.constant_r * r_1) * cp.constrain_k;
-    VECTOR frc_lin = frc_abs * dr0;
-    d_atom_virial[pair_i] -= frc_lin * dr0;
-
-    atomicAdd(&test_frc[cp.atom_j_serial].x, frc_lin.x);
-    atomicAdd(&test_frc[cp.atom_j_serial].y, frc_lin.y);
-    atomicAdd(&test_frc[cp.atom_j_serial].z, frc_lin.z);
-
-    atomicAdd(&test_frc[cp.atom_i_serial].x, -frc_lin.x);
-    atomicAdd(&test_frc[cp.atom_i_serial].y, -frc_lin.y);
-    atomicAdd(&test_frc[cp.atom_i_serial].z, -frc_lin.z);
-  }
-}
-
-__global__ void constrain_force_cycle_update_kernel(const int constrain_pair_numbers,
-                                                    const UNSIGNED_INT_VECTOR *uint_crd, const VECTOR *scaler,
-                                                    const CONSTRAIN_PAIR *constrain_pair, const VECTOR *pair_dr,
-                                                    VECTOR *test_frc) {
-  int pair_i = blockDim.x * blockIdx.x + threadIdx.x;
-  if (pair_i < constrain_pair_numbers) {
-    CONSTRAIN_PAIR cp = constrain_pair[pair_i];
-    float r_1;
-    VECTOR dr;
-    float frc_abs;
-    VECTOR frc_lin;
-
-    dr.x = (static_cast<int>(uint_crd[cp.atom_i_serial].uint_x - uint_crd[cp.atom_j_serial].uint_x)) * scaler[0].x;
-    dr.y = (static_cast<int>(uint_crd[cp.atom_i_serial].uint_y - uint_crd[cp.atom_j_serial].uint_y)) * scaler[0].y;
-    dr.z = (static_cast<int>(uint_crd[cp.atom_i_serial].uint_z - uint_crd[cp.atom_j_serial].uint_z)) * scaler[0].z;
-    r_1 = rnorm3df(dr.x, dr.y, dr.z);
-    frc_abs = (1. - cp.constant_r * r_1) * cp.constrain_k;
-
-    frc_lin.x = frc_abs * pair_dr[pair_i].x;
-    frc_lin.y = frc_abs * pair_dr[pair_i].y;
-    frc_lin.z = frc_abs * pair_dr[pair_i].z;
-
-    atomicAdd(&test_frc[cp.atom_j_serial].x, frc_lin.x);
-    atomicAdd(&test_frc[cp.atom_j_serial].y, frc_lin.y);
-    atomicAdd(&test_frc[cp.atom_j_serial].z, frc_lin.z);
-
-    atomicAdd(&test_frc[cp.atom_i_serial].x, -frc_lin.x);
-    atomicAdd(&test_frc[cp.atom_i_serial].y, -frc_lin.y);
-    atomicAdd(&test_frc[cp.atom_i_serial].z, -frc_lin.z);
-  }
-}
-
-__global__ void refresh_uint_crd_update_kernel(int atom_numbers, const VECTOR *crd,
-                                               const VECTOR *quarter_crd_to_uint_crd_cof, UNSIGNED_INT_VECTOR *uint_crd,
-                                               VECTOR *test_frc, const float *mass_inverse,
-                                               const float half_exp_gamma_plus_half) {
-  int atom_i = blockDim.x * blockIdx.x + threadIdx.x;
-  if (atom_i < atom_numbers) {
-    INT_VECTOR tempi;
-    VECTOR crd_lin = crd[atom_i];
-    VECTOR frc_lin = test_frc[atom_i];
-    float mass_lin = mass_inverse[atom_i];
-
-    crd_lin.x = crd_lin.x + half_exp_gamma_plus_half * frc_lin.x * mass_lin;
-    crd_lin.y = crd_lin.y + half_exp_gamma_plus_half * frc_lin.y * mass_lin;
-    crd_lin.z = crd_lin.z + half_exp_gamma_plus_half * frc_lin.z * mass_lin;
-
-    tempi.int_x = crd_lin.x * quarter_crd_to_uint_crd_cof[0].x;
-    tempi.int_y = crd_lin.y * quarter_crd_to_uint_crd_cof[0].y;
-    tempi.int_z = crd_lin.z * quarter_crd_to_uint_crd_cof[0].z;
-
-    uint_crd[atom_i].uint_x = tempi.int_x << 2;
-    uint_crd[atom_i].uint_y = tempi.int_y << 2;
-    uint_crd[atom_i].uint_z = tempi.int_z << 2;
-  }
-}
-
-void constrain_force_cycle_update(int atom_numbers, int constrain_pair_numbers, const unsigned int *uint_crd_f,
-                                  const float *scaler_f, float *constrain_pair_f, const float *pair_dr_f,
-                                  const int *atom_i_serials, const int *atom_j_serials, const float *constant_rs,
-                                  const float *constrain_ks, float *test_frc_f, cudaStream_t stream) {
-  size_t thread_per_block = 128;
-  size_t block_per_grid = ceilf(static_cast<float>(atom_numbers) / 128);
-  const UNSIGNED_INT_VECTOR *uint_crd = reinterpret_cast<const UNSIGNED_INT_VECTOR *>(uint_crd_f);
-  const VECTOR *scaler = reinterpret_cast<const VECTOR *>(scaler_f);
-  const VECTOR *pair_dr = reinterpret_cast<const VECTOR *>(pair_dr_f);
-
-  VECTOR *test_frc = reinterpret_cast<VECTOR *>(test_frc_f);
-
-  CONSTRAIN_PAIR *constrain_pair = reinterpret_cast<CONSTRAIN_PAIR *>(constrain_pair_f);
-
-  construct_constrain_pair<<<ceilf(static_cast<float>(constrain_pair_numbers) / 128), 128, 0, stream>>>(
-    constrain_pair_numbers, atom_i_serials, atom_j_serials, constant_rs, constrain_ks, constrain_pair);
-
-  constrain_force_cycle_update_kernel<<<block_per_grid, thread_per_block, 0, stream>>>(
-    constrain_pair_numbers, uint_crd, scaler, constrain_pair, pair_dr, test_frc);
-
-  return;
-}
-
-void constrain_force_cycle_with_virial_update(int atom_numbers, int constrain_pair_numbers,
-                                              const unsigned int *uint_crd_f, const float *scaler_f,
-                                              float *constrain_pair_f, const float *pair_dr_f,
-                                              const int *atom_i_serials, const int *atom_j_serials,
-                                              const float *constant_rs, const float *constrain_ks, float *test_frc_f,
-                                              float *d_atom_virial, cudaStream_t stream) {
-  size_t thread_per_block = 128;
-  size_t block_per_grid = ceilf(static_cast<float>(atom_numbers) / 128);
-  const UNSIGNED_INT_VECTOR *uint_crd = reinterpret_cast<const UNSIGNED_INT_VECTOR *>(uint_crd_f);
-  const VECTOR *scaler = reinterpret_cast<const VECTOR *>(scaler_f);
-  const VECTOR *pair_dr = reinterpret_cast<const VECTOR *>(pair_dr_f);
-
-  VECTOR *test_frc = reinterpret_cast<VECTOR *>(test_frc_f);
-
-  CONSTRAIN_PAIR *constrain_pair = reinterpret_cast<CONSTRAIN_PAIR *>(constrain_pair_f);
-
-  construct_constrain_pair<<<ceilf(static_cast<float>(constrain_pair_numbers) / 128), 128, 0, stream>>>(
-    constrain_pair_numbers, atom_i_serials, atom_j_serials, constant_rs, constrain_ks, constrain_pair);
-
-  constrain_force_cycle_with_virial_update_kernel<<<block_per_grid, thread_per_block, 0, stream>>>(
-    constrain_pair_numbers, uint_crd, scaler, constrain_pair, pair_dr, test_frc, d_atom_virial);
-
-  return;
-}
-
-void refresh_uint_crd_update(int atom_numbers, float half_exp_gamma_plus_half, const float *crd_f,
-                             const float *quarter_crd_to_uint_crd_cof_f, float *test_frc_f, const float *mass_inverse,
-                             unsigned int *uint_crd_f, cudaStream_t stream) {
-  size_t thread_per_block = 128;
-  size_t block_per_grid = ceilf(static_cast<float>(atom_numbers) / 128);
-  const VECTOR *crd = reinterpret_cast<const VECTOR *>(crd_f);
-  const VECTOR *quarter_crd_to_uint_crd_cof = reinterpret_cast<const VECTOR *>(quarter_crd_to_uint_crd_cof_f);
-  VECTOR *test_frc = reinterpret_cast<VECTOR *>(test_frc_f);
-  UNSIGNED_INT_VECTOR *uint_crd = reinterpret_cast<UNSIGNED_INT_VECTOR *>(uint_crd_f);
-
-  refresh_uint_crd_update_kernel<<<block_per_grid, thread_per_block, 0, stream>>>(
-    atom_numbers, crd, quarter_crd_to_uint_crd_cof, uint_crd, test_frc, mass_inverse, half_exp_gamma_plus_half);
-  return;
-}
-
-void set_zero_force_with_virial(int atom_numbers, int constrain_pair_numbers, float *test_frc_f, float *d_atom_virial,
-                                cudaStream_t stream) {
-  Reset_List<<<ceilf(static_cast<float>(3 * atom_numbers) / 128), 128, 0, stream>>>(3 * atom_numbers, test_frc_f, 0.);
-  Reset_List<<<ceilf(static_cast<float>(constrain_pair_numbers) / 128), 128, 0, stream>>>(constrain_pair_numbers,
-                                                                                          d_atom_virial, 0.);
-  return;
-}
-
-void set_zero(int numbers, float *x, cudaStream_t stream) {
-  Reset_List<<<ceilf(static_cast<float>(numbers) / 128), 128, 0, stream>>>(numbers, x, 0.);
-  return;
-}
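[Editor's note on the file removed above.] All of these kernels apply pair forces with the same scatter-add pattern: many pairs may update the same atom concurrently, so each Cartesian component is accumulated with atomicAdd, +f on atom j and -f on atom i. A reduced sketch of the pattern, with illustrative names only:

struct Vec3 { float x, y, z; };

__global__ void ApplyPairForce(int pair_numbers, const int *atom_i, const int *atom_j, const Vec3 *pair_frc,
                               Vec3 *frc) {
  int p = blockDim.x * blockIdx.x + threadIdx.x;
  if (p < pair_numbers) {
    Vec3 f = pair_frc[p];
    atomicAdd(&frc[atom_j[p]].x, f.x);   // +f on atom j
    atomicAdd(&frc[atom_j[p]].y, f.y);
    atomicAdd(&frc[atom_j[p]].z, f.z);
    atomicAdd(&frc[atom_i[p]].x, -f.x);  // -f on atom i (Newton's third law)
    atomicAdd(&frc[atom_i[p]].y, -f.y);
    atomicAdd(&frc[atom_i[p]].z, -f.z);
  }
}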
diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/simple_constrain/constrain_force_virial_impl.cuh b/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/simple_constrain/constrain_force_virial_impl.cuh
deleted file mode 100644
index 289455b9327..00000000000
--- a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/simple_constrain/constrain_force_virial_impl.cuh
+++ /dev/null
@@ -1,49 +0,0 @@
-/**
- * Copyright 2021 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-/**
- * Note:
- * ConstrainForce. This is an experimental interface that is subject to change and/or deletion.
- */
-
-#ifndef MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_SPONGE_SIMPLE_CONSTRAIN_CONSTRAIN_FORCE_VIRIAL_IMPL_H_
-#define MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_SPONGE_SIMPLE_CONSTRAIN_CONSTRAIN_FORCE_VIRIAL_IMPL_H_
-
-#include
-#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/cuda_common.h"
-#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/cuda_device_info.h"
-
-CUDA_LIB_EXPORT void constrain_force_cycle_update(int atom_numbers, int constrain_pair_numbers,
-                                                  const unsigned int *uint_crd_f, const float *scaler_f,
-                                                  float *constrain_pair_f, const float *pair_dr_f,
-                                                  const int *atom_i_serials, const int *atom_j_serials,
-                                                  const float *constant_rs, const float *constrain_ks,
-                                                  float *test_frc_f, cudaStream_t stream);
-
-CUDA_LIB_EXPORT void constrain_force_cycle_with_virial_update(
-  int atom_numbers, int constrain_pair_numbers, const unsigned int *uint_crd_f, const float *scaler_f,
-  float *constrain_pair_f, const float *pair_dr_f, const int *atom_i_serials, const int *atom_j_serials,
-  const float *constant_rs, const float *constrain_ks, float *test_frc_f, float *d_atom_virial, cudaStream_t stream);
-
-CUDA_LIB_EXPORT void refresh_uint_crd_update(int atom_numbers, float half_exp_gamma_plus_half, const float *crd_f,
-                                             const float *quarter_crd_to_uint_crd_cof_f, float *test_frc_f,
-                                             const float *mass_inverse, unsigned int *uint_crd_f, cudaStream_t stream);
-
-CUDA_LIB_EXPORT void set_zero_force_with_virial(int atom_numbers, int constrain_pair_numbers, float *test_frc_f,
-                                                float *d_atom_virial, cudaStream_t stream);
-
-CUDA_LIB_EXPORT void set_zero(int numbers, float *x, cudaStream_t stream);
-
-#endif
diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/simple_constrain/last_crd_to_dr_impl.cu b/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/simple_constrain/last_crd_to_dr_impl.cu
deleted file mode 100644
index 56081c1b87b..00000000000
--- a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/simple_constrain/last_crd_to_dr_impl.cu
+++ /dev/null
@@ -1,84 +0,0 @@
-/**
- * Copyright 2021 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "plugin/device/gpu/kernel/cuda_impl/sponge/simple_constrain/last_crd_to_dr_impl.cuh"
-#include "plugin/device/gpu/kernel/cuda_impl/sponge/common_sponge.cuh"
-
-__global__ void Last_Crd_To_dr(const int constarin_pair_numbers, const VECTOR *atom_crd,
-                               const VECTOR *quarter_crd_to_uint_crd_cof, const VECTOR *uint_dr_to_dr,
-                               CONSTRAIN_PAIR *constrain_pair, VECTOR *pair_dr) {
-  int pair_i = blockDim.x * blockIdx.x + threadIdx.x;
-  if (pair_i < constarin_pair_numbers) {
-    INT_VECTOR tempi;
-    INT_VECTOR tempj;
-    UNSIGNED_INT_VECTOR uint_crd_i;
-    UNSIGNED_INT_VECTOR uint_crd_j;
-    CONSTRAIN_PAIR cp = constrain_pair[pair_i];
-    VECTOR dr;
-
-    tempi.int_x = atom_crd[cp.atom_i_serial].x * quarter_crd_to_uint_crd_cof[0].x;
-    tempi.int_y = atom_crd[cp.atom_i_serial].y * quarter_crd_to_uint_crd_cof[0].y;
-    tempi.int_z = atom_crd[cp.atom_i_serial].z * quarter_crd_to_uint_crd_cof[0].z;
-
-    tempj.int_x = atom_crd[cp.atom_j_serial].x * quarter_crd_to_uint_crd_cof[0].x;
-    tempj.int_y = atom_crd[cp.atom_j_serial].y * quarter_crd_to_uint_crd_cof[0].y;
-    tempj.int_z = atom_crd[cp.atom_j_serial].z * quarter_crd_to_uint_crd_cof[0].z;
-
-    uint_crd_i.uint_x = tempi.int_x << 2;
-    uint_crd_i.uint_y = tempi.int_y << 2;
-    uint_crd_i.uint_z = tempi.int_z << 2;
-
-    uint_crd_j.uint_x = tempj.int_x << 2;
-    uint_crd_j.uint_y = tempj.int_y << 2;
-    uint_crd_j.uint_z = tempj.int_z << 2;
-
-    dr.x = (static_cast<int>(uint_crd_i.uint_x - uint_crd_j.uint_x)) * uint_dr_to_dr[0].x;
-    dr.y = (static_cast<int>(uint_crd_i.uint_y - uint_crd_j.uint_y)) * uint_dr_to_dr[0].y;
-    dr.z = (static_cast<int>(uint_crd_i.uint_z - uint_crd_j.uint_z)) * uint_dr_to_dr[0].z;
-
-    pair_dr[pair_i] = dr;
-  }
-}
-
-void lastcrdtodr(int constrain_pair_numbers, const float *atom_crd_f, const float *quarter_crd_to_uint_crd_cof_f,
-                 const float *uint_dr_to_dr_f, float *constrain_pair_f, const int *atom_i_serials,
-                 const int *atom_j_serials, const float *constant_rs, const float *constrain_ks, float *pair_dr_f,
-                 cudaStream_t stream) {
-  Reset_List<<<ceilf(static_cast<float>(3 * constrain_pair_numbers) / 128), 128, 0, stream>>>(
-    3 * constrain_pair_numbers, pair_dr_f, 0.);
-  size_t thread_per_block = 128;
-  size_t block_per_grid = ceilf(static_cast<float>(constrain_pair_numbers) / 128);
-  const VECTOR *atom_crd = reinterpret_cast<const VECTOR *>(atom_crd_f);
-  const VECTOR *quarter_crd_to_uint_crd_cof = reinterpret_cast<const VECTOR *>(quarter_crd_to_uint_crd_cof_f);
-  const VECTOR *uint_dr_to_dr = reinterpret_cast<const VECTOR *>(uint_dr_to_dr_f);
-
-  CONSTRAIN_PAIR *constrain_pair = reinterpret_cast<CONSTRAIN_PAIR *>(constrain_pair_f);
-
-  VECTOR *pair_dr = reinterpret_cast<VECTOR *>(pair_dr_f);
-
-  construct_constrain_pair<<<ceilf(static_cast<float>(constrain_pair_numbers) / 128), 128, 0, stream>>>(
-    constrain_pair_numbers, atom_i_serials, atom_j_serials, constant_rs, constrain_ks, constrain_pair);
-
-  Last_Crd_To_dr<<<block_per_grid, thread_per_block, 0, stream>>>(constrain_pair_numbers, atom_crd,
-                                                                  quarter_crd_to_uint_crd_cof, uint_dr_to_dr,
-                                                                  constrain_pair, pair_dr);
-  return;
-}
-
-void lastcrdtodr(int constrain_pair_numbers, const float *atom_crd_f, const float *quarter_crd_to_uint_crd_cof_f,
-                 const float *uint_dr_to_dr_f, float *constrain_pair_f, const int *atom_i_serials,
-                 const int *atom_j_serials, const float *constant_rs, const float *constrain_ks, float *pair_dr_f,
-                 cudaStream_t stream);
diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/simple_constrain/last_crd_to_dr_impl.cuh b/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/simple_constrain/last_crd_to_dr_impl.cuh
deleted file mode 100644
index becb141bfe0..00000000000
--- a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/simple_constrain/last_crd_to_dr_impl.cuh
+++ /dev/null
@@ -1,29 +0,0 @@
-/**
- * Copyright 2021 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_SPONGE_SIMPLE_CONSTRAIN_LAST_CRD_TO_DR_IMPL_H_
-#define MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_SPONGE_SIMPLE_CONSTRAIN_LAST_CRD_TO_DR_IMPL_H_
-
-#include
-#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/cuda_common.h"
-#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/cuda_device_info.h"
-
-CUDA_LIB_EXPORT void lastcrdtodr(int constrain_pair_numbers, const float *atom_crd_f,
-                                 const float *quarter_crd_to_uint_crd_cof_f, const float *uint_dr_to_dr_f,
-                                 float *constrain_pair_f, const int *atom_i_serials, const int *atom_j_serials,
-                                 const float *constant_rs, const float *constrain_ks, float *pair_dr_f,
-                                 cudaStream_t stream);
-#endif  // MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_SPONGE_SIMPLE_CONSTRAIN_LAST_CRD_TO_DR_IMPL_H_
diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/simple_constrain/refresh_crd_vel_impl.cu b/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/simple_constrain/refresh_crd_vel_impl.cu
deleted file mode 100644
index 996381d83b2..00000000000
--- a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/simple_constrain/refresh_crd_vel_impl.cu
+++ /dev/null
@@ -1,61 +0,0 @@
-/**
- * Copyright 2021 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "plugin/device/gpu/kernel/cuda_impl/sponge/simple_constrain/refresh_crd_vel_impl.cuh"
-#include "plugin/device/gpu/kernel/cuda_impl/sponge/common_sponge.cuh"
-
-__global__ void Refresh_Crd_Vel(int atom_numbers, float dt_inverse, float dt, float exp_gamma,
-                                float half_exp_gamma_plus_half, VECTOR *test_frc, float *mass_inverse, VECTOR *crd,
-                                VECTOR *vel) {
-  int atom_i = blockDim.x * blockIdx.x + threadIdx.x;
-  if (atom_i < atom_numbers) {
-    VECTOR crd_lin = crd[atom_i];
-    VECTOR frc_lin = test_frc[atom_i];
-    VECTOR vel_lin = vel[atom_i];
-    float mass_lin = mass_inverse[atom_i];
-
-    frc_lin.x = frc_lin.x * mass_lin;
-    frc_lin.y = frc_lin.y * mass_lin;
-    frc_lin.z = frc_lin.z * mass_lin;  // mass_lin is actually the inverse mass; frc_lin has already been multiplied by dt^2
-
-    crd_lin.x = crd_lin.x + half_exp_gamma_plus_half * frc_lin.x;
-    crd_lin.y = crd_lin.y + half_exp_gamma_plus_half * frc_lin.y;
-    crd_lin.z = crd_lin.z + half_exp_gamma_plus_half * frc_lin.z;
-
-    vel_lin.x = (vel_lin.x + exp_gamma * frc_lin.x * dt_inverse);
-    vel_lin.y = (vel_lin.y + exp_gamma * frc_lin.y * dt_inverse);
-    vel_lin.z = (vel_lin.z + exp_gamma * frc_lin.z * dt_inverse);
-
-    crd[atom_i] = crd_lin;
-    vel[atom_i] = vel_lin;
-  }
-}
-
-void refreshcrdvel(int atom_numbers, float dt_inverse, float dt, float exp_gamma, float half_exp_gamma_plus_half,
-                   float *test_frc_f, float *mass_inverse, float *crd_f, float *vel_f, cudaStream_t stream) {
-  size_t thread_per_block = 128;
-  size_t block_per_grid = ceilf(static_cast<float>(atom_numbers) / 128);
-  VECTOR *crd = reinterpret_cast<VECTOR *>(crd_f);
-  VECTOR *vel = reinterpret_cast<VECTOR *>(vel_f);
-  VECTOR *test_frc = reinterpret_cast<VECTOR *>(test_frc_f);
-
-  Refresh_Crd_Vel<<<block_per_grid, thread_per_block, 0, stream>>>(
-    atom_numbers, dt_inverse, dt, exp_gamma, half_exp_gamma_plus_half, test_frc, mass_inverse, crd, vel);
-  return;
-}
-
-void refreshcrdvel(int atom_numbers, float dt_inverse, float dt, float exp_gamma, float half_exp_gamma_plus_half,
-                   float *test_frc_f, float *mass_inverse, float *crd_f, float *vel_f, cudaStream_t stream);
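[Editor's note on the file removed above.] Refresh_Crd_Vel applies a Langevin-style constraint correction in which the force has been pre-scaled by dt^2: the position moves by half_exp_gamma_plus_half * a and the velocity by exp_gamma * a / dt. A one-degree-of-freedom worked example with hypothetical numbers:

#include <cstdio>

int main() {
  float dt = 0.001f, dt_inverse = 1.0f / dt;
  float exp_gamma = 0.9f;                  // stands for exp(-gamma * dt)
  float half_exp_gamma_plus_half = 0.95f;  // 0.5 * (1 + exp_gamma)
  float mass_inverse = 1.0f / 12.0f;       // e.g. one carbon-like particle
  float frc = 2.4e-5f;                     // constraint force, already times dt^2
  float crd = 10.0f, vel = 0.5f;

  float acc = frc * mass_inverse;          // still carries the dt^2 factor
  crd += half_exp_gamma_plus_half * acc;   // position correction
  vel += exp_gamma * acc * dt_inverse;     // velocity correction
  printf("crd = %f, vel = %f\n", crd, vel);
  return 0;
}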
diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/simple_constrain/refresh_crd_vel_impl.cuh b/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/simple_constrain/refresh_crd_vel_impl.cuh
deleted file mode 100644
index f3c27147830..00000000000
--- a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/simple_constrain/refresh_crd_vel_impl.cuh
+++ /dev/null
@@ -1,27 +0,0 @@
-/**
- * Copyright 2021 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_SPONGE_SIMPLE_CONSTRAIN_REFRESH_CRD_VEL_IMPL_H_
-#define MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_SPONGE_SIMPLE_CONSTRAIN_REFRESH_CRD_VEL_IMPL_H_
-
-#include
-#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/cuda_common.h"
-#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/cuda_device_info.h"
-
-CUDA_LIB_EXPORT void refreshcrdvel(int atom_numbers, float dt_inverse, float dt, float exp_gamma,
-                                   float half_exp_gamma_plus_half, float *test_frc_f, float *mass_inverse, float *crd_f,
-                                   float *vel_f, cudaStream_t stream);
-#endif  // MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_SPONGE_SIMPLE_CONSTRAIN_REFRESH_CRD_VEL_IMPL_H_
diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/simple_constrain/refresh_uint_crd_impl.cu b/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/simple_constrain/refresh_uint_crd_impl.cu
deleted file mode 100644
index 43aaf3142a8..00000000000
--- a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/simple_constrain/refresh_uint_crd_impl.cu
+++ /dev/null
@@ -1,63 +0,0 @@
-/**
- * Copyright 2021 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "plugin/device/gpu/kernel/cuda_impl/sponge/simple_constrain/refresh_uint_crd_impl.cuh"
-#include "plugin/device/gpu/kernel/cuda_impl/sponge/common_sponge.cuh"
-
-__global__ void Refresh_Uint_Crd(int atom_numbers, const VECTOR *crd, const VECTOR *quarter_crd_to_uint_crd_cof,
-                                 UNSIGNED_INT_VECTOR *uint_crd, const VECTOR *test_frc, const float *mass_inverse,
-                                 const float half_exp_gamma_plus_half) {
-  int atom_i = blockDim.x * blockIdx.x + threadIdx.x;
-  if (atom_i < atom_numbers) {
-    INT_VECTOR tempi;
-    VECTOR crd_lin = crd[atom_i];
-    VECTOR frc_lin = test_frc[atom_i];
-    float mass_lin = mass_inverse[atom_i];
-
-    crd_lin.x = crd_lin.x + half_exp_gamma_plus_half * frc_lin.x * mass_lin;
-    crd_lin.y = crd_lin.y + half_exp_gamma_plus_half * frc_lin.y * mass_lin;
-    crd_lin.z = crd_lin.z + half_exp_gamma_plus_half * frc_lin.z * mass_lin;
-
-    tempi.int_x = crd_lin.x * quarter_crd_to_uint_crd_cof[0].x;
-    tempi.int_y = crd_lin.y * quarter_crd_to_uint_crd_cof[0].y;
-    tempi.int_z = crd_lin.z * quarter_crd_to_uint_crd_cof[0].z;
-
-    uint_crd[atom_i].uint_x = tempi.int_x << 2;
-    uint_crd[atom_i].uint_y = tempi.int_y << 2;
-    uint_crd[atom_i].uint_z = tempi.int_z << 2;
-  }
-}
-
-void refreshuintcrd(int atom_numbers, float half_exp_gamma_plus_half, const float *crd_f,
-                    const float *quarter_crd_to_uint_crd_cof_f, const float *test_frc_f, const float *mass_inverse,
-                    unsigned int *uint_crd_f, cudaStream_t stream) {
-  size_t thread_per_block = 128;
-  size_t block_per_grid = ceilf(static_cast<float>(atom_numbers) / 128);
-  const VECTOR *crd = reinterpret_cast<const VECTOR *>(crd_f);
-  const VECTOR *quarter_crd_to_uint_crd_cof = reinterpret_cast<const VECTOR *>(quarter_crd_to_uint_crd_cof_f);
-  const VECTOR *test_frc = reinterpret_cast<const VECTOR *>(test_frc_f);
-  UNSIGNED_INT_VECTOR *uint_crd = reinterpret_cast<UNSIGNED_INT_VECTOR *>(uint_crd_f);
-
-  Refresh_Uint_Crd<<<block_per_grid, thread_per_block, 0, stream>>>(atom_numbers, crd,
-                                                                    quarter_crd_to_uint_crd_cof,
-                                                                    uint_crd, test_frc, mass_inverse,
-                                                                    half_exp_gamma_plus_half);
-  return;
-}
-
-void refreshuintcrd(int atom_numbers, float half_exp_gamma_plus_half, const float *crd_f,
-                    const float *quarter_crd_to_uint_crd_cof_f, const float *test_frc_f, const float *mass_inverse,
-                    unsigned int *uint_crd_f, cudaStream_t stream);
diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/simple_constrain/refresh_uint_crd_impl.cuh b/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/simple_constrain/refresh_uint_crd_impl.cuh
deleted file mode 100644
index 1a6e3df7d9c..00000000000
--- a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/sponge/simple_constrain/refresh_uint_crd_impl.cuh
+++ /dev/null
@@ -1,27 +0,0 @@
-/**
- * Copyright 2021 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_SPONGE_SIMPLE_CONSTRAIN_REFRESH_UINT_CRD_IMPL_H_
-#define MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_SPONGE_SIMPLE_CONSTRAIN_REFRESH_UINT_CRD_IMPL_H_
-
-#include
-#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/cuda_common.h"
-#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/cuda_device_info.h"
-
-CUDA_LIB_EXPORT void refreshuintcrd(int atom_numbers, float half_exp_gamma_plus_half, const float *crd_f,
-                                    const float *quarter_crd_to_uint_crd_cof_f, const float *test_frc_f,
-                                    const float *mass_inverse, unsigned int *uint_crd_f, cudaStream_t stream);
-#endif  // MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_SPONGE_SIMPLE_CONSTRAIN_REFRESH_UINT_CRD_IMPL_H_
diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/OWNERS b/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/OWNERS
deleted file mode 100644
index 2d07c75f316..00000000000
--- a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/OWNERS
+++ /dev/null
@@ -1,2 +0,0 @@
-approvers:
-- wang_zi_dong
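[Editor's note on the refresh_uint_crd files removed above.] Refresh_Uint_Crd and the quarter_crd_to_uint_crd_cof inputs rely on a quarter-resolution fixed-point encoding: positions are first scaled into int at one quarter of the unsigned range, then shifted left by 2 to restore full scale, so later unsigned subtraction wraps correctly across the periodic box. A sketch of the encoding under that assumption (helper name is hypothetical):

#include <cstdint>

// Assumes quarter_cof = 2^30 / box_length, i.e. a quarter of the full
// 2^32 / box_length scale factor; the shift restores the full-scale value.
__host__ __device__ inline uint32_t QuantizeQuarter(float x, float quarter_cof) {
  int xi = static_cast<int>(x * quarter_cof);  // signed intermediate, like INT_VECTOR tempi
  return static_cast<uint32_t>(xi) << 2;       // matches tempi.int_x << 2
}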
diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/angle/angle_atom_energy_kernel.cc b/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/angle/angle_atom_energy_kernel.cc
deleted file mode 100644
index f3345ea0ffa..00000000000
--- a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/angle/angle_atom_energy_kernel.cc
+++ /dev/null
@@ -1,88 +0,0 @@
-/**
- * Copyright 2021-2022 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "plugin/device/gpu/kernel/sponge/angle/angle_atom_energy_kernel.h"
-#include "ops/angle_atom_energy.h"
-
-namespace mindspore {
-namespace kernel {
-using KernelRunFunc = AngleAtomEnergyGpuKernelMod::KernelRunFunc;
-const std::vector<std::pair<KernelAttr, KernelRunFunc>> &AngleAtomEnergyGpuKernelMod::GetFuncList() const {
-  static const std::vector<std::pair<KernelAttr, KernelRunFunc>> func_list = {
-    {KernelAttr()
-       .AddInputAttr(kNumberTypeUInt32)
-       .AddInputAttr(kNumberTypeFloat32)
-       .AddInputAttr(kNumberTypeInt32)
-       .AddInputAttr(kNumberTypeInt32)
-       .AddInputAttr(kNumberTypeInt32)
-       .AddInputAttr(kNumberTypeFloat32)
-       .AddInputAttr(kNumberTypeFloat32)
-       .AddOutputAttr(kNumberTypeFloat32),
-     &AngleAtomEnergyGpuKernelMod::LaunchKernel<float, int>}};
-  return func_list;
-}
-
-bool AngleAtomEnergyGpuKernelMod::Init(const BaseOperatorPtr &base_operator, const std::vector<KernelTensorPtr> &inputs,
-                                       const std::vector<KernelTensorPtr> &outputs) {
-  if (!MatchKernelFunc(base_operator, inputs, outputs)) {
-    return false;
-  }
-  auto kernel_ptr = std::dynamic_pointer_cast<ops::AngleAtomEnergy>(base_operator);
-  MS_EXCEPTION_IF_NULL(kernel_ptr);
-  kernel_name_ = kernel_ptr->name();
-  angle_numbers_ = static_cast<int>(kernel_ptr->get_angle_numbers());
-  return true;
-}
-
-int AngleAtomEnergyGpuKernelMod::Resize(const BaseOperatorPtr &base_operator,
-                                        const std::vector<KernelTensorPtr> &inputs,
-                                        const std::vector<KernelTensorPtr> &outputs,
-                                        const std::map<uint32_t, tensor::TensorPtr> &inputsOnHost) {
-  if (int ret = KernelMod::Resize(base_operator, inputs, outputs, inputsOnHost); ret != KRET_OK) {
-    return ret;
-  }
-  auto shape_uint_crd = inputs[0]->GetShapeVector();
-  ele_uint_crd_ = SizeOf(shape_uint_crd);
-  return KRET_OK;
-}
-
-template <typename T, typename T1>
-bool AngleAtomEnergyGpuKernelMod::LaunchKernel(const std::vector<AddressPtr> &inputs, const std::vector<AddressPtr> &,
-                                               const std::vector<AddressPtr> &outputs) {
-  auto uint_crd_f = GetDeviceAddress<T1>(inputs, 0);
-  auto scaler_f = GetDeviceAddress<T>(inputs, 1);
-  auto atom_a = GetDeviceAddress<T1>(inputs, 2);
-  auto atom_b = GetDeviceAddress<T1>(inputs, 3);
-  auto atom_c = GetDeviceAddress<T1>(inputs, 4);
-  auto angle_k = GetDeviceAddress<T>(inputs, 5);
-  auto angle_theta0 = GetDeviceAddress<T>(inputs, 6);
-
-  auto ene = GetDeviceAddress<T>(outputs, 0);
-  AngleAtomEnergy(angle_numbers_, ele_uint_crd_, uint_crd_f, scaler_f, atom_a, atom_b, atom_c, angle_k, angle_theta0,
-                  ene, reinterpret_cast<cudaStream_t>(stream_ptr_));
-  return true;
-}
-
-bool AngleAtomEnergyGpuKernelMod::Launch(const std::vector<AddressPtr> &inputs,
-                                         const std::vector<AddressPtr> &workspace,
-                                         const std::vector<AddressPtr> &outputs, void *stream_ptr) {
-  stream_ptr_ = stream_ptr;
-  return kernel_func_(this, inputs, workspace, outputs);
-}
-
-MS_KERNEL_FACTORY_REG(NativeGpuKernelMod, AngleAtomEnergy, AngleAtomEnergyGpuKernelMod);
-}  // namespace kernel
-}  // namespace mindspore
diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/angle/angle_atom_energy_kernel.h b/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/angle/angle_atom_energy_kernel.h
deleted file mode 100644
index e22ea7183f2..00000000000
--- a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/angle/angle_atom_energy_kernel.h
+++ /dev/null
@@ -1,62 +0,0 @@
-/**
- * Copyright 2021-2022 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef MINDSPORE_CCSRC_PLUGIN_DEVICE_GPU_KERNEL_SPONGE_ANGLE_ANGLE_ATOM_ENERGY_KERNEL_H_
-#define MINDSPORE_CCSRC_PLUGIN_DEVICE_GPU_KERNEL_SPONGE_ANGLE_ANGLE_ATOM_ENERGY_KERNEL_H_
-#include
-#include
-#include
-#include
-#include
-#include "plugin/device/gpu/kernel/gpu_kernel.h"
-#include "plugin/device/gpu/kernel/gpu_kernel_factory.h"
-#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/cuda_common.h"
-#include "plugin/device/gpu/kernel/cuda_impl/sponge/angle/angle_atom_energy_impl.cuh"
-
-namespace mindspore {
-namespace kernel {
-class AngleAtomEnergyGpuKernelMod : public NativeGpuKernelMod, public MatchKernelHelper<AngleAtomEnergyGpuKernelMod> {
- public:
-  AngleAtomEnergyGpuKernelMod() = default;
-  ~AngleAtomEnergyGpuKernelMod() override = default;
-
-  const std::vector<std::pair<KernelAttr, KernelRunFunc>> &GetFuncList() const override;
-
-  std::vector<KernelAttr> GetOpSupport() override { return OpSupport(); };
-
-  bool Init(const BaseOperatorPtr &base_operator, const std::vector<KernelTensorPtr> &inputs,
-            const std::vector<KernelTensorPtr> &outputs) override;
-
-  int Resize(const BaseOperatorPtr &base_operator, const std::vector<KernelTensorPtr> &inputs,
-             const std::vector<KernelTensorPtr> &outputs,
-             const std::map<uint32_t, tensor::TensorPtr> &inputsOnHost) override;
-
-  bool Launch(const std::vector<AddressPtr> &inputs, const std::vector<AddressPtr> &workspace,
-              const std::vector<AddressPtr> &outputs, void *stream_ptr) override;
-
- private:
-  template <typename T, typename T1>
-  bool LaunchKernel(const std::vector<AddressPtr> &inputs, const std::vector<AddressPtr> &workspace,
-                    const std::vector<AddressPtr> &outputs);
-
-  int angle_numbers_{0};
-  size_t ele_uint_crd_{1};
-  void *stream_ptr_{nullptr};
-};
-}  // namespace kernel
-}  // namespace mindspore
-
-#endif  // MINDSPORE_CCSRC_PLUGIN_DEVICE_GPU_KERNEL_SPONGE_ANGLE_ANGLE_ATOM_ENERGY_KERNEL_H_
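[Editor's note on the kernel pair removed above.] The new-style kernel uses the func-list idiom: each supported type signature is paired with a member-function pointer, and MatchKernelFunc selects one at Init time. A simplified, framework-free illustration of the idea (these names are not MindSpore's API):

#include <string>
#include <utility>
#include <vector>

struct Kernel {
  using RunFunc = bool (Kernel::*)();
  template <typename T>
  bool Run() { return true; }  // stands in for LaunchKernel<T, T1>

  // One (signature, function) pair per supported dtype combination.
  std::vector<std::pair<std::string, RunFunc>> FuncList() {
    return {{"float32", &Kernel::Run<float>}, {"int32", &Kernel::Run<int>}};
  }
  bool Init(const std::string &dtype) {  // analogous to MatchKernelFunc
    for (const auto &entry : FuncList()) {
      if (entry.first == dtype) {
        run_func_ = entry.second;
        return true;
      }
    }
    return false;
  }
  RunFunc run_func_ = nullptr;
};

int main() {
  Kernel k;
  return k.Init("float32") && (k.*k.run_func_)() ? 0 : 1;
}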
diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/angle/angle_energy_kernel.cc b/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/angle/angle_energy_kernel.cc
deleted file mode 100644
index 99a30601a17..00000000000
--- a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/angle/angle_energy_kernel.cc
+++ /dev/null
@@ -1,33 +0,0 @@
-/**
- * Copyright 2021-2022 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "plugin/device/gpu/kernel/sponge/angle/angle_energy_kernel.h"
-
-namespace mindspore {
-namespace kernel {
-MS_REG_GPU_KERNEL_TWO(AngleEnergy,
-                      KernelAttr()
-                        .AddInputAttr(kNumberTypeUInt32)
-                        .AddInputAttr(kNumberTypeFloat32)
-                        .AddInputAttr(kNumberTypeInt32)
-                        .AddInputAttr(kNumberTypeInt32)
-                        .AddInputAttr(kNumberTypeInt32)
-                        .AddInputAttr(kNumberTypeFloat32)
-                        .AddInputAttr(kNumberTypeFloat32)
-                        .AddOutputAttr(kNumberTypeFloat32),
-                      AngleEnergyGpuKernelMod, float, int)
-}  // namespace kernel
-}  // namespace mindspore
diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/angle/angle_energy_kernel.h b/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/angle/angle_energy_kernel.h
deleted file mode 100644
index fe0fcc43bdd..00000000000
--- a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/angle/angle_energy_kernel.h
+++ /dev/null
@@ -1,99 +0,0 @@
-/**
- * Copyright 2021-2022 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_GPU_SPONGE_ANGLE_ANGLE_ENERGY_KERNEL_H_
-#define MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_GPU_SPONGE_ANGLE_ANGLE_ENERGY_KERNEL_H_
-#include
-#include
-#include
-#include
-#include "plugin/device/gpu/kernel/gpu_kernel.h"
-#include "plugin/device/gpu/kernel/gpu_kernel_factory.h"
-#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/cuda_common.h"
-#include "plugin/device/gpu/kernel/cuda_impl/sponge/angle/angle_energy_impl.cuh"
-namespace mindspore {
-namespace kernel {
-template <typename T, typename T1>
-class AngleEnergyGpuKernelMod : public DeprecatedNativeGpuKernelMod {
- public:
-  AngleEnergyGpuKernelMod() : ele_uint_crd(1) {}
-  ~AngleEnergyGpuKernelMod() override = default;
-
-  bool Init(const CNodePtr &kernel_node) override {
-    kernel_node_ = kernel_node;
-    angle_numbers = static_cast<int>(GetAttr<int64_t>(kernel_node, "angle_numbers"));
-    auto shape_uint_crd = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 0);
-    auto shape_scaler = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 1);
-    auto shape_atom_a = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 2);
-    auto shape_atom_b = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 3);
-    auto shape_atom_c = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 4);
-    auto shape_angle_k = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 5);
-    auto shape_angle_theta0 = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 6);
-
-    ele_uint_crd *= SizeOf(shape_uint_crd);
-    ele_scaler *= SizeOf(shape_scaler);
-    ele_atom_a *= SizeOf(shape_atom_a);
-    ele_atom_b *= SizeOf(shape_atom_b);
-    ele_atom_c *= SizeOf(shape_atom_c);
-    ele_angle_k *= SizeOf(shape_angle_k);
-    ele_angle_theta0 *= SizeOf(shape_angle_theta0);
-
-    InitSizeLists();
-    return true;
-  }
-
-  bool Launch(const std::vector<AddressPtr> &inputs, const std::vector<AddressPtr> &,
-              const std::vector<AddressPtr> &outputs, void *stream_ptr) override {
-    auto uint_crd_f = GetDeviceAddress<T1>(inputs, 0);
-    auto scaler_f = GetDeviceAddress<T>(inputs, 1);
-    auto atom_a = GetDeviceAddress<T1>(inputs, 2);
-    auto atom_b = GetDeviceAddress<T1>(inputs, 3);
-    auto atom_c = GetDeviceAddress<T1>(inputs, 4);
-    auto angle_k = GetDeviceAddress<T>(inputs, 5);
-    auto angle_theta0 = GetDeviceAddress<T>(inputs, 6);
-
-    auto ene = GetDeviceAddress<T>(outputs, 0);
-    AngleEnergy(angle_numbers, uint_crd_f, scaler_f, atom_a, atom_b, atom_c, angle_k, angle_theta0, ene,
-                reinterpret_cast<cudaStream_t>(stream_ptr));
-    return true;
-  }
-
- protected:
-  void InitSizeLists() override {
-    input_size_list_.push_back(ele_uint_crd * sizeof(T1));
-    input_size_list_.push_back(ele_scaler * sizeof(T));
-    input_size_list_.push_back(ele_atom_a * sizeof(T1));
-    input_size_list_.push_back(ele_atom_b * sizeof(T1));
-    input_size_list_.push_back(ele_atom_c * sizeof(T1));
-    input_size_list_.push_back(ele_angle_k * sizeof(T));
-    input_size_list_.push_back(ele_angle_theta0 * sizeof(T));
-
-    output_size_list_.push_back(angle_numbers * sizeof(T));
-  }
-
- private:
-  size_t ele_uint_crd = 1;
-  size_t ele_scaler = 1;
-  size_t ele_atom_a = 1;
-  size_t ele_atom_b = 1;
-  size_t ele_atom_c = 1;
-  size_t ele_angle_k = 1;
-  size_t ele_angle_theta0 = 1;
-  int angle_numbers;
-};
-}  // namespace kernel
-}  // namespace mindspore
-#endif
diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/angle/angle_force_kernel.cc b/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/angle/angle_force_kernel.cc
deleted file mode 100644
index 0732fe5a5e9..00000000000
--- a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/angle/angle_force_kernel.cc
+++ /dev/null
@@ -1,33 +0,0 @@
-/**
- * Copyright 2021-2022 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "plugin/device/gpu/kernel/sponge/angle/angle_force_kernel.h"
-
-namespace mindspore {
-namespace kernel {
-MS_REG_GPU_KERNEL_TWO(AngleForce,
-                      KernelAttr()
-                        .AddInputAttr(kNumberTypeUInt32)
-                        .AddInputAttr(kNumberTypeFloat32)
-                        .AddInputAttr(kNumberTypeInt32)
-                        .AddInputAttr(kNumberTypeInt32)
-                        .AddInputAttr(kNumberTypeInt32)
-                        .AddInputAttr(kNumberTypeFloat32)
-                        .AddInputAttr(kNumberTypeFloat32)
-                        .AddOutputAttr(kNumberTypeFloat32),
-                      AngleForceGpuKernelMod, float, int)
-}  // namespace kernel
-}  // namespace mindspore
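[Editor's note on the deprecated kernels removed above.] These kernels size their buffers by hand: element counts are products of the input shape dimensions, and each size-list entry is count * sizeof(dtype). A small standalone illustration of that convention, with a hypothetical shape:

#include <cstddef>
#include <cstdint>
#include <vector>

std::size_t SizeOf(const std::vector<int64_t> &shape) {
  std::size_t n = 1;
  for (int64_t d : shape) n *= static_cast<std::size_t>(d);
  return n;
}

int main() {
  std::vector<int64_t> shape_uint_crd = {128, 3};               // hypothetical input shape
  std::size_t ele_uint_crd = SizeOf(shape_uint_crd);            // 384 elements
  std::size_t input_bytes = ele_uint_crd * sizeof(int);         // like ele_uint_crd * sizeof(T1)
  std::size_t output_bytes = ele_uint_crd * 3 * sizeof(float);  // like the frc output buffer
  return (input_bytes == 1536 && output_bytes == 4608) ? 0 : 1;
}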
diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/angle/angle_force_kernel.h b/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/angle/angle_force_kernel.h
deleted file mode 100644
index 553f9bee9f8..00000000000
--- a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/angle/angle_force_kernel.h
+++ /dev/null
@@ -1,100 +0,0 @@
-/**
- * Copyright 2021-2022 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_GPU_SPONGE_ANGLE_ANGLE_FORCE_KERNEL_H_
-#define MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_GPU_SPONGE_ANGLE_ANGLE_FORCE_KERNEL_H_
-#include
-#include
-#include
-#include
-#include "plugin/device/gpu/kernel/gpu_kernel.h"
-#include "plugin/device/gpu/kernel/gpu_kernel_factory.h"
-#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/cuda_common.h"
-#include "plugin/device/gpu/kernel/cuda_impl/sponge/angle/angle_force_impl.cuh"
-namespace mindspore {
-namespace kernel {
-template <typename T, typename T1>
-class AngleForceGpuKernelMod : public DeprecatedNativeGpuKernelMod {
- public:
-  AngleForceGpuKernelMod() : ele_uint_crd(1) {}
-  ~AngleForceGpuKernelMod() override = default;
-
-  bool Init(const CNodePtr &kernel_node) override {
-    kernel_node_ = kernel_node;
-    angle_numbers = static_cast<int>(GetAttr<int64_t>(kernel_node, "angle_numbers"));
-    auto shape_uint_crd = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 0);
-    auto shape_scaler = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 1);
-    auto shape_atom_a = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 2);
-    auto shape_atom_b = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 3);
-    auto shape_atom_c = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 4);
-    auto shape_angle_k = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 5);
-    auto shape_angle_theta0 = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 6);
-
-    ele_uint_crd *= SizeOf(shape_uint_crd);
-    ele_scaler *= SizeOf(shape_scaler);
-    ele_atom_a *= SizeOf(shape_atom_a);
-    ele_atom_b *= SizeOf(shape_atom_b);
-    ele_atom_c *= SizeOf(shape_atom_c);
-    ele_angle_k *= SizeOf(shape_angle_k);
-    ele_angle_theta0 *= SizeOf(shape_angle_theta0);
-
-    InitSizeLists();
-    return true;
-  }
-
-  bool Launch(const std::vector<AddressPtr> &inputs, const std::vector<AddressPtr> &,
-              const std::vector<AddressPtr> &outputs, void *stream_ptr) override {
-    auto uint_crd_f = GetDeviceAddress<T1>(inputs, 0);
-    auto scaler_f = GetDeviceAddress<T>(inputs, 1);
-    auto atom_a = GetDeviceAddress<T1>(inputs, 2);
-    auto atom_b = GetDeviceAddress<T1>(inputs, 3);
-    auto atom_c = GetDeviceAddress<T1>(inputs, 4);
-    auto angle_k = GetDeviceAddress<T>(inputs, 5);
-    auto angle_theta0 = GetDeviceAddress<T>(inputs, 6);
-
-    auto frc_f = GetDeviceAddress<T>(outputs, 0);
-    AngleForce(angle_numbers, ele_uint_crd, uint_crd_f, scaler_f, atom_a, atom_b, atom_c, angle_k, angle_theta0, frc_f,
-               reinterpret_cast<cudaStream_t>(stream_ptr));
-    return true;
-  }
-
- protected:
-  void InitSizeLists() override {
-    input_size_list_.push_back(ele_uint_crd * sizeof(T1));
-    input_size_list_.push_back(ele_scaler * sizeof(T));
-    input_size_list_.push_back(ele_atom_a * sizeof(T1));
-    input_size_list_.push_back(ele_atom_b * sizeof(T1));
-    input_size_list_.push_back(ele_atom_c * sizeof(T1));
-    input_size_list_.push_back(ele_angle_k * sizeof(T));
-    input_size_list_.push_back(ele_angle_theta0 * sizeof(T));
-
-    output_size_list_.push_back(ele_uint_crd * 3 * sizeof(T));
-  }
-
- private:
-  size_t ele_uint_crd = 1;
-  size_t ele_scaler = 1;
-  size_t ele_atom_a = 1;
-  size_t ele_atom_b = 1;
-  size_t ele_atom_c = 1;
-  size_t ele_angle_k = 1;
-  size_t ele_angle_theta0 = 1;
-
-  int angle_numbers;
-};
-}  // namespace kernel
-}  // namespace mindspore
-#endif
diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/angle/angle_force_with_atom_energy_kernel.cc b/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/angle/angle_force_with_atom_energy_kernel.cc
deleted file mode 100644
index dc81e9505b9..00000000000
--- a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/angle/angle_force_with_atom_energy_kernel.cc
+++ /dev/null
@@ -1,34 +0,0 @@
-/**
- * Copyright 2021-2022 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "plugin/device/gpu/kernel/sponge/angle/angle_force_with_atom_energy_kernel.h"
-
-namespace mindspore {
-namespace kernel {
-MS_REG_GPU_KERNEL_TWO(AngleForceWithAtomEnergy,
-                      KernelAttr()
-                        .AddInputAttr(kNumberTypeUInt32)
-                        .AddInputAttr(kNumberTypeFloat32)
-                        .AddInputAttr(kNumberTypeInt32)
-                        .AddInputAttr(kNumberTypeInt32)
-                        .AddInputAttr(kNumberTypeInt32)
-                        .AddInputAttr(kNumberTypeFloat32)
-                        .AddInputAttr(kNumberTypeFloat32)
-                        .AddOutputAttr(kNumberTypeFloat32)
-                        .AddOutputAttr(kNumberTypeFloat32),
-                      AngleForceWithAtomEnergyGpuKernelMod, float, int)
-}  // namespace kernel
-}  // namespace mindspore
diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/angle/angle_force_with_atom_energy_kernel.h b/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/angle/angle_force_with_atom_energy_kernel.h
deleted file mode 100644
index b7bdc046b25..00000000000
--- a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/angle/angle_force_with_atom_energy_kernel.h
+++ /dev/null
@@ -1,101 +0,0 @@
-/**
- * Copyright 2021-2022 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_GPU_SPONGE_ANGLE_ANGLE_FORCE_WITH_ATOM_ENERGY_KERNEL_H_
-#define MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_GPU_SPONGE_ANGLE_ANGLE_FORCE_WITH_ATOM_ENERGY_KERNEL_H_
-#include
-#include
-#include
-#include
-#include "plugin/device/gpu/kernel/gpu_kernel.h"
-#include "plugin/device/gpu/kernel/gpu_kernel_factory.h"
-#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/cuda_common.h"
-#include "plugin/device/gpu/kernel/cuda_impl/sponge/angle/angle_force_with_atom_energy_impl.cuh"
-namespace mindspore {
-namespace kernel {
-template <typename T, typename T1>
-class AngleForceWithAtomEnergyGpuKernelMod : public DeprecatedNativeGpuKernelMod {
- public:
-  AngleForceWithAtomEnergyGpuKernelMod() : ele_uint_crd(1) {}
-  ~AngleForceWithAtomEnergyGpuKernelMod() override = default;
-
-  bool Init(const CNodePtr &kernel_node) override {
-    kernel_node_ = kernel_node;
-    angle_numbers = static_cast<int>(GetAttr<int64_t>(kernel_node, "angle_numbers"));
-    auto shape_uint_crd = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 0);
-    auto shape_scaler = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 1);
-    auto shape_atom_a = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 2);
-    auto shape_atom_b = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 3);
-    auto shape_atom_c = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 4);
-    auto shape_angle_k = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 5);
-    auto shape_angle_theta0 = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 6);
-
-    ele_uint_crd *= SizeOf(shape_uint_crd);
-    ele_scaler *= SizeOf(shape_scaler);
-    ele_atom_a *= SizeOf(shape_atom_a);
-    ele_atom_b *= SizeOf(shape_atom_b);
-    ele_atom_c *= SizeOf(shape_atom_c);
-    ele_angle_k *= SizeOf(shape_angle_k);
-    ele_angle_theta0 *= SizeOf(shape_angle_theta0);
-    InitSizeLists();
-    return true;
-  }
-
-  bool Launch(const std::vector<AddressPtr> &inputs, const std::vector<AddressPtr> &,
-              const std::vector<AddressPtr> &outputs, void *stream_ptr) override {
-    auto uint_crd_f = GetDeviceAddress<T1>(inputs, 0);
-    auto scaler_f = GetDeviceAddress<T>(inputs, 1);
-    auto atom_a = GetDeviceAddress<T1>(inputs, 2);
-    auto atom_b = GetDeviceAddress<T1>(inputs, 3);
-    auto atom_c = GetDeviceAddress<T1>(inputs, 4);
-    auto angle_k = GetDeviceAddress<T>(inputs, 5);
-    auto angle_theta0 = GetDeviceAddress<T>(inputs, 6);
-
-    auto frc_f = GetDeviceAddress<T>(outputs, 0);
-    auto ene = GetDeviceAddress<T>(outputs, 1);
-    AngleForceWithAtomEnergy(angle_numbers, ele_uint_crd, uint_crd_f, scaler_f, atom_a, atom_b, atom_c, angle_k,
-                             angle_theta0, frc_f, ene, reinterpret_cast<cudaStream_t>(stream_ptr));
-    return true;
-  }
-
- protected:
-  void InitSizeLists() override {
-    input_size_list_.push_back(ele_uint_crd * sizeof(T1));
-    input_size_list_.push_back(ele_scaler * sizeof(T));
-    input_size_list_.push_back(ele_atom_a * sizeof(T1));
-    input_size_list_.push_back(ele_atom_b * sizeof(T1));
-    input_size_list_.push_back(ele_atom_c * sizeof(T1));
-    input_size_list_.push_back(ele_angle_k * sizeof(T));
-    input_size_list_.push_back(ele_angle_theta0 * sizeof(T));
-
-    output_size_list_.push_back(ele_uint_crd * 3 * sizeof(T));
-    output_size_list_.push_back(ele_uint_crd * sizeof(T));
-  }
-
- private:
-  size_t ele_uint_crd = 1;
-  size_t ele_scaler = 1;
-  size_t ele_atom_a = 1;
-  size_t ele_atom_b = 1;
-  size_t ele_atom_c = 1;
-  size_t ele_angle_k = 1;
-  size_t ele_angle_theta0 = 1;
-
-  int angle_numbers;
-};
-}  // namespace kernel
-}  // namespace mindspore
-#endif
a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/bond/bond_atom_energy_cuda_gpu_kernel.cc b/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/bond/bond_atom_energy_cuda_gpu_kernel.cc deleted file mode 100644 index 331c8f06775..00000000000 --- a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/bond/bond_atom_energy_cuda_gpu_kernel.cc +++ /dev/null @@ -1,32 +0,0 @@ -/** - * Copyright 2021-2022 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "plugin/device/gpu/kernel/sponge/bond/bond_atom_energy_cuda_gpu_kernel.h" - -namespace mindspore { -namespace kernel { -MS_REG_GPU_KERNEL_TWO(BondAtomEnergy, - KernelAttr() - .AddInputAttr(kNumberTypeUInt32) - .AddInputAttr(kNumberTypeFloat32) - .AddInputAttr(kNumberTypeInt32) - .AddInputAttr(kNumberTypeInt32) - .AddInputAttr(kNumberTypeFloat32) - .AddInputAttr(kNumberTypeFloat32) - .AddOutputAttr(kNumberTypeFloat32), - BondAtomEnergyCudaGpuKernelMod, float, int) -} // namespace kernel -} // namespace mindspore diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/bond/bond_atom_energy_cuda_gpu_kernel.h b/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/bond/bond_atom_energy_cuda_gpu_kernel.h deleted file mode 100644 index 9b51a2beec4..00000000000 --- a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/bond/bond_atom_energy_cuda_gpu_kernel.h +++ /dev/null @@ -1,102 +0,0 @@ -/** - * Copyright 2021-2022 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */
-
-#ifndef MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_GPU_SPONG_BOND_BOND_ATOM_ENERGY_CUDA_GPU_KERNEL_H_
-#define MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_GPU_SPONG_BOND_BOND_ATOM_ENERGY_CUDA_GPU_KERNEL_H_
-
-#include "plugin/device/gpu/kernel/cuda_impl/sponge/bond/bond_atom_energy_cuda_gpu_impl.cuh"
-
-#include <cuda_runtime_api.h>
-#include <map>
-#include <string>
-#include <vector>
-
-#include "plugin/device/gpu/kernel/gpu_kernel.h"
-#include "plugin/device/gpu/kernel/gpu_kernel_factory.h"
-#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/cuda_common.h"
-
-namespace mindspore {
-namespace kernel {
-template <typename T, typename T1>
-class BondAtomEnergyCudaGpuKernelMod : public DeprecatedNativeGpuKernelMod {
- public:
-  BondAtomEnergyCudaGpuKernelMod() : ele_uint_crd(1) {}
-  ~BondAtomEnergyCudaGpuKernelMod() override = default;
-
-  bool Init(const CNodePtr &kernel_node) override {
-    kernel_node_ = kernel_node;
-    bond_numbers = static_cast<int>(GetAttr<int64_t>(kernel_node, "bond_numbers"));
-    atom_numbers = static_cast<int>(GetAttr<int64_t>(kernel_node, "atom_numbers"));
-    auto shape_uint_crd = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 0);
-    auto shape_scaler = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 1);
-    auto shape_atom_a = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 2);
-    auto shape_atom_b = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 3);
-    auto shape_bond_k = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 4);
-    auto shape_bond_r0 = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 5);
-
-    ele_uint_crd *= SizeOf(shape_uint_crd);
-    ele_scaler *= SizeOf(shape_scaler);
-    ele_atom_a *= SizeOf(shape_atom_a);
-    ele_atom_b *= SizeOf(shape_atom_b);
-    ele_bond_k *= SizeOf(shape_bond_k);
-    ele_bond_r0 *= SizeOf(shape_bond_r0);
-
-    InitSizeLists();
-    return true;
-  }
-
-  bool Launch(const std::vector<AddressPtr> &inputs, const std::vector<AddressPtr> &,
-              const std::vector<AddressPtr> &outputs, void *stream_ptr) override {
-    auto uint_crd_f = GetDeviceAddress<T1>(inputs, 0);
-    auto scaler_f = GetDeviceAddress<T>(inputs, 1);
-    auto atom_a = GetDeviceAddress<T1>(inputs, 2);
-    auto atom_b = GetDeviceAddress<T1>(inputs, 3);
-    auto bond_k = GetDeviceAddress<T>(inputs, 4);
-    auto bond_r0 = GetDeviceAddress<T>(inputs, 5);
-
-    auto atom_ene = GetDeviceAddress<T>(outputs, 0);
-
-    BondAtomEnergy(bond_numbers, atom_numbers, uint_crd_f, scaler_f, atom_a, atom_b, bond_k, bond_r0, atom_ene,
-                   reinterpret_cast<cudaStream_t>(stream_ptr));
-    return true;
-  }
-
- protected:
-  void InitSizeLists() override {
-    input_size_list_.push_back(ele_uint_crd * sizeof(T1));
-    input_size_list_.push_back(ele_scaler * sizeof(T));
-    input_size_list_.push_back(ele_atom_a * sizeof(T1));
-    input_size_list_.push_back(ele_atom_b * sizeof(T1));
-    input_size_list_.push_back(ele_bond_k * sizeof(T));
-    input_size_list_.push_back(ele_bond_r0 * sizeof(T));
-
-    output_size_list_.push_back(atom_numbers * sizeof(T));
-  }
-
- private:
-  size_t ele_uint_crd = 1;
-  size_t ele_scaler = 1;
-  size_t ele_atom_a = 1;
-  size_t ele_atom_b = 1;
-  size_t ele_bond_k = 1;
-  size_t ele_bond_r0 = 1;
-
-  int bond_numbers;
-  int atom_numbers;
-};
-}  // namespace kernel
-}  // namespace mindspore
-#endif  // MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_GPU_SPONG_BOND_BOND_ATOM_ENERGY_CUDA_GPU_KERNEL_H_
diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/bond/bond_energy_cuda_gpu_kernel.cc b/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/bond/bond_energy_cuda_gpu_kernel.cc
deleted file mode 100644
index bfb93d78576..00000000000
--- a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/bond/bond_energy_cuda_gpu_kernel.cc
+++ /dev/null
@@ -1,32
+0,0 @@ -/** - * Copyright 2021-2022 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "plugin/device/gpu/kernel/sponge/bond/bond_energy_cuda_gpu_kernel.h" - -namespace mindspore { -namespace kernel { -MS_REG_GPU_KERNEL_THREE(BondEnergy, - KernelAttr() - .AddInputAttr(kNumberTypeUInt32) - .AddInputAttr(kNumberTypeFloat32) - .AddInputAttr(kNumberTypeInt32) - .AddInputAttr(kNumberTypeInt32) - .AddInputAttr(kNumberTypeFloat32) - .AddInputAttr(kNumberTypeFloat32) - .AddOutputAttr(kNumberTypeFloat32), - BondEnergyCudaGpuKernelMod, float, int, unsigned int) -} // namespace kernel -} // namespace mindspore diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/bond/bond_energy_cuda_gpu_kernel.h b/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/bond/bond_energy_cuda_gpu_kernel.h deleted file mode 100644 index 035befcb539..00000000000 --- a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/bond/bond_energy_cuda_gpu_kernel.h +++ /dev/null @@ -1,103 +0,0 @@ -/** - * Copyright 2021-2022 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */
-
-#ifndef MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_GPU_SPONG_BOND_BOND_ENERGY_CUDA_GPU_KERNEL_H_
-#define MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_GPU_SPONG_BOND_BOND_ENERGY_CUDA_GPU_KERNEL_H_
-
-#include "plugin/device/gpu/kernel/cuda_impl/sponge/bond/bond_energy_cuda_gpu_impl.cuh"
-
-#include <cuda_runtime_api.h>
-#include <map>
-#include <string>
-#include <vector>
-
-#include "plugin/device/gpu/kernel/gpu_kernel.h"
-#include "plugin/device/gpu/kernel/gpu_kernel_factory.h"
-#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/cuda_common.h"
-
-namespace mindspore {
-namespace kernel {
-template <typename T, typename T1, typename T2>
-class BondEnergyCudaGpuKernelMod : public DeprecatedNativeGpuKernelMod {
- public:
-  BondEnergyCudaGpuKernelMod() : ele_uint_crd(1) {}
-  ~BondEnergyCudaGpuKernelMod() override = default;
-
-  bool Init(const CNodePtr &kernel_node) override {
-    // get bond_numbers
-    kernel_node_ = kernel_node;
-    bond_numbers = static_cast<int>(GetAttr<int64_t>(kernel_node, "bond_numbers"));
-    atom_numbers = static_cast<int>(GetAttr<int64_t>(kernel_node, "atom_numbers"));
-    auto shape_uint_crd = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 0);
-    auto shape_scaler = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 1);
-    auto shape_atom_a = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 2);
-    auto shape_atom_b = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 3);
-    auto shape_bond_k = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 4);
-    auto shape_bond_r0 = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 5);
-
-    ele_uint_crd *= SizeOf(shape_uint_crd);
-    ele_scaler *= SizeOf(shape_scaler);
-    ele_atom_a *= SizeOf(shape_atom_a);
-    ele_atom_b *= SizeOf(shape_atom_b);
-    ele_bond_k *= SizeOf(shape_bond_k);
-    ele_bond_r0 *= SizeOf(shape_bond_r0);
-
-    InitSizeLists();
-    return true;
-  }
-
-  bool Launch(const std::vector<AddressPtr> &inputs, const std::vector<AddressPtr> &,
-              const std::vector<AddressPtr> &outputs, void *stream_ptr) override {
-    auto uint_crd_f = GetDeviceAddress<T2>(inputs, 0);
-    auto scaler_f = GetDeviceAddress<T>(inputs, 1);
-    auto atom_a = GetDeviceAddress<T1>(inputs, 2);
-    auto atom_b = GetDeviceAddress<T1>(inputs, 3);
-    auto bond_k = GetDeviceAddress<T>(inputs, 4);
-    auto bond_r0 = GetDeviceAddress<T>(inputs, 5);
-
-    auto bond_ene = GetDeviceAddress<T>(outputs, 0);
-
-    BondEnergy(bond_numbers, atom_numbers, uint_crd_f, scaler_f, atom_a, atom_b, bond_k, bond_r0, bond_ene,
-               reinterpret_cast<cudaStream_t>(stream_ptr));
-    return true;
-  }
-
- protected:
-  void InitSizeLists() override {
-    input_size_list_.push_back(ele_uint_crd * sizeof(T2));
-    input_size_list_.push_back(ele_scaler * sizeof(T));
-    input_size_list_.push_back(ele_atom_a * sizeof(T1));
-    input_size_list_.push_back(ele_atom_b * sizeof(T1));
-    input_size_list_.push_back(ele_bond_k * sizeof(T));
-    input_size_list_.push_back(ele_bond_r0 * sizeof(T));
-
-    output_size_list_.push_back(bond_numbers * sizeof(T));
-  }
-
- private:
-  size_t ele_uint_crd = 1;
-  size_t ele_scaler = 1;
-  size_t ele_atom_a = 1;
-  size_t ele_atom_b = 1;
-  size_t ele_bond_k = 1;
-  size_t ele_bond_r0 = 1;
-
-  int bond_numbers;
-  int atom_numbers;
-};
-}  // namespace kernel
-}  // namespace mindspore
-#endif  // MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_GPU_SPONG_BOND_BOND_ENERGY_CUDA_GPU_KERNEL_H_
diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/bond/bond_force_cuda_gpu_kernel.cc b/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/bond/bond_force_cuda_gpu_kernel.cc
deleted file mode 100644
index a8e9fde0479..00000000000
--- a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/bond/bond_force_cuda_gpu_kernel.cc
+++ /dev/null
@@ -1,32 +0,0 @@
-/**
- *
Copyright 2021-2022 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "plugin/device/gpu/kernel/sponge/bond/bond_force_cuda_gpu_kernel.h" - -namespace mindspore { -namespace kernel { -MS_REG_GPU_KERNEL_THREE(BondForce, - KernelAttr() - .AddInputAttr(kNumberTypeUInt32) - .AddInputAttr(kNumberTypeFloat32) - .AddInputAttr(kNumberTypeInt32) - .AddInputAttr(kNumberTypeInt32) - .AddInputAttr(kNumberTypeFloat32) - .AddInputAttr(kNumberTypeFloat32) - .AddOutputAttr(kNumberTypeFloat32), - BondForceCudaGpuKernelMod, float, int, unsigned int) -} // namespace kernel -} // namespace mindspore diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/bond/bond_force_cuda_gpu_kernel.h b/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/bond/bond_force_cuda_gpu_kernel.h deleted file mode 100644 index e6466e5ae55..00000000000 --- a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/bond/bond_force_cuda_gpu_kernel.h +++ /dev/null @@ -1,103 +0,0 @@ -/** - * Copyright 2021-2022 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */
-
-#ifndef MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_GPU_SPONG_BOND_BOND_FORCE_CUDA_GPU_KERNEL_H_
-#define MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_GPU_SPONG_BOND_BOND_FORCE_CUDA_GPU_KERNEL_H_
-
-#include "plugin/device/gpu/kernel/cuda_impl/sponge/bond/bond_force_cuda_gpu_impl.cuh"
-
-#include <cuda_runtime_api.h>
-#include <map>
-#include <string>
-#include <vector>
-
-#include "plugin/device/gpu/kernel/gpu_kernel.h"
-#include "plugin/device/gpu/kernel/gpu_kernel_factory.h"
-#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/cuda_common.h"
-
-namespace mindspore {
-namespace kernel {
-template <typename T, typename T1, typename T2>
-class BondForceCudaGpuKernelMod : public DeprecatedNativeGpuKernelMod {
- public:
-  BondForceCudaGpuKernelMod() : ele_uint_crd(1) {}
-  ~BondForceCudaGpuKernelMod() override = default;
-
-  bool Init(const CNodePtr &kernel_node) override {
-    // get bond_numbers
-    kernel_node_ = kernel_node;
-    bond_numbers = static_cast<int>(GetAttr<int64_t>(kernel_node, "bond_numbers"));
-    atom_numbers = static_cast<int>(GetAttr<int64_t>(kernel_node, "atom_numbers"));
-    auto shape_uint_crd = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 0);
-    auto shape_scaler = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 1);
-    auto shape_atom_a = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 2);
-    auto shape_atom_b = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 3);
-    auto shape_bond_k = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 4);
-    auto shape_bond_r0 = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 5);
-
-    ele_uint_crd *= SizeOf(shape_uint_crd);
-    ele_scaler *= SizeOf(shape_scaler);
-    ele_atom_a *= SizeOf(shape_atom_a);
-    ele_atom_b *= SizeOf(shape_atom_b);
-    ele_bond_k *= SizeOf(shape_bond_k);
-    ele_bond_r0 *= SizeOf(shape_bond_r0);
-
-    InitSizeLists();
-    return true;
-  }
-
-  bool Launch(const std::vector<AddressPtr> &inputs, const std::vector<AddressPtr> &,
-              const std::vector<AddressPtr> &outputs, void *stream_ptr) override {
-    auto uint_crd_f = GetDeviceAddress<T2>(inputs, 0);
-    auto scaler_f = GetDeviceAddress<T>(inputs, 1);
-    auto atom_a = GetDeviceAddress<T1>(inputs, 2);
-    auto atom_b = GetDeviceAddress<T1>(inputs, 3);
-    auto bond_k = GetDeviceAddress<T>(inputs, 4);
-    auto bond_r0 = GetDeviceAddress<T>(inputs, 5);
-
-    auto frc_f = GetDeviceAddress<T>(outputs, 0);
-
-    BondForce(bond_numbers, atom_numbers, uint_crd_f, scaler_f, atom_a, atom_b, bond_k, bond_r0, frc_f,
-              reinterpret_cast<cudaStream_t>(stream_ptr));
-    return true;
-  }
-
- protected:
-  void InitSizeLists() override {
-    input_size_list_.push_back(ele_uint_crd * sizeof(T2));
-    input_size_list_.push_back(ele_scaler * sizeof(T));
-    input_size_list_.push_back(ele_atom_a * sizeof(T1));
-    input_size_list_.push_back(ele_atom_b * sizeof(T1));
-    input_size_list_.push_back(ele_bond_k * sizeof(T));
-    input_size_list_.push_back(ele_bond_r0 * sizeof(T));
-
-    output_size_list_.push_back(atom_numbers * 3 * sizeof(T));
-  }
-
- private:
-  size_t ele_uint_crd = 1;
-  size_t ele_scaler = 1;
-  size_t ele_atom_a = 1;
-  size_t ele_atom_b = 1;
-  size_t ele_bond_k = 1;
-  size_t ele_bond_r0 = 1;
-
-  int bond_numbers;
-  int atom_numbers;
-};
-}  // namespace kernel
-}  // namespace mindspore
-#endif  // MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_GPU_SPONG_BOND_BOND_FORCE_CUDA_GPU_KERNEL_H_
diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/bond/bond_force_with_atom_energy_and_virial_kernel.cc b/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/bond/bond_force_with_atom_energy_and_virial_kernel.cc
deleted file mode 100644
index cb1dea83757..00000000000
---
a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/bond/bond_force_with_atom_energy_and_virial_kernel.cc +++ /dev/null @@ -1,34 +0,0 @@ -/** - * Copyright 2021-2022 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "plugin/device/gpu/kernel/sponge/bond/bond_force_with_atom_energy_and_virial_kernel.h" - -namespace mindspore { -namespace kernel { -MS_REG_GPU_KERNEL_THREE(BondForceWithAtomEnergyAndVirial, - KernelAttr() - .AddInputAttr(kNumberTypeUInt32) - .AddInputAttr(kNumberTypeFloat32) - .AddInputAttr(kNumberTypeInt32) - .AddInputAttr(kNumberTypeInt32) - .AddInputAttr(kNumberTypeFloat32) - .AddInputAttr(kNumberTypeFloat32) - .AddOutputAttr(kNumberTypeFloat32) - .AddOutputAttr(kNumberTypeFloat32) - .AddOutputAttr(kNumberTypeFloat32), - BondForceWithAtomEnergyAndVirialGpuKernelMod, float, int, unsigned int) -} // namespace kernel -} // namespace mindspore diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/bond/bond_force_with_atom_energy_and_virial_kernel.h b/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/bond/bond_force_with_atom_energy_and_virial_kernel.h deleted file mode 100644 index fb077750270..00000000000 --- a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/bond/bond_force_with_atom_energy_and_virial_kernel.h +++ /dev/null @@ -1,105 +0,0 @@ -/** - * Copyright 2021-2022 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */
-
-#ifndef MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_GPU_SPONG_BOND_BOND_FORCE_WITH_ATOM_ENERGY_AND_VIRIAL_KERNEL_H_
-#define MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_GPU_SPONG_BOND_BOND_FORCE_WITH_ATOM_ENERGY_AND_VIRIAL_KERNEL_H_
-
-#include "plugin/device/gpu/kernel/cuda_impl/sponge/bond/bond_force_with_atom_energy_and_virial_impl.cuh"
-
-#include <cuda_runtime_api.h>
-#include <map>
-#include <string>
-#include <vector>
-
-#include "plugin/device/gpu/kernel/gpu_kernel.h"
-#include "plugin/device/gpu/kernel/gpu_kernel_factory.h"
-#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/cuda_common.h"
-
-namespace mindspore {
-namespace kernel {
-template <typename T, typename T1, typename T2>
-class BondForceWithAtomEnergyAndVirialGpuKernelMod : public DeprecatedNativeGpuKernelMod {
- public:
-  BondForceWithAtomEnergyAndVirialGpuKernelMod() : ele_uint_crd(1) {}
-  ~BondForceWithAtomEnergyAndVirialGpuKernelMod() override = default;
-
-  bool Init(const CNodePtr &kernel_node) override {
-    // get bond_numbers
-    kernel_node_ = kernel_node;
-    bond_numbers = static_cast<int>(GetAttr<int64_t>(kernel_node, "bond_numbers"));
-    atom_numbers = static_cast<int>(GetAttr<int64_t>(kernel_node, "atom_numbers"));
-    auto shape_uint_crd = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 0);
-    auto shape_scaler = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 1);
-    auto shape_atom_a = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 2);
-    auto shape_atom_b = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 3);
-    auto shape_bond_k = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 4);
-    auto shape_bond_r0 = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 5);
-
-    ele_uint_crd *= SizeOf(shape_uint_crd);
-    ele_scaler *= SizeOf(shape_scaler);
-    ele_atom_a *= SizeOf(shape_atom_a);
-    ele_atom_b *= SizeOf(shape_atom_b);
-    ele_bond_k *= SizeOf(shape_bond_k);
-    ele_bond_r0 *= SizeOf(shape_bond_r0);
-    InitSizeLists();
-    return true;
-  }
-
-  bool Launch(const std::vector<AddressPtr> &inputs, const std::vector<AddressPtr> &,
-              const std::vector<AddressPtr> &outputs, void *stream_ptr) override {
-    auto uint_crd_f = GetDeviceAddress<T2>(inputs, 0);
-    auto scaler_f = GetDeviceAddress<T>(inputs, 1);
-    auto atom_a = GetDeviceAddress<T1>(inputs, 2);
-    auto atom_b = GetDeviceAddress<T1>(inputs, 3);
-    auto bond_k = GetDeviceAddress<T>(inputs, 4);
-    auto bond_r0 = GetDeviceAddress<T>(inputs, 5);
-
-    auto frc_f = GetDeviceAddress<T>(outputs, 0);
-    auto atom_energy = GetDeviceAddress<T>(outputs, 1);
-    auto atom_v = GetDeviceAddress<T>(outputs, 2);
-    BondForceWithAtomEnergyAndVirial(bond_numbers, atom_numbers, uint_crd_f, scaler_f, atom_a, atom_b, bond_k, bond_r0,
-                                     frc_f, atom_energy, atom_v, reinterpret_cast<cudaStream_t>(stream_ptr));
-    return true;
-  }
-
- protected:
-  void InitSizeLists() override {
-    input_size_list_.push_back(ele_uint_crd * sizeof(T2));
-    input_size_list_.push_back(ele_scaler * sizeof(T));
-    input_size_list_.push_back(ele_atom_a * sizeof(T1));
-    input_size_list_.push_back(ele_atom_b * sizeof(T1));
-    input_size_list_.push_back(ele_bond_k * sizeof(T));
-    input_size_list_.push_back(ele_bond_r0 * sizeof(T));
-
-    output_size_list_.push_back(atom_numbers * 3 * sizeof(T));
-    output_size_list_.push_back(atom_numbers * sizeof(T));
-    output_size_list_.push_back(atom_numbers * sizeof(T));
-  }
-
- private:
-  size_t ele_uint_crd = 1;
-  size_t ele_scaler = 1;
-  size_t ele_atom_a = 1;
-  size_t ele_atom_b = 1;
-  size_t ele_bond_k = 1;
-  size_t ele_bond_r0 = 1;
-
-  int bond_numbers;
-  int atom_numbers;
-};
-}  // namespace kernel
-}  // namespace mindspore
-#endif
diff --git
a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/bond/bond_force_with_atom_energy_kernel.cc b/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/bond/bond_force_with_atom_energy_kernel.cc deleted file mode 100644 index ce4dca2e80b..00000000000 --- a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/bond/bond_force_with_atom_energy_kernel.cc +++ /dev/null @@ -1,33 +0,0 @@ -/** - * Copyright 2021-2022 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "plugin/device/gpu/kernel/sponge/bond/bond_force_with_atom_energy_kernel.h" - -namespace mindspore { -namespace kernel { -MS_REG_GPU_KERNEL_TWO(BondForceWithAtomEnergy, - KernelAttr() - .AddInputAttr(kNumberTypeUInt32) - .AddInputAttr(kNumberTypeFloat32) - .AddInputAttr(kNumberTypeInt32) - .AddInputAttr(kNumberTypeInt32) - .AddInputAttr(kNumberTypeFloat32) - .AddInputAttr(kNumberTypeFloat32) - .AddOutputAttr(kNumberTypeFloat32) - .AddOutputAttr(kNumberTypeFloat32), - BondForceWithAtomEnergyGpuKernelMod, float, int) -} // namespace kernel -} // namespace mindspore diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/bond/bond_force_with_atom_energy_kernel.h b/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/bond/bond_force_with_atom_energy_kernel.h deleted file mode 100644 index 2ffa77abcbe..00000000000 --- a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/bond/bond_force_with_atom_energy_kernel.h +++ /dev/null @@ -1,103 +0,0 @@ -/** - * Copyright 2021-2022 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */
-
-#ifndef MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_GPU_SPONG_BOND_BOND_FORCE_WITH_ATOM_ENERGY_KERNEL_H_
-#define MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_GPU_SPONG_BOND_BOND_FORCE_WITH_ATOM_ENERGY_KERNEL_H_
-
-#include "plugin/device/gpu/kernel/cuda_impl/sponge/bond/bond_force_with_atom_energy_impl.cuh"
-
-#include <cuda_runtime_api.h>
-#include <map>
-#include <string>
-#include <vector>
-
-#include "plugin/device/gpu/kernel/gpu_kernel.h"
-#include "plugin/device/gpu/kernel/gpu_kernel_factory.h"
-#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/cuda_common.h"
-
-namespace mindspore {
-namespace kernel {
-template <typename T, typename T1>
-class BondForceWithAtomEnergyGpuKernelMod : public DeprecatedNativeGpuKernelMod {
- public:
-  BondForceWithAtomEnergyGpuKernelMod() : ele_uint_crd(1) {}
-  ~BondForceWithAtomEnergyGpuKernelMod() override = default;
-
-  bool Init(const CNodePtr &kernel_node) override {
-    // get bond_numbers
-    kernel_node_ = kernel_node;
-    bond_numbers = static_cast<int>(GetAttr<int64_t>(kernel_node, "bond_numbers"));
-    atom_numbers = static_cast<int>(GetAttr<int64_t>(kernel_node, "atom_numbers"));
-    auto shape_uint_crd = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 0);
-    auto shape_scaler = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 1);
-    auto shape_atom_a = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 2);
-    auto shape_atom_b = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 3);
-    auto shape_bond_k = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 4);
-    auto shape_bond_r0 = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 5);
-
-    ele_uint_crd *= SizeOf(shape_uint_crd);
-    ele_scaler *= SizeOf(shape_scaler);
-    ele_atom_a *= SizeOf(shape_atom_a);
-    ele_atom_b *= SizeOf(shape_atom_b);
-    ele_bond_k *= SizeOf(shape_bond_k);
-    ele_bond_r0 *= SizeOf(shape_bond_r0);
-    InitSizeLists();
-    return true;
-  }
-
-  bool Launch(const std::vector<AddressPtr> &inputs, const std::vector<AddressPtr> &,
-              const std::vector<AddressPtr> &outputs, void *stream_ptr) override {
-    auto uint_crd_f = GetDeviceAddress<T1>(inputs, 0);
-    auto scaler_f = GetDeviceAddress<T>(inputs, 1);
-    auto atom_a = GetDeviceAddress<T1>(inputs, 2);
-    auto atom_b = GetDeviceAddress<T1>(inputs, 3);
-    auto bond_k = GetDeviceAddress<T>(inputs, 4);
-    auto bond_r0 = GetDeviceAddress<T>(inputs, 5);
-
-    auto frc_f = GetDeviceAddress<T>(outputs, 0);
-    auto atom_e = GetDeviceAddress<T>(outputs, 1);
-    BondForceWithAtomEnergy(bond_numbers, atom_numbers, uint_crd_f, scaler_f, atom_a, atom_b, bond_k, bond_r0, frc_f,
-                            atom_e, reinterpret_cast<cudaStream_t>(stream_ptr));
-    return true;
-  }
-
- protected:
-  void InitSizeLists() override {
-    input_size_list_.push_back(ele_uint_crd * sizeof(T1));
-    input_size_list_.push_back(ele_scaler * sizeof(T));
-    input_size_list_.push_back(ele_atom_a * sizeof(T1));
-    input_size_list_.push_back(ele_atom_b * sizeof(T1));
-    input_size_list_.push_back(ele_bond_k * sizeof(T));
-    input_size_list_.push_back(ele_bond_r0 * sizeof(T));
-
-    output_size_list_.push_back(atom_numbers * 3 * sizeof(T));
-    output_size_list_.push_back(atom_numbers * sizeof(T));
-  }
-
- private:
-  size_t ele_uint_crd = 1;
-  size_t ele_scaler = 1;
-  size_t ele_atom_a = 1;
-  size_t ele_atom_b = 1;
-  size_t ele_bond_k = 1;
-  size_t ele_bond_r0 = 1;
-
-  int bond_numbers;
-  int atom_numbers;
-};
-}  // namespace kernel
-}  // namespace mindspore
-#endif
diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/bond/bond_force_with_atom_virial_kernel.cc b/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/bond/bond_force_with_atom_virial_kernel.cc
deleted file mode 100644
index e9ae2d9341c..00000000000
---
a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/bond/bond_force_with_atom_virial_kernel.cc +++ /dev/null @@ -1,33 +0,0 @@ -/** - * Copyright 2021-2022 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "plugin/device/gpu/kernel/sponge/bond/bond_force_with_atom_virial_kernel.h" - -namespace mindspore { -namespace kernel { -MS_REG_GPU_KERNEL_TWO(BondForceWithAtomVirial, - KernelAttr() - .AddInputAttr(kNumberTypeUInt32) - .AddInputAttr(kNumberTypeFloat32) - .AddInputAttr(kNumberTypeInt32) - .AddInputAttr(kNumberTypeInt32) - .AddInputAttr(kNumberTypeFloat32) - .AddInputAttr(kNumberTypeFloat32) - .AddOutputAttr(kNumberTypeFloat32) - .AddOutputAttr(kNumberTypeFloat32), - BondForceWithAtomVirialGpuKernelMod, float, int) -} // namespace kernel -} // namespace mindspore diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/bond/bond_force_with_atom_virial_kernel.h b/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/bond/bond_force_with_atom_virial_kernel.h deleted file mode 100644 index 876a975b9a3..00000000000 --- a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/bond/bond_force_with_atom_virial_kernel.h +++ /dev/null @@ -1,103 +0,0 @@ -/** - * Copyright 2021-2022 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */
-
-#ifndef MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_GPU_SPONG_BOND_BOND_FORCE_WITH_ATOM_VIRIAL_KERNEL_H_
-#define MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_GPU_SPONG_BOND_BOND_FORCE_WITH_ATOM_VIRIAL_KERNEL_H_
-
-#include "plugin/device/gpu/kernel/cuda_impl/sponge/bond/bond_force_with_atom_virial_impl.cuh"
-
-#include <cuda_runtime_api.h>
-#include <map>
-#include <string>
-#include <vector>
-
-#include "plugin/device/gpu/kernel/gpu_kernel.h"
-#include "plugin/device/gpu/kernel/gpu_kernel_factory.h"
-#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/cuda_common.h"
-
-namespace mindspore {
-namespace kernel {
-template <typename T, typename T1>
-class BondForceWithAtomVirialGpuKernelMod : public DeprecatedNativeGpuKernelMod {
- public:
-  BondForceWithAtomVirialGpuKernelMod() : ele_uint_crd(1) {}
-  ~BondForceWithAtomVirialGpuKernelMod() override = default;
-
-  bool Init(const CNodePtr &kernel_node) override {
-    // get bond_numbers
-    kernel_node_ = kernel_node;
-    bond_numbers = static_cast<int>(GetAttr<int64_t>(kernel_node, "bond_numbers"));
-    atom_numbers = static_cast<int>(GetAttr<int64_t>(kernel_node, "atom_numbers"));
-    auto shape_uint_crd = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 0);
-    auto shape_scaler = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 1);
-    auto shape_atom_a = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 2);
-    auto shape_atom_b = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 3);
-    auto shape_bond_k = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 4);
-    auto shape_bond_r0 = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 5);
-
-    ele_uint_crd *= SizeOf(shape_uint_crd);
-    ele_scaler *= SizeOf(shape_scaler);
-    ele_atom_a *= SizeOf(shape_atom_a);
-    ele_atom_b *= SizeOf(shape_atom_b);
-    ele_bond_k *= SizeOf(shape_bond_k);
-    ele_bond_r0 *= SizeOf(shape_bond_r0);
-    InitSizeLists();
-    return true;
-  }
-
-  bool Launch(const std::vector<AddressPtr> &inputs, const std::vector<AddressPtr> &,
-              const std::vector<AddressPtr> &outputs, void *stream_ptr) override {
-    auto uint_crd_f = GetDeviceAddress<T1>(inputs, 0);
-    auto scaler_f = GetDeviceAddress<T>(inputs, 1);
-    auto atom_a = GetDeviceAddress<T1>(inputs, 2);
-    auto atom_b = GetDeviceAddress<T1>(inputs, 3);
-    auto bond_k = GetDeviceAddress<T>(inputs, 4);
-    auto bond_r0 = GetDeviceAddress<T>(inputs, 5);
-
-    auto frc_f = GetDeviceAddress<T>(outputs, 0);
-    auto atom_v = GetDeviceAddress<T>(outputs, 1);
-    BondForceWithAtomVirial(bond_numbers, atom_numbers, uint_crd_f, scaler_f, atom_a, atom_b, bond_k, bond_r0, frc_f,
-                            atom_v, reinterpret_cast<cudaStream_t>(stream_ptr));
-    return true;
-  }
-
- protected:
-  void InitSizeLists() override {
-    input_size_list_.push_back(ele_uint_crd * sizeof(T1));
-    input_size_list_.push_back(ele_scaler * sizeof(T));
-    input_size_list_.push_back(ele_atom_a * sizeof(T1));
-    input_size_list_.push_back(ele_atom_b * sizeof(T1));
-    input_size_list_.push_back(ele_bond_k * sizeof(T));
-    input_size_list_.push_back(ele_bond_r0 * sizeof(T));
-
-    output_size_list_.push_back(atom_numbers * 3 * sizeof(T));
-    output_size_list_.push_back(atom_numbers * sizeof(T));
-  }
-
- private:
-  size_t ele_uint_crd = 1;
-  size_t ele_scaler = 1;
-  size_t ele_atom_a = 1;
-  size_t ele_atom_b = 1;
-  size_t ele_bond_k = 1;
-  size_t ele_bond_r0 = 1;
-
-  int bond_numbers;
-  int atom_numbers;
-};
-}  // namespace kernel
-}  // namespace mindspore
-#endif
diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/common/atomcrdtocv_kernel.cc b/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/common/atomcrdtocv_kernel.cc
deleted file mode 100644
index 6c26d16f7bd..00000000000
---
a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/common/atomcrdtocv_kernel.cc +++ /dev/null @@ -1,32 +0,0 @@ -/** - * Copyright 2021-2022 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "plugin/device/gpu/kernel/sponge/common/atomcrdtocv_kernel.h" - -namespace mindspore { -namespace kernel { -MS_REG_GPU_KERNEL_TWO(TransferCrd, - KernelAttr() - .AddInputAttr(kNumberTypeFloat32) - .AddInputAttr(kNumberTypeFloat32) - .AddInputAttr(kNumberTypeFloat32) - .AddOutputAttr(kNumberTypeFloat32) - .AddOutputAttr(kNumberTypeFloat32) - .AddOutputAttr(kNumberTypeFloat32) - .AddOutputAttr(kNumberTypeInt32), - AtomCrdToCVGpuKernelMod, float, int) -} // namespace kernel -} // namespace mindspore diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/common/atomcrdtocv_kernel.h b/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/common/atomcrdtocv_kernel.h deleted file mode 100644 index 6dd8a53587e..00000000000 --- a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/common/atomcrdtocv_kernel.h +++ /dev/null @@ -1,99 +0,0 @@ -/** - * Copyright 2021-2022 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */
-
-#ifndef MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_GPU_SPONG_COMMON_ATOMCRDTOCV_KERNEL_H_
-#define MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_GPU_SPONG_COMMON_ATOMCRDTOCV_KERNEL_H_
-
-#include "plugin/device/gpu/kernel/cuda_impl/sponge/common/atomcrdtocv_impl.cuh"
-
-#include <cuda_runtime_api.h>
-#include <map>
-#include <string>
-#include <vector>
-
-#include "plugin/device/gpu/kernel/gpu_kernel.h"
-#include "plugin/device/gpu/kernel/gpu_kernel_factory.h"
-#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/cuda_common.h"
-
-namespace mindspore {
-namespace kernel {
-template <typename T, typename T1>
-class AtomCrdToCVGpuKernelMod : public DeprecatedNativeGpuKernelMod {
- public:
-  AtomCrdToCVGpuKernelMod() : ele_crd(1) {}
-  ~AtomCrdToCVGpuKernelMod() override = default;
-
-  bool Init(const CNodePtr &kernel_node) override {
-    kernel_node_ = kernel_node;
-    start_serial = static_cast<int>(GetAttr<int64_t>(kernel_node, "start_serial"));
-    end_serial = static_cast<int>(GetAttr<int64_t>(kernel_node, "end_serial"));
-    number = static_cast<int>(GetAttr<int64_t>(kernel_node, "number"));
-    atom_numbers = static_cast<int>(GetAttr<int64_t>(kernel_node, "atom_numbers"));
-    auto shape_crd = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 0);
-    auto shape_old_crd = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 1);
-    auto shape_box = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 2);
-
-    ele_crd *= SizeOf(shape_crd);
-    ele_old_crd *= SizeOf(shape_old_crd);
-    ele_box *= SizeOf(shape_box);
-
-    InitSizeLists();
-    return true;
-  }
-
-  bool Launch(const std::vector<AddressPtr> &inputs, const std::vector<AddressPtr> &workspace,
-              const std::vector<AddressPtr> &outputs, void *stream_ptr) override {
-    auto crd = GetDeviceAddress<T>(inputs, 0);
-    auto old_crd = GetDeviceAddress<T>(inputs, 1);
-    auto box = GetDeviceAddress<T>(inputs, 2);
-
-    auto g_radial = GetDeviceAddress<T>(outputs, 0);
-    auto g_angular = GetDeviceAddress<T>(outputs, 1);
-
-    auto nowarp_crd = GetDeviceAddress<T>(outputs, 2);
-    auto box_map_times = GetDeviceAddress<T1>(outputs, 3);
-
-    AtomCrdToCV(atom_numbers, start_serial, end_serial, number, crd, old_crd, nowarp_crd, box_map_times, box, g_radial,
-                g_angular, reinterpret_cast<cudaStream_t>(stream_ptr));
-    return true;
-  }
-
- protected:
-  void InitSizeLists() override {
-    input_size_list_.push_back(ele_crd * sizeof(T));
-    input_size_list_.push_back(ele_old_crd * sizeof(T));
-    input_size_list_.push_back(ele_box * sizeof(T));
-
-    output_size_list_.push_back(number * sizeof(T));
-    output_size_list_.push_back(number * sizeof(T));
-
-    output_size_list_.push_back(3 * atom_numbers * sizeof(T));
-    output_size_list_.push_back(3 * atom_numbers * sizeof(T1));
-  }
-
- private:
-  size_t ele_crd = 1;
-  size_t ele_old_crd = 1;
-  size_t ele_box = 1;
-
-  int end_serial;
-  int start_serial;
-  int number;
-  int atom_numbers;
-};
-}  // namespace kernel
-}  // namespace mindspore
-#endif  // MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_GPU_SPONG_COMMON_ATOMCRDTOCV_KERNEL_H_
diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/common/crd_to_uint_crd_kernel.cc b/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/common/crd_to_uint_crd_kernel.cc
deleted file mode 100644
index fb80d292791..00000000000
--- a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/common/crd_to_uint_crd_kernel.cc
+++ /dev/null
@@ -1,26 +0,0 @@
-/**
- * Copyright 2021-2022 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "plugin/device/gpu/kernel/sponge/common/crd_to_uint_crd_kernel.h"
-
-namespace mindspore {
-namespace kernel {
-MS_REG_GPU_KERNEL_TWO(
-  CrdToUintCrd,
-  KernelAttr().AddInputAttr(kNumberTypeFloat32).AddInputAttr(kNumberTypeFloat32).AddOutputAttr(kNumberTypeUInt32),
-  CrdToUintCrdGpuKernelMod, float, unsigned int)
-}  // namespace kernel
-}  // namespace mindspore
diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/common/crd_to_uint_crd_kernel.h b/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/common/crd_to_uint_crd_kernel.h
deleted file mode 100644
index 6f6a33655e3..00000000000
--- a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/common/crd_to_uint_crd_kernel.h
+++ /dev/null
@@ -1,79 +0,0 @@
-/**
- * Copyright 2021-2022 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_GPU_SPONG_COMMON_CRD_TO_UINT_CRD_KERNEL_H_
-#define MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_GPU_SPONG_COMMON_CRD_TO_UINT_CRD_KERNEL_H_
-
-#include <cuda_runtime_api.h>
-#include <map>
-#include <string>
-#include <vector>
-#include "plugin/device/gpu/kernel/gpu_kernel.h"
-#include "plugin/device/gpu/kernel/gpu_kernel_factory.h"
-#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/cuda_common.h"
-#include "plugin/device/gpu/kernel/cuda_impl/sponge/common/crd_to_uint_crd_impl.cuh"
-
-namespace mindspore {
-namespace kernel {
-template <typename T, typename T1>
-class CrdToUintCrdGpuKernelMod : public DeprecatedNativeGpuKernelMod {
- public:
-  CrdToUintCrdGpuKernelMod() : ele_crd(1) {}
-  ~CrdToUintCrdGpuKernelMod() override = default;
-
-  bool Init(const CNodePtr &kernel_node) override {
-    kernel_node_ = kernel_node;
-    atom_numbers = static_cast<int>(GetAttr<int64_t>(kernel_node, "atom_numbers"));
-
-    auto shape_crd_to_uint_crd_cof = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 0);
-    auto shape_crd = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 1);
-
-    ele_crd_to_uint_crd_cof *= SizeOf(shape_crd_to_uint_crd_cof);
-    ele_crd *= SizeOf(shape_crd);
-
-    InitSizeLists();
-    return true;
-  }
-
-  bool Launch(const std::vector<AddressPtr> &inputs, const std::vector<AddressPtr> &,
-              const std::vector<AddressPtr> &outputs, void *stream_ptr) override {
-    auto crd_to_uint_crd_cof = GetDeviceAddress<T>(inputs, 0);
-    auto crd = GetDeviceAddress<T>(inputs, 1);
-
-    auto uint_crd = GetDeviceAddress<T1>(outputs, 0);
-
-    CrdToUintCrd(atom_numbers, crd_to_uint_crd_cof, crd, uint_crd, reinterpret_cast<cudaStream_t>(stream_ptr));
-
-    return true;
-  }
-
- protected:
-  void InitSizeLists() override {
-    input_size_list_.push_back(ele_crd_to_uint_crd_cof * sizeof(T));
-    input_size_list_.push_back(ele_crd * sizeof(T));
-
-    output_size_list_.push_back(3 * atom_numbers * sizeof(T));
-  }
-
- private:
-  size_t ele_crd_to_uint_crd_cof = 1;
-  size_t ele_crd = 1;
-
-  int atom_numbers;
-};
-}  // namespace kernel
-}  // namespace mindspore
-#endif  // MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_GPU_SPONG_COMMON_CRD_TO_UINT_CRD_KERNEL_H_
diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/common/crd_to_uint_crd_quarter_kernel.cc b/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/common/crd_to_uint_crd_quarter_kernel.cc
deleted file mode 100644
index 63905c0a42b..00000000000
--- a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/common/crd_to_uint_crd_quarter_kernel.cc
+++ /dev/null
@@ -1,26 +0,0 @@
-/**
- * Copyright 2021-2022 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "plugin/device/gpu/kernel/sponge/common/crd_to_uint_crd_quarter_kernel.h"
-
-namespace mindspore {
-namespace kernel {
-MS_REG_GPU_KERNEL_TWO(
-  CrdToUintCrdQuarter,
-  KernelAttr().AddInputAttr(kNumberTypeFloat32).AddInputAttr(kNumberTypeFloat32).AddOutputAttr(kNumberTypeUInt32),
-  CrdToUintCrdQuarterGpuKernelMod, float, unsigned int)
-}  // namespace kernel
-}  // namespace mindspore
diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/common/crd_to_uint_crd_quarter_kernel.h b/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/common/crd_to_uint_crd_quarter_kernel.h
deleted file mode 100644
index bc5c3058f33..00000000000
--- a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/common/crd_to_uint_crd_quarter_kernel.h
+++ /dev/null
@@ -1,79 +0,0 @@
-/**
- * Copyright 2021-2022 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_GPU_SPONG_COMMON_CRD_TO_UINT_CRD_QUARTER_KERNEL_H_
-#define MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_GPU_SPONG_COMMON_CRD_TO_UINT_CRD_QUARTER_KERNEL_H_
-
-#include <cuda_runtime_api.h>
-#include <map>
-#include <string>
-#include <vector>
-#include "plugin/device/gpu/kernel/gpu_kernel.h"
-#include "plugin/device/gpu/kernel/gpu_kernel_factory.h"
-#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/cuda_common.h"
-#include "plugin/device/gpu/kernel/cuda_impl/sponge/common/crd_to_uint_crd_quarter_impl.cuh"
-
-namespace mindspore {
-namespace kernel {
-template <typename T, typename T1>
-class CrdToUintCrdQuarterGpuKernelMod : public DeprecatedNativeGpuKernelMod {
- public:
-  CrdToUintCrdQuarterGpuKernelMod() : ele_crd(1) {}
-  ~CrdToUintCrdQuarterGpuKernelMod() override = default;
-
-  bool Init(const CNodePtr &kernel_node) override {
-    kernel_node_ = kernel_node;
-    atom_numbers = static_cast<int>(GetAttr<int64_t>(kernel_node, "atom_numbers"));
-
-    auto shape_crd_to_uint_crd_cof = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 0);
-    auto shape_crd = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 1);
-
-    ele_crd_to_uint_crd_cof *= SizeOf(shape_crd_to_uint_crd_cof);
-    ele_crd *= SizeOf(shape_crd);
-
-    InitSizeLists();
-    return true;
-  }
-
-  bool Launch(const std::vector<AddressPtr> &inputs, const std::vector<AddressPtr> &,
-              const std::vector<AddressPtr> &outputs, void *stream_ptr) override {
-    auto crd_to_uint_crd_cof = GetDeviceAddress<T>(inputs, 0);
-    auto crd = GetDeviceAddress<T>(inputs, 1);
-
-    auto uint_crd = GetDeviceAddress<T1>(outputs, 0);
-
-    CrdToUintCrdQuarter(atom_numbers, crd_to_uint_crd_cof, crd, uint_crd, reinterpret_cast<cudaStream_t>(stream_ptr));
-
-    return true;
-  }
-
- protected:
-  void InitSizeLists() override {
-    input_size_list_.push_back(ele_crd_to_uint_crd_cof * sizeof(T));
-    input_size_list_.push_back(ele_crd * sizeof(T));
-
-    output_size_list_.push_back(3 * atom_numbers * sizeof(T));
-  }
-
- private:
-  size_t ele_crd_to_uint_crd_cof = 1;
-  size_t ele_crd = 1;
-
-  int atom_numbers;
-};
-}  // namespace kernel
-}  // namespace mindspore
-#endif  // MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_GPU_SPONG_COMMON_CRD_TO_UINT_CRD_KERNEL_H_
diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/common/get_center_of_mass_kernel.cc b/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/common/get_center_of_mass_kernel.cc
deleted file mode 100644
index 6b37707852b..00000000000
--- a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/common/get_center_of_mass_kernel.cc
+++ /dev/null
@@ -1,31 +0,0 @@
-/**
- * Copyright 2021-2022 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "plugin/device/gpu/kernel/sponge/common/get_center_of_mass_kernel.h"
-
-namespace mindspore {
-namespace kernel {
-MS_REG_GPU_KERNEL_TWO(GetCenterOfMass,
-                      KernelAttr()
-                        .AddInputAttr(kNumberTypeInt32)
-                        .AddInputAttr(kNumberTypeInt32)
-                        .AddInputAttr(kNumberTypeFloat32)
-                        .AddInputAttr(kNumberTypeFloat32)
-                        .AddInputAttr(kNumberTypeFloat32)
-                        .AddOutputAttr(kNumberTypeFloat32),
-                      GetCenterOfMassGpuKernelMod, float, int)
-}  // namespace kernel
-}  // namespace mindspore
diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/common/get_center_of_mass_kernel.h b/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/common/get_center_of_mass_kernel.h
deleted file mode 100644
index a80a7e4d765..00000000000
--- a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/common/get_center_of_mass_kernel.h
+++ /dev/null
@@ -1,94 +0,0 @@
-/**
- * Copyright 2021-2022 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_GPU_SPONG_COMMON_GET_CENTER_OF_MASS_KERNEL_H_
-#define MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_GPU_SPONG_COMMON_GET_CENTER_OF_MASS_KERNEL_H_
-
-#include <cuda_runtime_api.h>
-#include <map>
-#include <string>
-#include <vector>
-#include "plugin/device/gpu/kernel/gpu_kernel.h"
-#include "plugin/device/gpu/kernel/gpu_kernel_factory.h"
-#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/cuda_common.h"
-#include "plugin/device/gpu/kernel/cuda_impl/sponge/common/get_center_of_mass_impl.cuh"
-
-namespace mindspore {
-namespace kernel {
-template <typename T, typename T1>
-class GetCenterOfMassGpuKernelMod : public DeprecatedNativeGpuKernelMod {
- public:
-  GetCenterOfMassGpuKernelMod() : ele_start(1) {}
-  ~GetCenterOfMassGpuKernelMod() override = default;
-
-  bool Init(const CNodePtr &kernel_node) override {
-    kernel_node_ = kernel_node;
-    residue_numbers = static_cast<int>(GetAttr<int64_t>(kernel_node, "residue_numbers"));
-
-    auto shape_start = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 0);
-    auto shape_end = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 1);
-    auto shape_crd = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 2);
-    auto shape_atom_mass = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 3);
-    auto shape_residue_mass_inverse = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 4);
-
-    ele_start *= SizeOf(shape_start);
-    ele_end *= SizeOf(shape_end);
-    ele_crd *= SizeOf(shape_crd);
-    ele_atom_mass *= SizeOf(shape_atom_mass);
-    ele_residue_mass_inverse *= SizeOf(shape_residue_mass_inverse);
-
-    InitSizeLists();
-    return true;
-  }
-
-  bool Launch(const std::vector<AddressPtr> &inputs, const std::vector<AddressPtr> &,
-              const std::vector<AddressPtr> &outputs, void *stream_ptr) override {
-    auto start = GetDeviceAddress<T1>(inputs, 0);
-    auto end = GetDeviceAddress<T1>(inputs, 1);
-    auto crd = GetDeviceAddress<T>(inputs, 2);
-    auto atom_mass = GetDeviceAddress<T>(inputs, 3);
-    auto residue_mass_inverse = GetDeviceAddress<T>(inputs, 4);
-
-    auto center_of_mass = GetDeviceAddress<T>(outputs, 0);
-
-    GetCenterOfMass(residue_numbers, start, end, crd, atom_mass, residue_mass_inverse, center_of_mass,
-                    reinterpret_cast<cudaStream_t>(stream_ptr));
-
-    return true;
-  }
-
- protected:
-  void InitSizeLists() override {
-    input_size_list_.push_back(ele_start * sizeof(T1));
-    input_size_list_.push_back(ele_end * sizeof(T1));
-    input_size_list_.push_back(ele_crd * sizeof(T));
-    input_size_list_.push_back(ele_atom_mass * sizeof(T));
-    input_size_list_.push_back(ele_residue_mass_inverse * sizeof(T));
-    output_size_list_.push_back(3 * sizeof(T) * residue_numbers);
-  }
-
- private:
-  size_t ele_start = 1;
-  size_t ele_end = 1;
-  size_t ele_crd = 1;
-  size_t ele_atom_mass = 1;
-  size_t ele_residue_mass_inverse = 1;
-
-  int residue_numbers;
-};
-}  // namespace kernel
-}  // namespace mindspore
-#endif  // MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_GPU_SPONG_COMMON_GET_CENTER_OF_MASS_KERNEL_H_
diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/common/getcenter_kernel.cc b/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/common/getcenter_kernel.cc
deleted file mode 100644
index 78eb13f04e7..00000000000
--- a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/common/getcenter_kernel.cc
+++ /dev/null
@@ -1,26 +0,0 @@
-/**
- * Copyright 2021-2022 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "plugin/device/gpu/kernel/sponge/common/getcenter_kernel.h"
-
-namespace mindspore {
-namespace kernel {
-MS_REG_GPU_KERNEL_TWO(
-  GetCenterOfGeometry,
-  KernelAttr().AddInputAttr(kNumberTypeInt32).AddInputAttr(kNumberTypeFloat32).AddOutputAttr(kNumberTypeFloat32),
-  GetCenterOfGeometryGpuKernelMod, float, int)
-}  // namespace kernel
-}  // namespace mindspore
diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/common/getcenter_kernel.h b/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/common/getcenter_kernel.h
deleted file mode 100644
index c4116facfe7..00000000000
--- a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/common/getcenter_kernel.h
+++ /dev/null
@@ -1,82 +0,0 @@
-/**
- * Copyright 2021-2022 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_GPU_SPONG_COMMON_GETCENTER_KERNEL_H_
-#define MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_GPU_SPONG_COMMON_GETCENTER_KERNEL_H_
-
-#include <cuda_runtime_api.h>
-#include <map>
-#include <string>
-#include <vector>
-#include "plugin/device/gpu/kernel/gpu_kernel.h"
-#include "plugin/device/gpu/kernel/gpu_kernel_factory.h"
-#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/cuda_common.h"
-#include "plugin/device/gpu/kernel/cuda_impl/sponge/common/getcenter_impl.cuh"
-
-namespace mindspore {
-namespace kernel {
-template <typename T, typename T1>
-class GetCenterOfGeometryGpuKernelMod : public DeprecatedNativeGpuKernelMod {
- public:
-  GetCenterOfGeometryGpuKernelMod() : ele_center_atoms(1) {}
-  ~GetCenterOfGeometryGpuKernelMod() override = default;
-
-  bool Init(const CNodePtr &kernel_node) override {
-    kernel_node_ = kernel_node;
-    center_numbers = static_cast<int>(GetAttr<int64_t>(kernel_node, "center_numbers"));
-    center_numbers_inverse = static_cast<float>(GetAttr<float>(kernel_node, "center_numbers_inverse"));
-
-    auto shape_center_atoms = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 0);
-    auto shape_crd = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 1);
-
-    ele_center_atoms *= SizeOf(shape_center_atoms);
-    ele_crd *= SizeOf(shape_crd);
-
-    InitSizeLists();
-    return true;
-  }
-
-  bool Launch(const std::vector<AddressPtr> &inputs, const std::vector<AddressPtr> &,
-              const std::vector<AddressPtr> &outputs, void *stream_ptr) override {
-    auto center_atoms = GetDeviceAddress<T1>(inputs, 0);
-    auto crd = GetDeviceAddress<T>(inputs, 1);
-
-    auto center_of_geometry = GetDeviceAddress<T>(outputs, 0);
-
-    GetCenterOfGeometry(center_numbers, center_numbers_inverse, center_atoms, crd, center_of_geometry,
-                        reinterpret_cast<cudaStream_t>(stream_ptr));
-
-    return true;
-  }
-
- protected:
-  void InitSizeLists() override {
-    input_size_list_.push_back(ele_center_atoms * sizeof(T1));
-    input_size_list_.push_back(ele_crd * sizeof(T));
-
-    output_size_list_.push_back(3 * sizeof(T));
-  }
-
- private:
-  size_t ele_center_atoms = 1;
-  size_t ele_crd = 1;
-
-  int center_numbers;
-  float center_numbers_inverse;
-};
-}  // namespace kernel
-}  // namespace mindspore
-#endif  // MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_GPU_SPONG_COMMON_GETCENTER_KERNEL_H_
diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/common/map_center_of_mass_kernel.cc b/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/common/map_center_of_mass_kernel.cc
deleted file mode 100644
index 08c27d7710c..00000000000
--- a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/common/map_center_of_mass_kernel.cc
+++ /dev/null
@@ -1,33 +0,0 @@
-/**
- * Copyright 2021-2022 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */ - -#include "plugin/device/gpu/kernel/sponge/common/map_center_of_mass_kernel.h" - -namespace mindspore { -namespace kernel { -MS_REG_GPU_KERNEL_TWO(MapCenterOfMass, - KernelAttr() - .AddInputAttr(kNumberTypeInt32) - .AddInputAttr(kNumberTypeInt32) - .AddInputAttr(kNumberTypeFloat32) - .AddInputAttr(kNumberTypeFloat32) - .AddInputAttr(kNumberTypeFloat32) - .AddInputAttr(kNumberTypeFloat32) - .AddInputAttr(kNumberTypeFloat32) - .AddOutputAttr(kNumberTypeFloat32), - MapCenterOfMassGpuKernelMod, float, int) -} // namespace kernel -} // namespace mindspore diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/common/map_center_of_mass_kernel.h b/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/common/map_center_of_mass_kernel.h deleted file mode 100644 index a711e610331..00000000000 --- a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/common/map_center_of_mass_kernel.h +++ /dev/null @@ -1,99 +0,0 @@ -/** - * Copyright 2021-2022 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_GPU_SPONG_COMMON_MAP_CENTER_OF_MASS_KERNEL_H_ -#define MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_GPU_SPONG_COMMON_MAP_CENTER_OF_MASS_KERNEL_H_ - -#include -#include -#include -#include -#include "plugin/device/gpu/kernel/gpu_kernel.h" -#include "plugin/device/gpu/kernel/gpu_kernel_factory.h" -#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/cuda_common.h" -#include "plugin/device/gpu/kernel/cuda_impl/sponge/common/map_center_of_mass_impl.cuh" - -namespace mindspore { -namespace kernel { -template -class MapCenterOfMassGpuKernelMod : public DeprecatedNativeGpuKernelMod { - public: - MapCenterOfMassGpuKernelMod() : ele_start(1) {} - ~MapCenterOfMassGpuKernelMod() override = default; - - bool Init(const CNodePtr &kernel_node) override { - kernel_node_ = kernel_node; - residue_numbers = static_cast(GetAttr(kernel_node, "residue_numbers")); - - auto shape_start = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 0); - auto shape_end = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 1); - auto shape_center_of_mass = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 2); - auto shape_box_length = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 3); - auto shape_no_wrap_crd = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 4); - auto shape_crd = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 5); - - ele_start *= SizeOf(shape_start); - ele_end *= SizeOf(shape_end); - ele_center_of_mass *= SizeOf(shape_center_of_mass); - ele_box_length *= SizeOf(shape_box_length); - ele_no_wrap_crd *= SizeOf(shape_no_wrap_crd); - ele_crd *= SizeOf(shape_crd); - - InitSizeLists(); - return true; - } - - bool Launch(const std::vector &inputs, const std::vector &, - const std::vector &outputs, void *stream_ptr) override { - auto start = GetDeviceAddress(inputs, 0); - auto end = GetDeviceAddress(inputs, 1); - auto center_of_mass = GetDeviceAddress(inputs, 2); - auto box_length = 
GetDeviceAddress(inputs, 3); - auto no_wrap_crd = GetDeviceAddress(inputs, 4); - auto crd = GetDeviceAddress(inputs, 5); - auto scaler = GetDeviceAddress(inputs, 6); - - MapCenterOfMass(residue_numbers, start, end, center_of_mass, box_length, no_wrap_crd, crd, scaler, - reinterpret_cast(stream_ptr)); - - return true; - } - - protected: - void InitSizeLists() override { - input_size_list_.push_back(ele_start * sizeof(T1)); - input_size_list_.push_back(ele_end * sizeof(T1)); - input_size_list_.push_back(ele_center_of_mass * sizeof(T)); - input_size_list_.push_back(ele_box_length * sizeof(T)); - input_size_list_.push_back(ele_no_wrap_crd * sizeof(T)); - input_size_list_.push_back(ele_crd * sizeof(T)); - input_size_list_.push_back(sizeof(T)); - output_size_list_.push_back(sizeof(T)); - } - - private: - size_t ele_start = 1; - size_t ele_end = 1; - size_t ele_center_of_mass = 1; - size_t ele_box_length = 1; - size_t ele_no_wrap_crd = 1; - size_t ele_crd = 1; - - int residue_numbers; -}; -} // namespace kernel -} // namespace mindspore -#endif // MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_GPU_SPONG_COMMON_MAP_CENTER_OF_MASS_KERNEL_H_ diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/common/mdtemperature_kernel.cc b/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/common/mdtemperature_kernel.cc deleted file mode 100644 index e4e0a5808c9..00000000000 --- a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/common/mdtemperature_kernel.cc +++ /dev/null @@ -1,30 +0,0 @@ -/** - * Copyright 2021-2022 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "plugin/device/gpu/kernel/sponge/common/mdtemperature_kernel.h" - -namespace mindspore { -namespace kernel { -MS_REG_GPU_KERNEL_TWO(MDTemperature, - KernelAttr() - .AddInputAttr(kNumberTypeInt32) - .AddInputAttr(kNumberTypeInt32) - .AddInputAttr(kNumberTypeFloat32) - .AddInputAttr(kNumberTypeFloat32) - .AddOutputAttr(kNumberTypeFloat32), - MDTemperatureGpuKernelMod, float, int) -} // namespace kernel -} // namespace mindspore diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/common/mdtemperature_kernel.h b/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/common/mdtemperature_kernel.h deleted file mode 100644 index 7e4558144f0..00000000000 --- a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/common/mdtemperature_kernel.h +++ /dev/null @@ -1,89 +0,0 @@ -/** - * Copyright 2021-2022 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
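// MapCenterOfMass (registered further above) rewraps whole residues rather than single atoms, so
// bonds never straddle a box face. A sketch of the core idea only, assuming an orthogonal box and
// flat float arrays; the extra scalar input of the real op is omitted here, and all names are
// illustrative.
__global__ void MapCenterOfMassSketch(int residue_numbers, const int *start, const int *end,
                                      const float *center_of_mass, const float *box_length,
                                      const float *no_wrap_crd, float *crd) {
  for (int res = blockIdx.x * blockDim.x + threadIdx.x; res < residue_numbers;
       res += gridDim.x * blockDim.x) {
    float trans[3];
    // translation that puts this residue's center of mass back into [0, box_length)
    for (int d = 0; d < 3; ++d) {
      trans[d] = -floorf(center_of_mass[3 * res + d] / box_length[d]) * box_length[d];
    }
    // apply the same shift to every atom of the residue so it moves as a rigid unit
    for (int i = start[res]; i < end[res]; ++i) {
      for (int d = 0; d < 3; ++d) crd[3 * i + d] = no_wrap_crd[3 * i + d] + trans[d];
    }
  }
}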
- */
-
-#ifndef MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_GPU_SPONG_COMMON_MDTEMPERATURE_KERNEL_H_
-#define MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_GPU_SPONG_COMMON_MDTEMPERATURE_KERNEL_H_
-
-#include
-#include
-#include
-#include
-#include "plugin/device/gpu/kernel/gpu_kernel.h"
-#include "plugin/device/gpu/kernel/gpu_kernel_factory.h"
-#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/cuda_common.h"
-#include "plugin/device/gpu/kernel/cuda_impl/sponge/common/mdtemperature_impl.cuh"
-
-namespace mindspore {
-namespace kernel {
-template <typename T, typename T1>
-class MDTemperatureGpuKernelMod : public DeprecatedNativeGpuKernelMod {
- public:
-  MDTemperatureGpuKernelMod() : ele_start(1) {}
-  ~MDTemperatureGpuKernelMod() override = default;
-
-  bool Init(const CNodePtr &kernel_node) override {
-    kernel_node_ = kernel_node;
-    residue_numbers = static_cast<int>(GetAttr<int64_t>(kernel_node, "residue_numbers"));
-
-    auto shape_start = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 0);
-    auto shape_end = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 1);
-    auto shape_atom_vel = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 2);
-    auto shape_atom_mass = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 3);
-
-    ele_start *= SizeOf(shape_start);
-    ele_end *= SizeOf(shape_end);
-    ele_atom_vel *= SizeOf(shape_atom_vel);
-    ele_atom_mass *= SizeOf(shape_atom_mass);
-
-    InitSizeLists();
-    return true;
-  }
-
-  bool Launch(const std::vector<AddressPtr> &inputs, const std::vector<AddressPtr> &,
-              const std::vector<AddressPtr> &outputs, void *stream_ptr) override {
-    auto start = GetDeviceAddress<T1>(inputs, 0);
-    auto end = GetDeviceAddress<T1>(inputs, 1);
-    auto atom_vel_f = GetDeviceAddress<T>(inputs, 2);
-    auto atom_mass = GetDeviceAddress<T>(inputs, 3);
-
-    auto ek = GetDeviceAddress<T>(outputs, 0);
-
-    MDTemperature(residue_numbers, start, end, atom_vel_f, atom_mass, ek, reinterpret_cast<cudaStream_t>(stream_ptr));
-
-    return true;
-  }
-
- protected:
-  void InitSizeLists() override {
-    input_size_list_.push_back(ele_start * sizeof(T1));
-    input_size_list_.push_back(ele_end * sizeof(T1));
-    input_size_list_.push_back(ele_atom_vel * sizeof(T));
-    input_size_list_.push_back(ele_atom_mass * sizeof(T));
-
-    output_size_list_.push_back(residue_numbers * sizeof(T));
-  }
-
- private:
-  size_t ele_start = 1;
-  size_t ele_end = 1;
-  size_t ele_atom_vel = 1;
-  size_t ele_atom_mass = 1;
-
-  int residue_numbers;
-};
-}  // namespace kernel
-}  // namespace mindspore
-#endif  // MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_GPU_SPONG_COMMON_MDTEMPERATURE_KERNEL_H_
diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/common/total_c6_get_kernel.cc b/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/common/total_c6_get_kernel.cc
deleted file mode 100644
index d1411045db9..00000000000
--- a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/common/total_c6_get_kernel.cc
+++ /dev/null
@@ -1,30 +0,0 @@
-/**
- * Copyright 2021-2022 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-/**
- * Note:
- * Totalc6get.
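// MDTemperature (above) derives a per-residue translational temperature from the summed momentum.
// A sketch under the assumption E_k = |p|^2 / (2M) per residue and T = 2 E_k / (3 k_B); the kB
// value below, in kcal/(mol*K), is an assumed unit convention, and all names are illustrative.
__global__ void MDTemperatureSketch(int residue_numbers, const int *start, const int *end,
                                    const float *atom_vel, const float *atom_mass, float *ek) {
  const float kB = 0.00198716f;  // assumed internal-unit Boltzmann constant
  int res = blockIdx.x * blockDim.x + threadIdx.x;
  if (res < residue_numbers) {
    float p[3] = {0.0f, 0.0f, 0.0f};
    float mass_sum = 0.0f;
    for (int i = start[res]; i < end[res]; ++i) {
      for (int d = 0; d < 3; ++d) p[d] += atom_mass[i] * atom_vel[3 * i + d];
      mass_sum += atom_mass[i];
    }
    float ek_trans = 0.5f * (p[0] * p[0] + p[1] * p[1] + p[2] * p[2]) / mass_sum;
    ek[res] = ek_trans * 2.0f / 3.0f / kB;  // translational kinetic energy -> temperature
  }
}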
This is an experimental interface that is subject to change and/or deletion. - */ - -#include "plugin/device/gpu/kernel/sponge/common/total_c6_get_kernel.h" - -namespace mindspore { -namespace kernel { -MS_REG_GPU_KERNEL_TWO( - Totalc6get, - KernelAttr().AddInputAttr(kNumberTypeInt32).AddInputAttr(kNumberTypeFloat32).AddOutputAttr(kNumberTypeFloat32), - TotalC6GetGpuKernelMod, float, int) -} // namespace kernel -} // namespace mindspore diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/common/total_c6_get_kernel.h b/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/common/total_c6_get_kernel.h deleted file mode 100644 index 3dc019a64b2..00000000000 --- a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/common/total_c6_get_kernel.h +++ /dev/null @@ -1,79 +0,0 @@ -/** - * Copyright 2021-2022 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_GPU_SPONG_COMMON_TOTAL_C6_GET_KERNEL_H_ -#define MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_GPU_SPONG_COMMON_TOTAL_C6_GET_KERNEL_H_ - -#include -#include -#include -#include -#include "plugin/device/gpu/kernel/gpu_kernel.h" -#include "plugin/device/gpu/kernel/gpu_kernel_factory.h" -#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/cuda_common.h" -#include "plugin/device/gpu/kernel/cuda_impl/sponge/common/total_c6_get_impl.cuh" - -namespace mindspore { -namespace kernel { -template -class TotalC6GetGpuKernelMod : public DeprecatedNativeGpuKernelMod { - public: - TotalC6GetGpuKernelMod() : ele_atom_lj_type(1) {} - ~TotalC6GetGpuKernelMod() override = default; - - bool Init(const CNodePtr &kernel_node) override { - kernel_node_ = kernel_node; - atom_numbers = static_cast(GetAttr(kernel_node, "atom_numbers")); - - auto shape_atom_lj_type = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 0); - auto shape_lj_b = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 1); - - ele_atom_lj_type *= SizeOf(shape_atom_lj_type); - ele_lj_b *= SizeOf(shape_lj_b); - - InitSizeLists(); - return true; - } - - bool Launch(const std::vector &inputs, const std::vector &, - const std::vector &outputs, void *stream_ptr) override { - auto atom_lj_type = GetDeviceAddress(inputs, 0); - auto lj_b = GetDeviceAddress(inputs, 1); - - auto factor = GetDeviceAddress(outputs, 0); - - total_c6_get(atom_numbers, atom_lj_type, lj_b, factor, reinterpret_cast(stream_ptr)); - - return true; - } - - protected: - void InitSizeLists() override { - input_size_list_.push_back(ele_atom_lj_type * sizeof(T1)); - input_size_list_.push_back(ele_lj_b * sizeof(T)); - - output_size_list_.push_back(sizeof(T)); - } - - private: - size_t ele_atom_lj_type = 1; - size_t ele_lj_b = 1; - - int atom_numbers; -}; -} // namespace kernel -} // namespace mindspore -#endif // MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_GPU_SPONG_COMMON_GETCENTER_KERNEL_H_ diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/crdmcmap/cal_no_wrap_crd_kernel.cc 
b/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/crdmcmap/cal_no_wrap_crd_kernel.cc deleted file mode 100644 index f39f46e7b94..00000000000 --- a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/crdmcmap/cal_no_wrap_crd_kernel.cc +++ /dev/null @@ -1,33 +0,0 @@ -/** - * Copyright 2021-2022 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -/** - * Note: - * CalculateNowrapCrd. This is an experimental interface that is subject to change and/or deletion. - */ - -#include "plugin/device/gpu/kernel/sponge/crdmcmap/cal_no_wrap_crd_kernel.h" - -namespace mindspore { -namespace kernel { -MS_REG_GPU_KERNEL_TWO(CalculateNowrapCrd, - KernelAttr() - .AddInputAttr(kNumberTypeFloat32) - .AddInputAttr(kNumberTypeFloat32) - .AddInputAttr(kNumberTypeInt32) - .AddOutputAttr(kNumberTypeFloat32), - CalculateNoWrapCrdGpuKernelMod, float, int) -} // namespace kernel -} // namespace mindspore diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/crdmcmap/cal_no_wrap_crd_kernel.h b/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/crdmcmap/cal_no_wrap_crd_kernel.h deleted file mode 100644 index 959c3d0674d..00000000000 --- a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/crdmcmap/cal_no_wrap_crd_kernel.h +++ /dev/null @@ -1,85 +0,0 @@ -/** - * Copyright 2021-2022 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
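// Totalc6get (registered above) sums the pairwise LJ B (r^-6) coefficients over all atom pairs,
// the quantity needed for a long-range dispersion correction. A sketch assuming the usual
// lower-triangular layout of the pair table indexed by the two atoms' LJ types; that layout and
// all names here are assumptions, not the deleted implementation.
__global__ void TotalC6GetSketch(int atom_numbers, const int *atom_lj_type, const float *lj_b,
                                 float *factor) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  int j = blockIdx.y * blockDim.y + threadIdx.y;
  if (i < atom_numbers && j < atom_numbers) {
    int x = atom_lj_type[i];
    int y = atom_lj_type[j];
    int lo = x < y ? x : y;
    int hi = x < y ? y : x;
    // triangular pair-table index: row hi, column lo
    atomicAdd(factor, lj_b[hi * (hi + 1) / 2 + lo]);
  }
}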
- */ - -#ifndef MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_GPU_SPONG_CRDMCMAP_CAL_NO_WRAP_CRD_KERNEL_H_ -#define MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_GPU_SPONG_CRDMCMAP_CAL_NO_WRAP_CRD_KERNEL_H_ - -#include "plugin/device/gpu/kernel/cuda_impl/sponge/crdmcmap/cal_no_wrap_crd_impl.cuh" - -#include -#include -#include -#include - -#include "plugin/device/gpu/kernel/gpu_kernel.h" -#include "plugin/device/gpu/kernel/gpu_kernel_factory.h" -#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/cuda_common.h" - -namespace mindspore { -namespace kernel { -template -class CalculateNoWrapCrdGpuKernelMod : public DeprecatedNativeGpuKernelMod { - public: - CalculateNoWrapCrdGpuKernelMod() : ele_crd(1) {} - ~CalculateNoWrapCrdGpuKernelMod() override = default; - - bool Init(const CNodePtr &kernel_node) override { - // get bond_numbers - kernel_node_ = kernel_node; - atom_numbers = static_cast(GetAttr(kernel_node, "atom_numbers")); - auto shape_crd = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 0); - auto shape_box = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 1); - auto shape_box_map_times = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 2); - - ele_crd *= SizeOf(shape_crd); - ele_box *= SizeOf(shape_box); - ele_box_map_times *= SizeOf(shape_box_map_times); - - InitSizeLists(); - return true; - } - - bool Launch(const std::vector &inputs, const std::vector &, - const std::vector &outputs, void *stream_ptr) override { - auto crd = GetDeviceAddress(inputs, 0); - auto box = GetDeviceAddress(inputs, 1); - auto box_map_times = GetDeviceAddress(inputs, 2); - - auto nowrap_crd = GetDeviceAddress(outputs, 0); - - calculatenowrapcrd(atom_numbers, box_map_times, box, crd, nowrap_crd, reinterpret_cast(stream_ptr)); - return true; - } - - protected: - void InitSizeLists() override { - input_size_list_.push_back(ele_crd * sizeof(T)); - input_size_list_.push_back(ele_box * sizeof(T)); - input_size_list_.push_back(ele_box_map_times * sizeof(T1)); - - output_size_list_.push_back(3 * atom_numbers * sizeof(T)); - } - - private: - size_t ele_crd = 1; - size_t ele_box = 1; - size_t ele_box_map_times = 1; - - int atom_numbers; -}; -} // namespace kernel -} // namespace mindspore -#endif // MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_GPU_SPONG_CRDMCMAP_CAL_NO_WRAP_CRD_KERNEL_H_ diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/crdmcmap/refresh_boxmaptimes_kernel.cc b/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/crdmcmap/refresh_boxmaptimes_kernel.cc deleted file mode 100644 index 4e0c49179a5..00000000000 --- a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/crdmcmap/refresh_boxmaptimes_kernel.cc +++ /dev/null @@ -1,34 +0,0 @@ -/** - * Copyright 2021-2022 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -/** - * Note: - * RefreshBoxmapTimes. This is an experimental interface that is subject to change and/or deletion. 
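// CalculateNowrapCrd (defined above) reconstructs unwrapped coordinates from the wrapped ones
// plus the per-atom count of box crossings. A sketch with flat float arrays and a per-axis box
// length; names are illustrative only.
__global__ void CalculateNoWrapCrdSketch(int atom_numbers, const int *box_map_times,
                                         const float *box, const float *crd, float *nowrap_crd) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < atom_numbers) {
    for (int d = 0; d < 3; ++d) {
      // undo the periodic wrapping recorded in box_map_times
      nowrap_crd[3 * i + d] = crd[3 * i + d] + box_map_times[3 * i + d] * box[d];
    }
  }
}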
- */ - -#include "plugin/device/gpu/kernel/sponge/crdmcmap/refresh_boxmaptimes_kernel.h" - -namespace mindspore { -namespace kernel { -MS_REG_GPU_KERNEL_TWO(RefreshBoxmapTimes, - KernelAttr() - .AddInputAttr(kNumberTypeFloat32) - .AddInputAttr(kNumberTypeFloat32) - .AddInputAttr(kNumberTypeFloat32) - .AddInputAttr(kNumberTypeInt32) - .AddOutputAttr(kNumberTypeFloat32), - RefreshBoxmaptimesGpuKernelMod, float, int) -} // namespace kernel -} // namespace mindspore diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/crdmcmap/refresh_boxmaptimes_kernel.h b/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/crdmcmap/refresh_boxmaptimes_kernel.h deleted file mode 100644 index 8e8e9b156ca..00000000000 --- a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/crdmcmap/refresh_boxmaptimes_kernel.h +++ /dev/null @@ -1,89 +0,0 @@ -/** - * Copyright 2021-2022 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_GPU_SPONG_CRDMCMAP_REFRESH_BOXMAPTIMES_KERNEL_H_ -#define MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_GPU_SPONG_CRDMCMAP_REFRESH_BOXMAPTIMES_KERNEL_H_ - -#include "plugin/device/gpu/kernel/cuda_impl/sponge/crdmcmap/refresh_boxmaptimes_impl.cuh" - -#include -#include -#include -#include - -#include "plugin/device/gpu/kernel/gpu_kernel.h" -#include "plugin/device/gpu/kernel/gpu_kernel_factory.h" -#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/cuda_common.h" - -namespace mindspore { -namespace kernel { -template -class RefreshBoxmaptimesGpuKernelMod : public DeprecatedNativeGpuKernelMod { - public: - RefreshBoxmaptimesGpuKernelMod() : ele_crd(1) {} - ~RefreshBoxmaptimesGpuKernelMod() override = default; - - bool Init(const CNodePtr &kernel_node) override { - // get bond_numbers - kernel_node_ = kernel_node; - atom_numbers = static_cast(GetAttr(kernel_node, "atom_numbers")); - auto shape_crd = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 0); - auto shape_old_crd = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 1); - auto shape_box_length_inverse = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 2); - auto shape_box_map_times = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 3); - - ele_crd *= SizeOf(shape_crd); - ele_old_crd *= SizeOf(shape_old_crd); - ele_box_length_inverse *= SizeOf(shape_box_length_inverse); - ele_box_map_times *= SizeOf(shape_box_map_times); - - InitSizeLists(); - return true; - } - - bool Launch(const std::vector &inputs, const std::vector &, - const std::vector &outputs, void *stream_ptr) override { - auto crd = GetDeviceAddress(inputs, 0); - auto old_crd = GetDeviceAddress(inputs, 1); - auto box_length_inverse = GetDeviceAddress(inputs, 2); - auto box_map_times = GetDeviceAddress(inputs, 3); - - refresh_boxmaptimes(atom_numbers, box_length_inverse, crd, old_crd, box_map_times, - reinterpret_cast(stream_ptr)); - return true; - } - - protected: - void InitSizeLists() override { - input_size_list_.push_back(ele_crd * sizeof(T)); - 
input_size_list_.push_back(ele_old_crd * sizeof(T)); - input_size_list_.push_back(ele_box_length_inverse * sizeof(T)); - input_size_list_.push_back(ele_box_map_times * sizeof(T1)); - - output_size_list_.push_back(sizeof(T)); - } - - private: - size_t ele_crd = 1; - size_t ele_box_length_inverse = 1; - size_t ele_old_crd = 1; - size_t ele_box_map_times = 1; - - int atom_numbers; -}; -} // namespace kernel -} // namespace mindspore -#endif // MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_GPU_SPONG_CRDMCMAP_CAL_NO_WRAP_CRD_KERNEL_H_ diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/dihedral/dihedral_atom_energy_kernel.cc b/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/dihedral/dihedral_atom_energy_kernel.cc deleted file mode 100644 index 0af1b5608db..00000000000 --- a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/dihedral/dihedral_atom_energy_kernel.cc +++ /dev/null @@ -1,37 +0,0 @@ -/** - * Copyright 2021-2022 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "plugin/device/gpu/kernel/sponge/dihedral/dihedral_atom_energy_kernel.h" - -namespace mindspore { -namespace kernel { -MS_REG_GPU_KERNEL_TWO(DihedralAtomEnergy, - KernelAttr() - .AddInputAttr(kNumberTypeUInt32) - .AddInputAttr(kNumberTypeFloat32) - .AddInputAttr(kNumberTypeInt32) - .AddInputAttr(kNumberTypeInt32) - .AddInputAttr(kNumberTypeInt32) - .AddInputAttr(kNumberTypeInt32) - .AddInputAttr(kNumberTypeInt32) - .AddInputAttr(kNumberTypeFloat32) - .AddInputAttr(kNumberTypeFloat32) - .AddInputAttr(kNumberTypeFloat32) - .AddInputAttr(kNumberTypeFloat32) - .AddOutputAttr(kNumberTypeFloat32), - DihedralAtomEnergyGpuKernelMod, float, int) -} // namespace kernel -} // namespace mindspore diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/dihedral/dihedral_atom_energy_kernel.h b/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/dihedral/dihedral_atom_energy_kernel.h deleted file mode 100644 index a51c6f230cc..00000000000 --- a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/dihedral/dihedral_atom_energy_kernel.h +++ /dev/null @@ -1,119 +0,0 @@ -/** - * Copyright 2021-2022 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
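// RefreshBoxmapTimes (above) keeps that crossing count current: when an atom's position jumps
// between steps it has been wrapped, so the counter is bumped and old_crd is refreshed in place.
// A sketch assuming the counter update rounds the displacement measured in box units; the
// rounding rule and names are assumptions.
__global__ void RefreshBoxmapTimesSketch(int atom_numbers, const float *box_length_inverse,
                                         const float *crd, float *old_crd, int *box_map_times) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < atom_numbers) {
    for (int d = 0; d < 3; ++d) {
      float diff = old_crd[3 * i + d] - crd[3 * i + d];
      // nearest integer number of box lengths the atom moved by wrapping
      box_map_times[3 * i + d] += static_cast<int>(floorf(diff * box_length_inverse[d] + 0.5f));
      old_crd[3 * i + d] = crd[3 * i + d];  // remember this step's position
    }
  }
}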
- */ - -#ifndef MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_GPU_SPONGE_DIHEDRAL_DIHEDRAL_ATOM_ENERGY_KERNEL_H_ -#define MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_GPU_SPONGE_DIHEDRAL_DIHEDRAL_ATOM_ENERGY_KERNEL_H_ -#include -#include -#include -#include -#include "plugin/device/gpu/kernel/gpu_kernel.h" -#include "plugin/device/gpu/kernel/gpu_kernel_factory.h" -#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/cuda_common.h" -#include "plugin/device/gpu/kernel/cuda_impl/sponge/dihedral/dihedral_atom_energy_impl.cuh" -namespace mindspore { -namespace kernel { -template -class DihedralAtomEnergyGpuKernelMod : public DeprecatedNativeGpuKernelMod { - public: - DihedralAtomEnergyGpuKernelMod() : ele_uint_crd(1) {} - ~DihedralAtomEnergyGpuKernelMod() override = default; - - bool Init(const CNodePtr &kernel_node) override { - kernel_node_ = kernel_node; - dihedral_numbers = static_cast(GetAttr(kernel_node, "dihedral_numbers")); - auto shape_uint_crd = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 0); - auto shape_scaler = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 1); - auto shape_atom_a = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 2); - auto shape_atom_b = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 3); - auto shape_atom_c = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 4); - auto shape_atom_d = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 5); - auto shape_ipn = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 6); - auto shape_pk = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 7); - auto shape_gamc = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 8); - auto shape_gams = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 9); - auto shape_pn = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 10); - - ele_uint_crd *= SizeOf(shape_uint_crd); - ele_scaler *= SizeOf(shape_scaler); - ele_atom_a *= SizeOf(shape_atom_a); - ele_atom_b *= SizeOf(shape_atom_b); - ele_atom_c *= SizeOf(shape_atom_c); - ele_atom_d *= SizeOf(shape_atom_d); - ele_ipn *= SizeOf(shape_ipn); - ele_pk *= SizeOf(shape_pk); - ele_gamc *= SizeOf(shape_gamc); - ele_gams *= SizeOf(shape_gams); - ele_pn *= SizeOf(shape_pn); - InitSizeLists(); - return true; - } - - bool Launch(const std::vector &inputs, const std::vector &, - const std::vector &outputs, void *stream_ptr) override { - auto uint_crd_f = GetDeviceAddress(inputs, 0); - auto scaler_f = GetDeviceAddress(inputs, 1); - auto atom_a = GetDeviceAddress(inputs, 2); - auto atom_b = GetDeviceAddress(inputs, 3); - auto atom_c = GetDeviceAddress(inputs, 4); - auto atom_d = GetDeviceAddress(inputs, 5); - auto ipn = GetDeviceAddress(inputs, 6); - auto pk = GetDeviceAddress(inputs, 7); - auto gamc = GetDeviceAddress(inputs, 8); - auto gams = GetDeviceAddress(inputs, 9); - auto pn = GetDeviceAddress(inputs, 10); - - auto ene = GetDeviceAddress(outputs, 0); - DihedralAtomEnergy(dihedral_numbers, ele_uint_crd, uint_crd_f, scaler_f, atom_a, atom_b, atom_c, atom_d, ipn, pk, - gamc, gams, pn, ene, reinterpret_cast(stream_ptr)); - return true; - } - - protected: - void InitSizeLists() override { - input_size_list_.push_back(ele_uint_crd * sizeof(T1)); - input_size_list_.push_back(ele_scaler * sizeof(T)); - input_size_list_.push_back(ele_atom_a * sizeof(T1)); - input_size_list_.push_back(ele_atom_b * sizeof(T1)); - input_size_list_.push_back(ele_atom_c * sizeof(T1)); - input_size_list_.push_back(ele_atom_d * sizeof(T1)); - input_size_list_.push_back(ele_ipn * sizeof(T1)); - 
input_size_list_.push_back(ele_pk * sizeof(T)); - input_size_list_.push_back(ele_gamc * sizeof(T)); - input_size_list_.push_back(ele_gams * sizeof(T)); - input_size_list_.push_back(ele_pn * sizeof(T)); - - output_size_list_.push_back(ele_uint_crd * sizeof(T)); - } - - private: - size_t ele_uint_crd = 1; - size_t ele_scaler = 1; - size_t ele_atom_a = 1; - size_t ele_atom_b = 1; - size_t ele_atom_c = 1; - size_t ele_atom_d = 1; - size_t ele_ipn = 1; - size_t ele_pk = 1; - size_t ele_gamc = 1; - size_t ele_gams = 1; - size_t ele_pn = 1; - - int dihedral_numbers; -}; -} // namespace kernel -} // namespace mindspore -#endif diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/dihedral/dihedral_energy_kernel.cc b/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/dihedral/dihedral_energy_kernel.cc deleted file mode 100644 index af84b47b29b..00000000000 --- a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/dihedral/dihedral_energy_kernel.cc +++ /dev/null @@ -1,37 +0,0 @@ -/** - * Copyright 2021-2022 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "plugin/device/gpu/kernel/sponge/dihedral/dihedral_energy_kernel.h" - -namespace mindspore { -namespace kernel { -MS_REG_GPU_KERNEL_TWO(DihedralEnergy, - KernelAttr() - .AddInputAttr(kNumberTypeUInt32) - .AddInputAttr(kNumberTypeFloat32) - .AddInputAttr(kNumberTypeInt32) - .AddInputAttr(kNumberTypeInt32) - .AddInputAttr(kNumberTypeInt32) - .AddInputAttr(kNumberTypeInt32) - .AddInputAttr(kNumberTypeInt32) - .AddInputAttr(kNumberTypeFloat32) - .AddInputAttr(kNumberTypeFloat32) - .AddInputAttr(kNumberTypeFloat32) - .AddInputAttr(kNumberTypeFloat32) - .AddOutputAttr(kNumberTypeFloat32), - DihedralEnergyGpuKernelMod, float, int) -} // namespace kernel -} // namespace mindspore diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/dihedral/dihedral_energy_kernel.h b/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/dihedral/dihedral_energy_kernel.h deleted file mode 100644 index e5a42d440f3..00000000000 --- a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/dihedral/dihedral_energy_kernel.h +++ /dev/null @@ -1,119 +0,0 @@ -/** - * Copyright 2021-2022 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_GPU_SPONGE_DIHEDRAL_DIHEDRAL_ENERGY_KERNEL_H_ -#define MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_GPU_SPONGE_DIHEDRAL_DIHEDRAL_ENERGY_KERNEL_H_ -#include -#include -#include -#include -#include "plugin/device/gpu/kernel/gpu_kernel.h" -#include "plugin/device/gpu/kernel/gpu_kernel_factory.h" -#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/cuda_common.h" -#include "plugin/device/gpu/kernel/cuda_impl/sponge/dihedral/dihedral_energy_impl.cuh" -namespace mindspore { -namespace kernel { -template -class DihedralEnergyGpuKernelMod : public DeprecatedNativeGpuKernelMod { - public: - DihedralEnergyGpuKernelMod() : ele_uint_crd(1) {} - ~DihedralEnergyGpuKernelMod() override = default; - - bool Init(const CNodePtr &kernel_node) override { - kernel_node_ = kernel_node; - dihedral_numbers = static_cast(GetAttr(kernel_node, "dihedral_numbers")); - auto shape_uint_crd = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 0); - auto shape_scaler = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 1); - auto shape_atom_a = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 2); - auto shape_atom_b = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 3); - auto shape_atom_c = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 4); - auto shape_atom_d = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 5); - auto shape_ipn = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 6); - auto shape_pk = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 7); - auto shape_gamc = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 8); - auto shape_gams = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 9); - auto shape_pn = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 10); - - ele_uint_crd *= SizeOf(shape_uint_crd); - ele_scaler *= SizeOf(shape_scaler); - ele_atom_a *= SizeOf(shape_atom_a); - ele_atom_b *= SizeOf(shape_atom_b); - ele_atom_c *= SizeOf(shape_atom_c); - ele_atom_d *= SizeOf(shape_atom_d); - ele_ipn *= SizeOf(shape_ipn); - ele_pk *= SizeOf(shape_pk); - ele_gamc *= SizeOf(shape_gamc); - ele_gams *= SizeOf(shape_gams); - ele_pn *= SizeOf(shape_pn); - InitSizeLists(); - return true; - } - - bool Launch(const std::vector &inputs, const std::vector &, - const std::vector &outputs, void *stream_ptr) override { - auto uint_crd_f = GetDeviceAddress(inputs, 0); - auto scaler_f = GetDeviceAddress(inputs, 1); - auto atom_a = GetDeviceAddress(inputs, 2); - auto atom_b = GetDeviceAddress(inputs, 3); - auto atom_c = GetDeviceAddress(inputs, 4); - auto atom_d = GetDeviceAddress(inputs, 5); - auto ipn = GetDeviceAddress(inputs, 6); - auto pk = GetDeviceAddress(inputs, 7); - auto gamc = GetDeviceAddress(inputs, 8); - auto gams = GetDeviceAddress(inputs, 9); - auto pn = GetDeviceAddress(inputs, 10); - - auto ene = GetDeviceAddress(outputs, 0); - DihedralEnergy(dihedral_numbers, uint_crd_f, scaler_f, atom_a, atom_b, atom_c, atom_d, ipn, pk, gamc, gams, pn, ene, - reinterpret_cast(stream_ptr)); - return true; - } - - protected: - void InitSizeLists() override { - input_size_list_.push_back(ele_uint_crd * sizeof(T1)); - input_size_list_.push_back(ele_scaler * sizeof(T)); - input_size_list_.push_back(ele_atom_a * sizeof(T1)); - input_size_list_.push_back(ele_atom_b * sizeof(T1)); - input_size_list_.push_back(ele_atom_c * sizeof(T1)); - input_size_list_.push_back(ele_atom_d * sizeof(T1)); - input_size_list_.push_back(ele_ipn * sizeof(T1)); - input_size_list_.push_back(ele_pk * 
sizeof(T)); - input_size_list_.push_back(ele_gamc * sizeof(T)); - input_size_list_.push_back(ele_gams * sizeof(T)); - input_size_list_.push_back(ele_pn * sizeof(T)); - - output_size_list_.push_back(dihedral_numbers * sizeof(T)); - } - - private: - size_t ele_uint_crd = 1; - size_t ele_scaler = 1; - size_t ele_atom_a = 1; - size_t ele_atom_b = 1; - size_t ele_atom_c = 1; - size_t ele_atom_d = 1; - size_t ele_ipn = 1; - size_t ele_pk = 1; - size_t ele_gamc = 1; - size_t ele_gams = 1; - size_t ele_pn = 1; - - int dihedral_numbers; -}; -} // namespace kernel -} // namespace mindspore -#endif diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/dihedral/dihedral_force_kernel.cc b/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/dihedral/dihedral_force_kernel.cc deleted file mode 100644 index 6802a89c475..00000000000 --- a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/dihedral/dihedral_force_kernel.cc +++ /dev/null @@ -1,37 +0,0 @@ -/** - * Copyright 2021-2022 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "plugin/device/gpu/kernel/sponge/dihedral/dihedral_force_kernel.h" - -namespace mindspore { -namespace kernel { -MS_REG_GPU_KERNEL_TWO(DihedralForce, - KernelAttr() - .AddInputAttr(kNumberTypeUInt32) - .AddInputAttr(kNumberTypeFloat32) - .AddInputAttr(kNumberTypeInt32) - .AddInputAttr(kNumberTypeInt32) - .AddInputAttr(kNumberTypeInt32) - .AddInputAttr(kNumberTypeInt32) - .AddInputAttr(kNumberTypeInt32) - .AddInputAttr(kNumberTypeFloat32) - .AddInputAttr(kNumberTypeFloat32) - .AddInputAttr(kNumberTypeFloat32) - .AddInputAttr(kNumberTypeFloat32) - .AddOutputAttr(kNumberTypeFloat32), - DihedralForceGpuKernelMod, float, int) -} // namespace kernel -} // namespace mindspore diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/dihedral/dihedral_force_kernel.h b/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/dihedral/dihedral_force_kernel.h deleted file mode 100644 index 945c8a5588a..00000000000 --- a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/dihedral/dihedral_force_kernel.h +++ /dev/null @@ -1,119 +0,0 @@ -/** - * Copyright 2021-2022 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
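// The dihedral energy kernels above all evaluate the same torsion term. With gamc = pk*cos(gamma)
// and gams = pk*sin(gamma) (an assumed pre-folding of the phase), pk*(1 + cos(n*phi - gamma))
// expands to pk + gamc*cos(n*phi) + gams*sin(n*phi). A sketch of the per-dihedral evaluation
// using plain float3 coordinates instead of the op's unsigned fixed-point representation; all
// names are illustrative. The AtomEnergy variant accumulates the same value per atom with
// atomicAdd rather than writing one entry per dihedral.
__device__ float3 f3_sub(float3 a, float3 b) { return make_float3(a.x - b.x, a.y - b.y, a.z - b.z); }
__device__ float3 f3_cross(float3 a, float3 b) {
  return make_float3(a.y * b.z - a.z * b.y, a.z * b.x - a.x * b.z, a.x * b.y - a.y * b.x);
}
__device__ float f3_dot(float3 a, float3 b) { return a.x * b.x + a.y * b.y + a.z * b.z; }

__global__ void DihedralEnergySketch(int dihedral_numbers, const float3 *crd, const int *atom_a,
                                     const int *atom_b, const int *atom_c, const int *atom_d,
                                     const int *ipn, const float *pk, const float *gamc,
                                     const float *gams, float *ene) {
  int t = blockIdx.x * blockDim.x + threadIdx.x;
  if (t < dihedral_numbers) {
    float3 b1 = f3_sub(crd[atom_b[t]], crd[atom_a[t]]);
    float3 b2 = f3_sub(crd[atom_c[t]], crd[atom_b[t]]);
    float3 b3 = f3_sub(crd[atom_d[t]], crd[atom_c[t]]);
    float3 n1 = f3_cross(b1, b2);  // plane normals on either side of the central bond
    float3 n2 = f3_cross(b2, b3);
    // signed torsion angle via the standard atan2 form
    float phi = atan2f(f3_dot(f3_cross(n1, n2), b2) * rsqrtf(f3_dot(b2, b2)), f3_dot(n1, n2));
    float nphi = ipn[t] * phi;
    ene[t] = pk[t] + gamc[t] * cosf(nphi) + gams[t] * sinf(nphi);
  }
}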
- */ - -#ifndef MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_GPU_SPONGE_DIHEDRAL_DIHEDRAL_FORCE_KERNEL_H_ -#define MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_GPU_SPONGE_DIHEDRAL_DIHEDRAL_FORCE_KERNEL_H_ -#include -#include -#include -#include -#include "plugin/device/gpu/kernel/gpu_kernel.h" -#include "plugin/device/gpu/kernel/gpu_kernel_factory.h" -#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/cuda_common.h" -#include "plugin/device/gpu/kernel/cuda_impl/sponge/dihedral/dihedral_force_impl.cuh" -namespace mindspore { -namespace kernel { -template -class DihedralForceGpuKernelMod : public DeprecatedNativeGpuKernelMod { - public: - DihedralForceGpuKernelMod() : ele_uint_crd(1) {} - ~DihedralForceGpuKernelMod() override = default; - - bool Init(const CNodePtr &kernel_node) override { - kernel_node_ = kernel_node; - dihedral_numbers = static_cast(GetAttr(kernel_node, "dihedral_numbers")); - auto shape_uint_crd = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 0); - auto shape_scaler = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 1); - auto shape_atom_a = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 2); - auto shape_atom_b = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 3); - auto shape_atom_c = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 4); - auto shape_atom_d = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 5); - auto shape_ipn = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 6); - auto shape_pk = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 7); - auto shape_gamc = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 8); - auto shape_gams = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 9); - auto shape_pn = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 10); - - ele_uint_crd *= SizeOf(shape_uint_crd); - ele_scaler *= SizeOf(shape_scaler); - ele_atom_a *= SizeOf(shape_atom_a); - ele_atom_b *= SizeOf(shape_atom_b); - ele_atom_c *= SizeOf(shape_atom_c); - ele_atom_d *= SizeOf(shape_atom_d); - ele_ipn *= SizeOf(shape_ipn); - ele_pk *= SizeOf(shape_pk); - ele_gamc *= SizeOf(shape_gamc); - ele_gams *= SizeOf(shape_gams); - ele_pn *= SizeOf(shape_pn); - InitSizeLists(); - return true; - } - - bool Launch(const std::vector &inputs, const std::vector &, - const std::vector &outputs, void *stream_ptr) override { - auto uint_crd_f = GetDeviceAddress(inputs, 0); - auto scaler_f = GetDeviceAddress(inputs, 1); - auto atom_a = GetDeviceAddress(inputs, 2); - auto atom_b = GetDeviceAddress(inputs, 3); - auto atom_c = GetDeviceAddress(inputs, 4); - auto atom_d = GetDeviceAddress(inputs, 5); - auto ipn = GetDeviceAddress(inputs, 6); - auto pk = GetDeviceAddress(inputs, 7); - auto gamc = GetDeviceAddress(inputs, 8); - auto gams = GetDeviceAddress(inputs, 9); - auto pn = GetDeviceAddress(inputs, 10); - - auto frc_f = GetDeviceAddress(outputs, 0); - DihedralForce(dihedral_numbers, ele_uint_crd, uint_crd_f, scaler_f, atom_a, atom_b, atom_c, atom_d, ipn, pk, gamc, - gams, pn, frc_f, reinterpret_cast(stream_ptr)); - return true; - } - - protected: - void InitSizeLists() override { - input_size_list_.push_back(ele_uint_crd * sizeof(T1)); - input_size_list_.push_back(ele_scaler * sizeof(T)); - input_size_list_.push_back(ele_atom_a * sizeof(T1)); - input_size_list_.push_back(ele_atom_b * sizeof(T1)); - input_size_list_.push_back(ele_atom_c * sizeof(T1)); - input_size_list_.push_back(ele_atom_d * sizeof(T1)); - input_size_list_.push_back(ele_ipn * sizeof(T1)); - input_size_list_.push_back(ele_pk * 
sizeof(T)); - input_size_list_.push_back(ele_gamc * sizeof(T)); - input_size_list_.push_back(ele_gams * sizeof(T)); - input_size_list_.push_back(ele_pn * sizeof(T)); - - output_size_list_.push_back(ele_uint_crd * 3 * sizeof(T)); - } - - private: - size_t ele_uint_crd = 1; - size_t ele_scaler = 1; - size_t ele_atom_a = 1; - size_t ele_atom_b = 1; - size_t ele_atom_c = 1; - size_t ele_atom_d = 1; - size_t ele_ipn = 1; - size_t ele_pk = 1; - size_t ele_gamc = 1; - size_t ele_gams = 1; - size_t ele_pn = 1; - - int dihedral_numbers; -}; -} // namespace kernel -} // namespace mindspore -#endif diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/dihedral/dihedral_force_with_atom_energy_kernel.cc b/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/dihedral/dihedral_force_with_atom_energy_kernel.cc deleted file mode 100644 index 04436b949a9..00000000000 --- a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/dihedral/dihedral_force_with_atom_energy_kernel.cc +++ /dev/null @@ -1,38 +0,0 @@ -/** - * Copyright 2021-2022 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "plugin/device/gpu/kernel/sponge/dihedral/dihedral_force_with_atom_energy_kernel.h" - -namespace mindspore { -namespace kernel { -MS_REG_GPU_KERNEL_TWO(DihedralForceWithAtomEnergy, - KernelAttr() - .AddInputAttr(kNumberTypeUInt32) - .AddInputAttr(kNumberTypeFloat32) - .AddInputAttr(kNumberTypeInt32) - .AddInputAttr(kNumberTypeInt32) - .AddInputAttr(kNumberTypeInt32) - .AddInputAttr(kNumberTypeInt32) - .AddInputAttr(kNumberTypeInt32) - .AddInputAttr(kNumberTypeFloat32) - .AddInputAttr(kNumberTypeFloat32) - .AddInputAttr(kNumberTypeFloat32) - .AddInputAttr(kNumberTypeFloat32) - .AddOutputAttr(kNumberTypeFloat32) - .AddOutputAttr(kNumberTypeFloat32), - DihedralForceWithAtomEnergyGpuKernelMod, float, int) -} // namespace kernel -} // namespace mindspore diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/dihedral/dihedral_force_with_atom_energy_kernel.h b/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/dihedral/dihedral_force_with_atom_energy_kernel.h deleted file mode 100644 index df2291272f0..00000000000 --- a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/dihedral/dihedral_force_with_atom_energy_kernel.h +++ /dev/null @@ -1,121 +0,0 @@ -/** - * Copyright 2021-2022 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
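// DihedralForce (above) needs only the analytic derivative of that energy with respect to phi;
// the geometric factors dphi/dr then distribute it onto the four atoms. A sketch of the scalar
// part, matching the gamc/gams folding assumed in the energy sketch:
__device__ float DihedralDEnergyDPhiSketch(int n, float gamc, float gams, float phi) {
  // d/dphi [pk + gamc*cos(n*phi) + gams*sin(n*phi)]
  return n * (gams * cosf(n * phi) - gamc * sinf(n * phi));
}
// The force on each atom is -dE/dphi times the gradient of phi with respect to that atom's
// position, accumulated with atomicAdd because atoms are shared between dihedrals.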
- */ - -#ifndef MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_GPU_SPONGE_DIHEDRAL_DIHEDRAL_FORCE_WITH_ATOM_ENERGY_KERNEL_H_ -#define MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_GPU_SPONGE_DIHEDRAL_DIHEDRAL_FORCE_WITH_ATOM_ENERGY_KERNEL_H_ -#include -#include -#include -#include -#include "plugin/device/gpu/kernel/gpu_kernel.h" -#include "plugin/device/gpu/kernel/gpu_kernel_factory.h" -#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/cuda_common.h" -#include "plugin/device/gpu/kernel/cuda_impl/sponge/dihedral/dihedral_force_with_atom_energy_impl.cuh" -namespace mindspore { -namespace kernel { -template -class DihedralForceWithAtomEnergyGpuKernelMod : public DeprecatedNativeGpuKernelMod { - public: - DihedralForceWithAtomEnergyGpuKernelMod() : ele_uint_crd(1) {} - ~DihedralForceWithAtomEnergyGpuKernelMod() override = default; - - bool Init(const CNodePtr &kernel_node) override { - kernel_node_ = kernel_node; - dihedral_numbers = static_cast(GetAttr(kernel_node, "dihedral_numbers")); - auto shape_uint_crd = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 0); - auto shape_scaler = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 1); - auto shape_atom_a = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 2); - auto shape_atom_b = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 3); - auto shape_atom_c = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 4); - auto shape_atom_d = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 5); - auto shape_ipn = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 6); - auto shape_pk = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 7); - auto shape_gamc = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 8); - auto shape_gams = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 9); - auto shape_pn = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 10); - - ele_uint_crd *= SizeOf(shape_uint_crd); - ele_scaler *= SizeOf(shape_scaler); - ele_atom_a *= SizeOf(shape_atom_a); - ele_atom_b *= SizeOf(shape_atom_b); - ele_atom_c *= SizeOf(shape_atom_c); - ele_atom_d *= SizeOf(shape_atom_d); - ele_ipn *= SizeOf(shape_ipn); - ele_pk *= SizeOf(shape_pk); - ele_gamc *= SizeOf(shape_gamc); - ele_gams *= SizeOf(shape_gams); - ele_pn *= SizeOf(shape_pn); - InitSizeLists(); - return true; - } - - bool Launch(const std::vector &inputs, const std::vector &, - const std::vector &outputs, void *stream_ptr) override { - auto uint_crd_f = GetDeviceAddress(inputs, 0); - auto scaler_f = GetDeviceAddress(inputs, 1); - auto atom_a = GetDeviceAddress(inputs, 2); - auto atom_b = GetDeviceAddress(inputs, 3); - auto atom_c = GetDeviceAddress(inputs, 4); - auto atom_d = GetDeviceAddress(inputs, 5); - auto ipn = GetDeviceAddress(inputs, 6); - auto pk = GetDeviceAddress(inputs, 7); - auto gamc = GetDeviceAddress(inputs, 8); - auto gams = GetDeviceAddress(inputs, 9); - auto pn = GetDeviceAddress(inputs, 10); - - auto frc_f = GetDeviceAddress(outputs, 0); - auto ene = GetDeviceAddress(outputs, 1); - DihedralForceWithAtomEnergy(dihedral_numbers, ele_uint_crd, uint_crd_f, scaler_f, atom_a, atom_b, atom_c, atom_d, - ipn, pk, gamc, gams, pn, frc_f, ene, reinterpret_cast(stream_ptr)); - return true; - } - - protected: - void InitSizeLists() override { - input_size_list_.push_back(ele_uint_crd * sizeof(T1)); - input_size_list_.push_back(ele_scaler * sizeof(T)); - input_size_list_.push_back(ele_atom_a * sizeof(T1)); - input_size_list_.push_back(ele_atom_b * sizeof(T1)); - input_size_list_.push_back(ele_atom_c * 
sizeof(T1)); - input_size_list_.push_back(ele_atom_d * sizeof(T1)); - input_size_list_.push_back(ele_ipn * sizeof(T1)); - input_size_list_.push_back(ele_pk * sizeof(T)); - input_size_list_.push_back(ele_gamc * sizeof(T)); - input_size_list_.push_back(ele_gams * sizeof(T)); - input_size_list_.push_back(ele_pn * sizeof(T)); - - output_size_list_.push_back(ele_uint_crd * 3 * sizeof(T)); - output_size_list_.push_back(ele_uint_crd * sizeof(T)); - } - - private: - size_t ele_uint_crd = 1; - size_t ele_scaler = 1; - size_t ele_atom_a = 1; - size_t ele_atom_b = 1; - size_t ele_atom_c = 1; - size_t ele_atom_d = 1; - size_t ele_ipn = 1; - size_t ele_pk = 1; - size_t ele_gamc = 1; - size_t ele_gams = 1; - size_t ele_pn = 1; - - int dihedral_numbers; -}; -} // namespace kernel -} // namespace mindspore -#endif diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/lj/lj_direct_cf_force_with_lj_virial_direct_cf_energy_kernel.cc b/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/lj/lj_direct_cf_force_with_lj_virial_direct_cf_energy_kernel.cc deleted file mode 100644 index 4faf780a09f..00000000000 --- a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/lj/lj_direct_cf_force_with_lj_virial_direct_cf_energy_kernel.cc +++ /dev/null @@ -1,39 +0,0 @@ -/** - * Copyright 2021-2022 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -/** - * Note: - * LJForceWithVirialEnergy. This is an experimental interface that is subject to change and/or deletion. - */ -#include "plugin/device/gpu/kernel/sponge/lj/lj_direct_cf_force_with_lj_virial_direct_cf_energy_kernel.h" - -namespace mindspore { -namespace kernel { -MS_REG_GPU_KERNEL_THREE(LJForceWithVirialEnergy, - KernelAttr() - .AddInputAttr(kNumberTypeUInt32) - .AddInputAttr(kNumberTypeInt32) - .AddInputAttr(kNumberTypeFloat32) - .AddInputAttr(kNumberTypeFloat32) - .AddInputAttr(kNumberTypeInt32) - .AddInputAttr(kNumberTypeInt32) - .AddInputAttr(kNumberTypeFloat32) - .AddInputAttr(kNumberTypeFloat32) - .AddOutputAttr(kNumberTypeFloat32) - .AddOutputAttr(kNumberTypeFloat32) - .AddOutputAttr(kNumberTypeFloat32), - LJForceWithVirialEnergyGpuKernelMod, float, int, unsigned int) -} // namespace kernel -} // namespace mindspore diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/lj/lj_direct_cf_force_with_lj_virial_direct_cf_energy_kernel.h b/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/lj/lj_direct_cf_force_with_lj_virial_direct_cf_energy_kernel.h deleted file mode 100644 index 8c562943f2e..00000000000 --- a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/lj/lj_direct_cf_force_with_lj_virial_direct_cf_energy_kernel.h +++ /dev/null @@ -1,135 +0,0 @@ -/** - * Copyright 2021-2022 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
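// DihedralForceWithAtomEnergy (above) fuses the two previous kernels: one pass over the dihedral
// list fills both outputs, which is why the op declares two AddOutputAttr entries. A sketch of
// the accumulation pattern only; frc holds 3 floats per atom, ene one per atom, and names are
// illustrative.
__device__ void AccumulateDihedralResultSketch(int atom, float3 f, float e, float *frc, float *ene) {
  atomicAdd(&frc[3 * atom + 0], f.x);
  atomicAdd(&frc[3 * atom + 1], f.y);
  atomicAdd(&frc[3 * atom + 2], f.z);
  atomicAdd(&ene[atom], e);  // this atom's share of the dihedral energy
}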
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -/** - * Note: - * LJForceWithVirialEnergy. This is an experimental interface that is subject to change and/or deletion. - */ -#ifndef MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_GPU_LJ_DIRECT_CF_FORCE_WITH_LJ_VIRIAL_DIRECT_CF_ENERGY_KERNEL_H_ -#define MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_GPU_LJ_DIRECT_CF_FORCE_WITH_LJ_VIRIAL_DIRECT_CF_ENERGY_KERNEL_H_ -#include -#include -#include -#include -#include "plugin/device/gpu/kernel/gpu_kernel.h" -#include "plugin/device/gpu/kernel/gpu_kernel_factory.h" -#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/cuda_common.h" -#include "plugin/device/gpu/kernel/cuda_impl/sponge/lj/lj_direct_cf_force_with_lj_virial_direct_cf_energy_impl.cuh" -namespace mindspore { -namespace kernel { -template -class LJForceWithVirialEnergyGpuKernelMod : public DeprecatedNativeGpuKernelMod { - public: - LJForceWithVirialEnergyGpuKernelMod() : ele_uint_crd(1) {} - ~LJForceWithVirialEnergyGpuKernelMod() override = default; - - bool Init(const CNodePtr &kernel_node) override { - kernel_node_ = kernel_node; - atom_numbers = static_cast(GetAttr(kernel_node, "atom_numbers")); - max_neighbor_numbers = static_cast(GetAttr(kernel_node, "max_neighbor_numbers")); - cutoff = static_cast(GetAttr(kernel_node, "cutoff")); - pme_beta = static_cast(GetAttr(kernel_node, "pme_beta")); - - auto shape_uint_crd = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 0); - auto shape_LJtype = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 1); - auto shape_charge = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 2); - auto shape_scaler = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 3); - auto shape_nl_numbers = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 4); - auto shape_nl_serial = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 5); - auto shape_d_LJ_a = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 6); - auto shape_d_LJ_b = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 7); - - ele_uint_crd *= SizeOf(shape_uint_crd); - ele_LJtype *= SizeOf(shape_LJtype); - ele_charge *= SizeOf(shape_charge); - ele_scaler *= SizeOf(shape_scaler); - ele_d_LJ_a *= SizeOf(shape_d_LJ_a); - ele_d_LJ_b *= SizeOf(shape_d_LJ_b); - - InitSizeLists(); - return true; - } - - bool Launch(const std::vector &inputs, const std::vector &workspace, - const std::vector &outputs, void *stream_ptr) override { - auto uint_crd = GetDeviceAddress(inputs, 0); - auto LJtype = GetDeviceAddress(inputs, 1); - auto charge = GetDeviceAddress(inputs, 2); - auto scaler = GetDeviceAddress(inputs, 3); - auto nl_numbers = GetDeviceAddress(inputs, 4); - auto nl_serial = GetDeviceAddress(inputs, 5); - auto d_LJ_a = GetDeviceAddress(inputs, 6); - auto d_LJ_b = GetDeviceAddress(inputs, 7); - - auto uint_crd_with_LJ = GetDeviceAddress(workspace, 0); - auto nl = GetDeviceAddress(workspace, 1); - - auto frc = GetDeviceAddress(outputs, 0); - auto atom_lj_virial = GetDeviceAddress(outputs, 1); - auto atom_energy = GetDeviceAddress(outputs, 2); - LJ_Direct_CF_Force_With_LJ_Virial_Direct_CF_Energy(atom_numbers, cutoff, pme_beta, uint_crd, LJtype, 
-                                                       charge, scaler, uint_crd_with_LJ, nl_numbers, nl_serial, nl,
-                                                       d_LJ_a, d_LJ_b, frc, atom_lj_virial, atom_energy,
-                                                       max_neighbor_numbers, reinterpret_cast<cudaStream_t>(stream_ptr));
-    return true;
-  }
-
- protected:
-  void InitSizeLists() override {
-    input_size_list_.push_back(ele_uint_crd * sizeof(T2));
-    input_size_list_.push_back(ele_LJtype * sizeof(T1));
-    input_size_list_.push_back(ele_charge * sizeof(T));
-    input_size_list_.push_back(ele_scaler * sizeof(T));
-    input_size_list_.push_back(atom_numbers * sizeof(T1));
-    input_size_list_.push_back(max_neighbor_numbers * sizeof(T1));
-    input_size_list_.push_back(ele_d_LJ_a * sizeof(T));
-    input_size_list_.push_back(ele_d_LJ_b * sizeof(T));
-
-    workspace_size_list_.push_back(atom_numbers * max_neighbor_numbers * sizeof(T1));
-    workspace_size_list_.push_back(atom_numbers * sizeof(UINT_VECTOR_LJ_TYPE));
-
-    output_size_list_.push_back(atom_numbers * 3 * sizeof(T));
-    output_size_list_.push_back(atom_numbers * sizeof(T));
-    output_size_list_.push_back(atom_numbers * sizeof(T));
-  }
-
- private:
-  size_t ele_uint_crd = 1;
-  size_t ele_LJtype = 1;
-  size_t ele_charge = 1;
-  size_t ele_scaler = 1;
-  size_t ele_nl = 1;
-  size_t ele_d_LJ_a = 1;
-  size_t ele_d_LJ_b = 1;
-
-  int atom_numbers;
-  int max_neighbor_numbers;
-  float pme_beta;
-  float cutoff;
-  struct UINT_VECTOR_LJ_TYPE {
-    unsigned int uint_x;
-    unsigned int uint_y;
-    unsigned int uint_z;
-    int LJ_type;
-    float charge;
-  };
-  struct NEIGHBOR_LIST {
-    int atom_numbers;
-    int *atom_serial;
-  };
-};
-}  // namespace kernel
-}  // namespace mindspore
-#endif
diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/lj/lj_energy_kernel.cc b/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/lj/lj_energy_kernel.cc
deleted file mode 100644
index 361e853cf58..00000000000
--- a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/lj/lj_energy_kernel.cc
+++ /dev/null
@@ -1,33 +0,0 @@
-/**
- * Copyright 2021-2022 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-#include "plugin/device/gpu/kernel/sponge/lj/lj_energy_kernel.h"
-
-namespace mindspore {
-namespace kernel {
-MS_REG_GPU_KERNEL_TWO(LJEnergy,
-                      KernelAttr()
-                        .AddInputAttr(kNumberTypeUInt32)
-                        .AddInputAttr(kNumberTypeInt32)
-                        .AddInputAttr(kNumberTypeFloat32)
-                        .AddInputAttr(kNumberTypeFloat32)
-                        .AddInputAttr(kNumberTypeInt32)
-                        .AddInputAttr(kNumberTypeInt32)
-                        .AddInputAttr(kNumberTypeFloat32)
-                        .AddInputAttr(kNumberTypeFloat32)
-                        .AddOutputAttr(kNumberTypeFloat32),
-                      LJEnergyGpuKernelMod, float, int)
-}  // namespace kernel
-}  // namespace mindspore
diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/lj/lj_energy_kernel.h b/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/lj/lj_energy_kernel.h
deleted file mode 100644
index 6a5c8c217fd..00000000000
--- a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/lj/lj_energy_kernel.h
+++ /dev/null
@@ -1,122 +0,0 @@
-/**
- * Copyright 2021-2022 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-#ifndef MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_GPU_SPONGE_LJ_LJ_ENERGY_KERNEL_H_
-#define MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_GPU_SPONGE_LJ_LJ_ENERGY_KERNEL_H_
-#include <cuda_runtime_api.h>
-#include <vector>
-#include <string>
-#include <map>
-#include "plugin/device/gpu/kernel/gpu_kernel.h"
-#include "plugin/device/gpu/kernel/gpu_kernel_factory.h"
-#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/cuda_common.h"
-#include "plugin/device/gpu/kernel/cuda_impl/sponge/lj/lj_energy_impl.cuh"
-namespace mindspore {
-namespace kernel {
-template <typename T, typename T1>
-class LJEnergyGpuKernelMod : public DeprecatedNativeGpuKernelMod {
- public:
-  LJEnergyGpuKernelMod() : ele_uint_crd(1) {}
-  ~LJEnergyGpuKernelMod() override = default;
-
-  bool Init(const CNodePtr &kernel_node) override {
-    kernel_node_ = kernel_node;
-    atom_numbers = static_cast<int>(GetAttr<int64_t>(kernel_node, "atom_numbers"));
-    cutoff_square = static_cast<float>(GetAttr<float>(kernel_node, "cutoff_square"));
-
-    auto shape_uint_crd = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 0);
-    auto shape_LJtype = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 1);
-    auto shape_charge = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 2);
-    auto shape_scaler = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 3);
-    auto shape_nl_numbers = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 4);
-    auto shape_nl_serial = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 5);
-    auto shape_d_LJ_a = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 6);
-    auto shape_d_LJ_b = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 7);
-
-    ele_uint_crd *= SizeOf(shape_uint_crd);
-    ele_LJtype *= SizeOf(shape_LJtype);
-    ele_charge *= SizeOf(shape_charge);
-    ele_scaler *= SizeOf(shape_scaler);
-    ele_d_LJ_a *= SizeOf(shape_d_LJ_a);
-    ele_d_LJ_b *= SizeOf(shape_d_LJ_b);
-
-    InitSizeLists();
-    return true;
-  }
-
-  bool Launch(const std::vector<AddressPtr> &inputs, const std::vector<AddressPtr> &workspace,
-              const std::vector<AddressPtr> &outputs, void *stream_ptr) override {
-    auto uint_crd = GetDeviceAddress<T1>(inputs, 0);
-    auto LJtype = GetDeviceAddress<T1>(inputs, 1);
-    auto charge = GetDeviceAddress<T>(inputs, 2);
-    auto scaler = GetDeviceAddress<T>(inputs, 3);
-    auto nl_numbers = GetDeviceAddress<T1>(inputs, 4);
-    auto nl_serial = GetDeviceAddress<T1>(inputs, 5);
-    auto d_LJ_a = GetDeviceAddress<T>(inputs, 6);
-    auto d_LJ_b = GetDeviceAddress<T>(inputs, 7);
-
-    auto uint_crd_with_LJ = GetDeviceAddress<UINT_VECTOR_LJ_TYPE>(workspace, 0);
-    auto nl = GetDeviceAddress<NEIGHBOR_LIST>(workspace, 1);
-
-    auto d_LJ_energy_atom = GetDeviceAddress<T>(outputs, 0);
-    LJEnergy(atom_numbers, cutoff_square, uint_crd, LJtype, charge, scaler, uint_crd_with_LJ, nl_numbers, nl_serial, nl,
-             d_LJ_a, d_LJ_b, d_LJ_energy_atom, reinterpret_cast<cudaStream_t>(stream_ptr));
-    return true;
-  }
-
- protected:
-  void InitSizeLists() override {
-    input_size_list_.push_back(ele_uint_crd * sizeof(T1));
-    input_size_list_.push_back(ele_LJtype * sizeof(T1));
-    input_size_list_.push_back(ele_charge * sizeof(T));
-    input_size_list_.push_back(ele_scaler * sizeof(T));
-    input_size_list_.push_back(atom_numbers * sizeof(T1));
-    input_size_list_.push_back(max_nl_numbers * sizeof(T1));
-    input_size_list_.push_back(ele_d_LJ_a * sizeof(T));
-    input_size_list_.push_back(ele_d_LJ_b * sizeof(T));
-
-    workspace_size_list_.push_back(atom_numbers * max_nl_numbers * sizeof(T1));
-    workspace_size_list_.push_back(atom_numbers * sizeof(UINT_VECTOR_LJ_TYPE));
-
-    output_size_list_.push_back(atom_numbers * sizeof(T));
-  }
-
- private:
-  size_t ele_uint_crd = 1;
-  size_t ele_LJtype = 1;
-  size_t ele_charge = 1;
-  size_t ele_scaler = 1;
-  size_t ele_nl = 1;
-  size_t ele_d_LJ_a = 1;
-  size_t ele_d_LJ_b = 1;
-
-  int atom_numbers;
-  float cutoff_square;
-  int max_nl_numbers = 800;
-  struct UINT_VECTOR_LJ_TYPE {
-    unsigned int uint_x;
-    unsigned int uint_y;
-    unsigned int uint_z;
-    int LJ_type;
-    float charge;
-  };
-  struct NEIGHBOR_LIST {
-    int atom_numbers;
-    int *atom_serial;
-  };
-};
-}  // namespace kernel
-}  // namespace mindspore
-#endif
diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/lj/lj_force_kernel.cc b/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/lj/lj_force_kernel.cc
deleted file mode 100644
index 0887aade6ad..00000000000
--- a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/lj/lj_force_kernel.cc
+++ /dev/null
@@ -1,33 +0,0 @@
-/**
- * Copyright 2021-2022 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-#include "plugin/device/gpu/kernel/sponge/lj/lj_force_kernel.h"
-
-namespace mindspore {
-namespace kernel {
-MS_REG_GPU_KERNEL_TWO(LJForce,
-                      KernelAttr()
-                        .AddInputAttr(kNumberTypeUInt32)
-                        .AddInputAttr(kNumberTypeInt32)
-                        .AddInputAttr(kNumberTypeFloat32)
-                        .AddInputAttr(kNumberTypeFloat32)
-                        .AddInputAttr(kNumberTypeInt32)
-                        .AddInputAttr(kNumberTypeInt32)
-                        .AddInputAttr(kNumberTypeFloat32)
-                        .AddInputAttr(kNumberTypeFloat32)
-                        .AddOutputAttr(kNumberTypeFloat32),
-                      LJForceGpuKernelMod, float, int)
-}  // namespace kernel
-}  // namespace mindspore
diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/lj/lj_force_kernel.h b/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/lj/lj_force_kernel.h
deleted file mode 100644
index 9c1f12a82dd..00000000000
--- a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/lj/lj_force_kernel.h
+++ /dev/null
@@ -1,121 +0,0 @@
-/**
- * Copyright 2021-2022 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-#ifndef MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_GPU_SPONGE_LJ_LJ_FORCE_KERNEL_H_
-#define MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_GPU_SPONGE_LJ_LJ_FORCE_KERNEL_H_
-#include <cuda_runtime_api.h>
-#include <vector>
-#include <string>
-#include <map>
-#include "plugin/device/gpu/kernel/gpu_kernel.h"
-#include "plugin/device/gpu/kernel/gpu_kernel_factory.h"
-#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/cuda_common.h"
-#include "plugin/device/gpu/kernel/cuda_impl/sponge/lj/lj_force_impl.cuh"
-namespace mindspore {
-namespace kernel {
-template <typename T, typename T1>
-class LJForceGpuKernelMod : public DeprecatedNativeGpuKernelMod {
- public:
-  LJForceGpuKernelMod() : ele_uint_crd(1) {}
-  ~LJForceGpuKernelMod() override = default;
-
-  bool Init(const CNodePtr &kernel_node) override {
-    kernel_node_ = kernel_node;
-    atom_numbers = static_cast<int>(GetAttr<int64_t>(kernel_node, "atom_numbers"));
-    cutoff_square = static_cast<float>(GetAttr<float>(kernel_node, "cutoff_square"));
-
-    auto shape_uint_crd = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 0);
-    auto shape_LJtype = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 1);
-    auto shape_charge = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 2);
-    auto shape_scaler = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 3);
-    auto shape_nl_numbers = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 4);
-    auto shape_nl_serial = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 5);
-    auto shape_d_LJ_a = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 6);
-    auto shape_d_LJ_b = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 7);
-
-    ele_uint_crd *= SizeOf(shape_uint_crd);
-    ele_LJtype *= SizeOf(shape_LJtype);
-    ele_charge *= SizeOf(shape_charge);
-    ele_scaler *= SizeOf(shape_scaler);
-    ele_d_LJ_a *= SizeOf(shape_d_LJ_a);
-    ele_d_LJ_b *= SizeOf(shape_d_LJ_b);
-
-    InitSizeLists();
-    return true;
-  }
-
-  bool Launch(const std::vector<AddressPtr> &inputs, const std::vector<AddressPtr> &workspace,
-              const std::vector<AddressPtr> &outputs, void *stream_ptr) override {
-    auto uint_crd = GetDeviceAddress<T1>(inputs, 0);
-    auto LJtype = GetDeviceAddress<T1>(inputs, 1);
-    auto charge = GetDeviceAddress<T>(inputs, 2);
-    auto scaler = GetDeviceAddress<T>(inputs, 3);
-    auto nl_numbers = GetDeviceAddress<T1>(inputs, 4);
-    auto nl_serial = GetDeviceAddress<T1>(inputs, 5);
-    auto d_LJ_a = GetDeviceAddress<T>(inputs, 6);
-    auto d_LJ_b = GetDeviceAddress<T>(inputs, 7);
-
-    auto uint_crd_with_LJ = GetDeviceAddress<UINT_VECTOR_LJ_TYPE>(workspace, 0);
-    auto nl = GetDeviceAddress<NEIGHBOR_LIST>(workspace, 1);
-
-    auto frc = GetDeviceAddress<T>(outputs, 0);
-    LJForce(atom_numbers, cutoff_square, uint_crd, LJtype, charge, scaler, uint_crd_with_LJ, nl_numbers, nl_serial, nl,
-            d_LJ_a, d_LJ_b, frc, reinterpret_cast<cudaStream_t>(stream_ptr));
-    return true;
-  }
-
- protected:
-  void InitSizeLists() override {
-    input_size_list_.push_back(ele_uint_crd * sizeof(T1));
-    input_size_list_.push_back(ele_LJtype * sizeof(T1));
-    input_size_list_.push_back(ele_charge * sizeof(T));
-    input_size_list_.push_back(ele_scaler * sizeof(T));
-    input_size_list_.push_back(atom_numbers * sizeof(T1));
-    input_size_list_.push_back(max_nl_numbers * sizeof(T1));
-    input_size_list_.push_back(ele_d_LJ_a * sizeof(T));
-    input_size_list_.push_back(ele_d_LJ_b * sizeof(T));
-
-    workspace_size_list_.push_back(atom_numbers * max_nl_numbers * sizeof(T1));
-    workspace_size_list_.push_back(atom_numbers * sizeof(UINT_VECTOR_LJ_TYPE));
-
-    output_size_list_.push_back(atom_numbers * 3 * sizeof(T));
-  }
-
- private:
-  size_t ele_uint_crd = 1;
-  size_t ele_LJtype = 1;
-  size_t ele_charge = 1;
-  size_t ele_scaler = 1;
-  size_t ele_d_LJ_a = 1;
-  size_t ele_d_LJ_b = 1;
-
-  int atom_numbers;
-  float cutoff_square;
-  int max_nl_numbers = 800;
-  struct UINT_VECTOR_LJ_TYPE {
-    unsigned int uint_x;
-    unsigned int uint_y;
-    unsigned int uint_z;
-    int LJ_type;
-    float charge;
-  };
-  struct NEIGHBOR_LIST {
-    int atom_numbers;
-    int *atom_serial;
-  };
-};
-}  // namespace kernel
-}  // namespace mindspore
-#endif
diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/lj/lj_force_with_pme_direct_force_kernel.cc b/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/lj/lj_force_with_pme_direct_force_kernel.cc
deleted file mode 100644
index 20eb3981f51..00000000000
--- a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/lj/lj_force_with_pme_direct_force_kernel.cc
+++ /dev/null
@@ -1,33 +0,0 @@
-/**
- * Copyright 2021-2022 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-#include "plugin/device/gpu/kernel/sponge/lj/lj_force_with_pme_direct_force_kernel.h"
-
-namespace mindspore {
-namespace kernel {
-MS_REG_GPU_KERNEL_TWO(LJForceWithPMEDirectForce,
-                      KernelAttr()
-                        .AddInputAttr(kNumberTypeUInt32)
-                        .AddInputAttr(kNumberTypeInt32)
-                        .AddInputAttr(kNumberTypeFloat32)
-                        .AddInputAttr(kNumberTypeFloat32)
-                        .AddInputAttr(kNumberTypeInt32)
-                        .AddInputAttr(kNumberTypeInt32)
-                        .AddInputAttr(kNumberTypeFloat32)
-                        .AddInputAttr(kNumberTypeFloat32)
-                        .AddOutputAttr(kNumberTypeFloat32),
-                      LJForceWithPMEDirectForceGpuKernelMod, float, int)
-}  // namespace kernel
-}  // namespace mindspore
diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/lj/lj_force_with_pme_direct_force_kernel.h b/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/lj/lj_force_with_pme_direct_force_kernel.h
deleted file mode 100644
index b47a1de9c6e..00000000000
--- a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/lj/lj_force_with_pme_direct_force_kernel.h
+++ /dev/null
@@ -1,125 +0,0 @@
-/**
- * Copyright 2021-2022 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-#ifndef MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_GPU_SPONGE_LJ_LJ_FORCE_WITH_PME_DIRECT_FORCE_KERNEL_H_
-#define MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_GPU_SPONGE_LJ_LJ_FORCE_WITH_PME_DIRECT_FORCE_KERNEL_H_
-#include <cuda_runtime_api.h>
-#include <vector>
-#include <string>
-#include <map>
-#include "plugin/device/gpu/kernel/gpu_kernel.h"
-#include "plugin/device/gpu/kernel/gpu_kernel_factory.h"
-#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/cuda_common.h"
-#include "plugin/device/gpu/kernel/cuda_impl/sponge/lj/lj_force_with_pme_direct_force_impl.cuh"
-namespace mindspore {
-namespace kernel {
-template <typename T, typename T1>
-class LJForceWithPMEDirectForceGpuKernelMod : public DeprecatedNativeGpuKernelMod {
- public:
-  LJForceWithPMEDirectForceGpuKernelMod() : ele_uint_crd(1) {}
-  ~LJForceWithPMEDirectForceGpuKernelMod() override = default;
-
-  bool Init(const CNodePtr &kernel_node) override {
-    kernel_node_ = kernel_node;
-    atom_numbers = static_cast<int>(GetAttr<int64_t>(kernel_node, "atom_numbers"));
-    cutoff = static_cast<float>(GetAttr<float>(kernel_node, "cutoff"));
-    pme_beta = static_cast<float>(GetAttr<float>(kernel_node, "pme_beta"));
-
-    auto shape_uint_crd = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 0);
-    auto shape_LJtype = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 1);
-    auto shape_charge = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 2);
-    auto shape_scaler = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 3);
-    auto shape_nl_numbers = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 4);
-    auto shape_nl_serial = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 5);
-    auto shape_d_LJ_a = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 6);
-    auto shape_d_LJ_b = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 7);
-
-    ele_uint_crd *= SizeOf(shape_uint_crd);
-    ele_LJtype *= SizeOf(shape_LJtype);
-    ele_charge *= SizeOf(shape_charge);
-    ele_scaler *= SizeOf(shape_scaler);
-    ele_d_LJ_a *= SizeOf(shape_d_LJ_a);
-    ele_d_LJ_b *= SizeOf(shape_d_LJ_b);
-
-    InitSizeLists();
-    return true;
-  }
-
-  bool Launch(const std::vector<AddressPtr> &inputs, const std::vector<AddressPtr> &workspace,
-              const std::vector<AddressPtr> &outputs, void *stream_ptr) override {
-    auto uint_crd = GetDeviceAddress<T1>(inputs, 0);
-    auto LJtype = GetDeviceAddress<T1>(inputs, 1);
-    auto charge = GetDeviceAddress<T>(inputs, 2);
-    auto scaler = GetDeviceAddress<T>(inputs, 3);
-    auto nl_numbers = GetDeviceAddress<T1>(inputs, 4);
-    auto nl_serial = GetDeviceAddress<T1>(inputs, 5);
-    auto d_LJ_a = GetDeviceAddress<T>(inputs, 6);
-    auto d_LJ_b = GetDeviceAddress<T>(inputs, 7);
-
-    auto uint_crd_with_LJ = GetDeviceAddress<UINT_VECTOR_LJ_TYPE>(workspace, 0);
-    auto nl = GetDeviceAddress<NEIGHBOR_LIST>(workspace, 1);
-
-    auto frc = GetDeviceAddress<T>(outputs, 0);
-    LJForceWithPMEDirectForce(atom_numbers, cutoff, pme_beta, uint_crd, LJtype, charge, scaler, uint_crd_with_LJ,
-                              nl_numbers, nl_serial, nl, d_LJ_a, d_LJ_b, frc,
-                              reinterpret_cast<cudaStream_t>(stream_ptr));
-    return true;
-  }
-
- protected:
-  void InitSizeLists() override {
-    input_size_list_.push_back(ele_uint_crd * sizeof(T1));
-    input_size_list_.push_back(ele_LJtype * sizeof(T1));
-    input_size_list_.push_back(ele_charge * sizeof(T));
-    input_size_list_.push_back(ele_scaler * sizeof(T));
-    input_size_list_.push_back(atom_numbers * sizeof(T1));
-    input_size_list_.push_back(max_nl_numbers * sizeof(T1));
-    input_size_list_.push_back(ele_d_LJ_a * sizeof(T));
-    input_size_list_.push_back(ele_d_LJ_b * sizeof(T));
-
-    workspace_size_list_.push_back(atom_numbers * max_nl_numbers * sizeof(T1));
-    workspace_size_list_.push_back(atom_numbers * sizeof(UINT_VECTOR_LJ_TYPE));
-
-    output_size_list_.push_back(atom_numbers * 3 * sizeof(T));
-  }
-
- private:
-  size_t ele_uint_crd = 1;
-  size_t ele_LJtype = 1;
-  size_t ele_charge = 1;
-  size_t ele_scaler = 1;
-  size_t ele_nl = 1;
-  size_t ele_d_LJ_a = 1;
-  size_t ele_d_LJ_b = 1;
-
-  int atom_numbers;
-  float pme_beta;
-  float cutoff;
-  int max_nl_numbers = 800;
-  struct UINT_VECTOR_LJ_TYPE {
-    unsigned int uint_x;
-    unsigned int uint_y;
-    unsigned int uint_z;
-    int LJ_type;
-    float charge;
-  };
-  struct NEIGHBOR_LIST {
-    int atom_numbers;
-    int *atom_serial;
-  };
-};
-}  // namespace kernel
-}  // namespace mindspore
-#endif
diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/lj/lj_force_with_pme_direct_force_update_kernel.cc b/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/lj/lj_force_with_pme_direct_force_update_kernel.cc
deleted file mode 100644
index a7c9664e870..00000000000
--- a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/lj/lj_force_with_pme_direct_force_update_kernel.cc
+++ /dev/null
@@ -1,38 +0,0 @@
-/**
- * Copyright 2021-2022 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-/**
- * Note:
- * LJForceWithPMEDirectForceUpdate. This is an experimental interface that is subject to change and/or deletion.
- */
-#include "plugin/device/gpu/kernel/sponge/lj/lj_force_with_pme_direct_force_update_kernel.h"
-
-namespace mindspore {
-namespace kernel {
-MS_REG_GPU_KERNEL_TWO(LJForceWithPMEDirectForceUpdate,
-                      KernelAttr()
-                        .AddInputAttr(kNumberTypeUInt32)
-                        .AddInputAttr(kNumberTypeInt32)
-                        .AddInputAttr(kNumberTypeFloat32)
-                        .AddInputAttr(kNumberTypeFloat32)
-                        .AddInputAttr(kNumberTypeInt32)
-                        .AddInputAttr(kNumberTypeInt32)
-                        .AddInputAttr(kNumberTypeFloat32)
-                        .AddInputAttr(kNumberTypeFloat32)
-                        .AddInputAttr(kNumberTypeFloat32)
-                        .AddOutputAttr(kNumberTypeFloat32),
-                      LJForceWithPMEDirectForceUpdateGpuKernelMod, float, int)
-}  // namespace kernel
-}  // namespace mindspore
diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/lj/lj_force_with_pme_direct_force_update_kernel.h b/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/lj/lj_force_with_pme_direct_force_update_kernel.h
deleted file mode 100644
index 39827ddbeb5..00000000000
--- a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/lj/lj_force_with_pme_direct_force_update_kernel.h
+++ /dev/null
@@ -1,144 +0,0 @@
-/**
- * Copyright 2021-2022 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-/**
- * Note:
- * LJForceWithPMEDirectForceUpdate. This is an experimental interface that is subject to change and/or deletion.
- */
-#ifndef MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_GPU_SPONGE_LJ_LJ_FORCE_WITH_PME_DIRECT_FORCE_KERNEL_H_
-#define MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_GPU_SPONGE_LJ_LJ_FORCE_WITH_PME_DIRECT_FORCE_KERNEL_H_
-#include <cuda_runtime_api.h>
-#include <vector>
-#include <string>
-#include <map>
-#include "plugin/device/gpu/kernel/gpu_kernel.h"
-#include "plugin/device/gpu/kernel/gpu_kernel_factory.h"
-#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/cuda_common.h"
-#include "plugin/device/gpu/kernel/cuda_impl/sponge/lj/lj_force_with_pme_direct_force_impl.cuh"
-namespace mindspore {
-namespace kernel {
-template <typename T, typename T1>
-class LJForceWithPMEDirectForceUpdateGpuKernelMod : public DeprecatedNativeGpuKernelMod {
- public:
-  LJForceWithPMEDirectForceUpdateGpuKernelMod() : ele_uint_crd(1) {}
-  ~LJForceWithPMEDirectForceUpdateGpuKernelMod() override = default;
-
-  bool Init(const CNodePtr &kernel_node) override {
-    kernel_node_ = kernel_node;
-    atom_numbers = static_cast<int>(GetAttr<int64_t>(kernel_node, "atom_numbers"));
-    cutoff = static_cast<float>(GetAttr<float>(kernel_node, "cutoff"));
-    pme_beta = static_cast<float>(GetAttr<float>(kernel_node, "pme_beta"));
-    need_update = static_cast<int>(GetAttr<int64_t>(kernel_node, "need_update"));
-
-    auto shape_uint_crd = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 0);
-    auto shape_LJtype = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 1);
-    auto shape_charge = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 2);
-    auto shape_scaler = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 3);
-    auto shape_nl_numbers = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 4);
-    auto shape_nl_serial = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 5);
-    auto shape_d_LJ_a = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 6);
-    auto shape_d_LJ_b = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 7);
-
-    ele_uint_crd *= SizeOf(shape_uint_crd);
-    ele_LJtype *= SizeOf(shape_LJtype);
-    ele_charge *= SizeOf(shape_charge);
-    ele_scaler *= SizeOf(shape_scaler);
-    ele_d_LJ_a *= SizeOf(shape_d_LJ_a);
-    ele_d_LJ_b *= SizeOf(shape_d_LJ_b);
-
-    InitSizeLists();
-    return true;
-  }
-
-  bool Launch(const std::vector<AddressPtr> &inputs, const std::vector<AddressPtr> &workspace,
-              const std::vector<AddressPtr> &outputs, void *stream_ptr) override {
-    auto uint_crd = GetDeviceAddress<T1>(inputs, 0);
-    auto LJtype = GetDeviceAddress<T1>(inputs, 1);
-    auto charge = GetDeviceAddress<T>(inputs, 2);
-    auto scaler = GetDeviceAddress<T>(inputs, 3);
-    auto nl_numbers = GetDeviceAddress<T1>(inputs, 4);
-    auto nl_serial = GetDeviceAddress<T1>(inputs, 5);
-    auto d_LJ_a = GetDeviceAddress<T>(inputs, 6);
-    auto d_LJ_b = GetDeviceAddress<T>(inputs, 7);
-    auto d_beta = GetDeviceAddress<T>(inputs, 8);
-
-    if (need_update) {
-      cudaMemcpyAsync(&pme_beta, d_beta, sizeof(float), cudaMemcpyDeviceToHost,
-                      reinterpret_cast<cudaStream_t>(stream_ptr));
-      cudaStreamSynchronize(reinterpret_cast<cudaStream_t>(stream_ptr));
-    }
-
-    auto uint_crd_with_LJ = GetDeviceAddress<UINT_VECTOR_LJ_TYPE>(workspace, 0);
-    auto nl = GetDeviceAddress<NEIGHBOR_LIST>(workspace, 1);
-
-    auto frc = GetDeviceAddress<T>(outputs, 0);
-    LJForceWithPMEDirectForce(atom_numbers, cutoff, pme_beta, uint_crd, LJtype, charge, scaler, uint_crd_with_LJ,
-                              nl_numbers, nl_serial, nl, d_LJ_a, d_LJ_b, frc,
-                              reinterpret_cast<cudaStream_t>(stream_ptr));
-    return true;
-  }
-
- protected:
-  void InitSizeLists() override {
-    input_size_list_.push_back(ele_uint_crd * sizeof(T1));
-    input_size_list_.push_back(ele_LJtype * sizeof(T1));
-    input_size_list_.push_back(ele_charge * sizeof(T));
-    input_size_list_.push_back(ele_scaler * sizeof(T));
-    input_size_list_.push_back(atom_numbers * sizeof(T1));
-    input_size_list_.push_back(max_nl_numbers * sizeof(T1));
-    input_size_list_.push_back(ele_d_LJ_a * sizeof(T));
-    input_size_list_.push_back(ele_d_LJ_b * sizeof(T));
-    input_size_list_.push_back(sizeof(T));
-
-    workspace_size_list_.push_back(atom_numbers * max_nl_numbers * sizeof(T1));
-    workspace_size_list_.push_back(atom_numbers * sizeof(UINT_VECTOR_LJ_TYPE));
-
-    output_size_list_.push_back(atom_numbers * 3 * sizeof(T));
-  }
-
- private:
-  size_t ele_uint_crd = 1;
-  size_t ele_LJtype = 1;
-  size_t ele_charge = 1;
-  size_t ele_scaler = 1;
-  size_t ele_nl = 1;
-  size_t ele_d_LJ_a = 1;
-  size_t ele_d_LJ_b = 1;
-
-  int atom_numbers;
-  float pme_beta;
-  float cutoff;
-  int need_update;
-  int max_nl_numbers = 800;
-  struct UINT_VECTOR_LJ_TYPE {
-    unsigned int uint_x;
-    unsigned int uint_y;
-    unsigned int uint_z;
-    int LJ_type;
-    float charge;
-  };
-  struct NEIGHBOR_LIST {
-    int atom_numbers;
-    int *atom_serial;
-  };
-  struct VECTOR {
-    float x;
-    float y;
-    float z;
-  };
-};
-}  // namespace kernel
-}  // namespace mindspore
-#endif
diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/lj/lj_force_with_virial_energy_update_kernel.cc b/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/lj/lj_force_with_virial_energy_update_kernel.cc
deleted file mode 100644
index 46667e01eee..00000000000
--- a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/lj/lj_force_with_virial_energy_update_kernel.cc
+++ /dev/null
@@ -1,40 +0,0 @@
-/**
- * Copyright 2021-2022 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-/**
- * Note:
- * LJForceWithVirialEnergyUpdate. This is an experimental interface that is subject to change and/or deletion.
- */
-#include "plugin/device/gpu/kernel/sponge/lj/lj_force_with_virial_energy_update_kernel.h"
-
-namespace mindspore {
-namespace kernel {
-MS_REG_GPU_KERNEL_THREE(LJForceWithVirialEnergyUpdate,
-                        KernelAttr()
-                          .AddInputAttr(kNumberTypeUInt32)
-                          .AddInputAttr(kNumberTypeInt32)
-                          .AddInputAttr(kNumberTypeFloat32)
-                          .AddInputAttr(kNumberTypeFloat32)
-                          .AddInputAttr(kNumberTypeInt32)
-                          .AddInputAttr(kNumberTypeInt32)
-                          .AddInputAttr(kNumberTypeFloat32)
-                          .AddInputAttr(kNumberTypeFloat32)
-                          .AddInputAttr(kNumberTypeFloat32)
-                          .AddOutputAttr(kNumberTypeFloat32)
-                          .AddOutputAttr(kNumberTypeFloat32)
-                          .AddOutputAttr(kNumberTypeFloat32),
-                        LJForceWithVirialEnergyUpdateGpuKernelMod, float, int, unsigned int)
-}  // namespace kernel
-}  // namespace mindspore
diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/lj/lj_force_with_virial_energy_update_kernel.h b/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/lj/lj_force_with_virial_energy_update_kernel.h
deleted file mode 100644
index 5cdb6bc3320..00000000000
--- a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/lj/lj_force_with_virial_energy_update_kernel.h
+++ /dev/null
@@ -1,145 +0,0 @@
-/**
- * Copyright 2021-2022 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-/**
- * Note:
- * LJForceWithVirialEnergyUpdate. This is an experimental interface that is subject to change and/or deletion.
- */
-#ifndef MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_GPU_LJ_FORCE_WITH_VIRIAL_ENERGY_UPDATE_KERNEL_H_
-#define MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_GPU_LJ_FORCE_WITH_VIRIAL_ENERGY_UPDATE_KERNEL_H_
-#include <cuda_runtime_api.h>
-#include <vector>
-#include <string>
-#include <map>
-#include "plugin/device/gpu/kernel/gpu_kernel.h"
-#include "plugin/device/gpu/kernel/gpu_kernel_factory.h"
-#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/cuda_common.h"
-#include "plugin/device/gpu/kernel/cuda_impl/sponge/lj/lj_direct_cf_force_with_lj_virial_direct_cf_energy_impl.cuh"
-namespace mindspore {
-namespace kernel {
-template <typename T, typename T1, typename T2>
-class LJForceWithVirialEnergyUpdateGpuKernelMod : public DeprecatedNativeGpuKernelMod {
- public:
-  LJForceWithVirialEnergyUpdateGpuKernelMod() : ele_uint_crd(1) {}
-  ~LJForceWithVirialEnergyUpdateGpuKernelMod() override = default;
-
-  bool Init(const CNodePtr &kernel_node) override {
-    kernel_node_ = kernel_node;
-    atom_numbers = static_cast<int>(GetAttr<int64_t>(kernel_node, "atom_numbers"));
-    max_neighbor_numbers = static_cast<int>(GetAttr<int64_t>(kernel_node, "max_neighbor_numbers"));
-    cutoff = static_cast<float>(GetAttr<float>(kernel_node, "cutoff"));
-    pme_beta = static_cast<float>(GetAttr<float>(kernel_node, "pme_beta"));
-    need_update = static_cast<int>(GetAttr<int64_t>(kernel_node, "need_update"));
-
-    auto shape_uint_crd = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 0);
-    auto shape_LJtype = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 1);
-    auto shape_charge = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 2);
-    auto shape_scaler = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 3);
-    auto shape_nl_numbers = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 4);
-    auto shape_nl_serial = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 5);
-    auto shape_d_LJ_a = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 6);
-    auto shape_d_LJ_b = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 7);
-
-    ele_uint_crd *= SizeOf(shape_uint_crd);
-    ele_LJtype *= SizeOf(shape_LJtype);
-    ele_charge *= SizeOf(shape_charge);
-    ele_scaler *= SizeOf(shape_scaler);
-    ele_d_LJ_a *= SizeOf(shape_d_LJ_a);
-    ele_d_LJ_b *= SizeOf(shape_d_LJ_b);
-
-    InitSizeLists();
-    return true;
-  }
-
-  bool Launch(const std::vector<AddressPtr> &inputs, const std::vector<AddressPtr> &workspace,
-              const std::vector<AddressPtr> &outputs, void *stream_ptr) override {
-    auto uint_crd = GetDeviceAddress<T2>(inputs, 0);
-    auto LJtype = GetDeviceAddress<T1>(inputs, 1);
-    auto charge = GetDeviceAddress<T>(inputs, 2);
-    auto scaler = GetDeviceAddress<T>(inputs, 3);
-    auto nl_numbers = GetDeviceAddress<T1>(inputs, 4);
-    auto nl_serial = GetDeviceAddress<T1>(inputs, 5);
-    auto d_LJ_a = GetDeviceAddress<T>(inputs, 6);
-    auto d_LJ_b = GetDeviceAddress<T>(inputs, 7);
-    auto d_beta = GetDeviceAddress<T>(inputs, 8);
-
-    auto uint_crd_with_LJ = GetDeviceAddress<UINT_VECTOR_LJ_TYPE>(workspace, 0);
-    auto nl = GetDeviceAddress<NEIGHBOR_LIST>(workspace, 1);
-
-    auto frc = GetDeviceAddress<T>(outputs, 0);
-    auto atom_lj_virial = GetDeviceAddress<T>(outputs, 1);
-    auto atom_energy = GetDeviceAddress<T>(outputs, 2);
-
-    if (need_update) {
-      cudaMemcpyAsync(&pme_beta, d_beta, sizeof(float), cudaMemcpyDeviceToHost,
-                      reinterpret_cast<cudaStream_t>(stream_ptr));
-      cudaStreamSynchronize(reinterpret_cast<cudaStream_t>(stream_ptr));
-    }
-
-    LJ_Direct_CF_Force_With_LJ_Virial_Direct_CF_Energy(atom_numbers, cutoff, pme_beta, uint_crd, LJtype, charge, scaler,
-                                                       uint_crd_with_LJ, nl_numbers, nl_serial, nl, d_LJ_a, d_LJ_b, frc,
-                                                       atom_lj_virial, atom_energy, max_neighbor_numbers,
-                                                       reinterpret_cast<cudaStream_t>(stream_ptr));
-    return true;
-  }
-
- protected:
-  void InitSizeLists() override {
-    input_size_list_.push_back(ele_uint_crd * sizeof(T2));
-    input_size_list_.push_back(ele_LJtype * sizeof(T1));
-    input_size_list_.push_back(ele_charge * sizeof(T));
-    input_size_list_.push_back(ele_scaler * sizeof(T));
-    input_size_list_.push_back(atom_numbers * sizeof(T1));
-    input_size_list_.push_back(max_neighbor_numbers * sizeof(T1));
-    input_size_list_.push_back(ele_d_LJ_a * sizeof(T));
-    input_size_list_.push_back(ele_d_LJ_b * sizeof(T));
-
-    workspace_size_list_.push_back(atom_numbers * max_neighbor_numbers * sizeof(T1));
-    workspace_size_list_.push_back(atom_numbers * sizeof(UINT_VECTOR_LJ_TYPE));
-
-    output_size_list_.push_back(atom_numbers * 3 * sizeof(T));
-    output_size_list_.push_back(atom_numbers * sizeof(T));
-    output_size_list_.push_back(atom_numbers * sizeof(T));
-  }
-
- private:
-  size_t ele_uint_crd = 1;
-  size_t ele_LJtype = 1;
-  size_t ele_charge = 1;
-  size_t ele_scaler = 1;
-  size_t ele_nl = 1;
-  size_t ele_d_LJ_a = 1;
-  size_t ele_d_LJ_b = 1;
-
-  int atom_numbers;
-  int max_neighbor_numbers;
-  float pme_beta;
-  float cutoff;
-  int need_update;
-  struct UINT_VECTOR_LJ_TYPE {
-    unsigned int uint_x;
-    unsigned int uint_y;
-    unsigned int uint_z;
-    int LJ_type;
-    float charge;
-  };
-  struct NEIGHBOR_LIST {
-    int atom_numbers;
-    int *atom_serial;
-  };
-};
-}  // namespace kernel
-}  // namespace mindspore
-#endif
diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/nb14/dihedral_14_cf_atom_energy_kernel.cc b/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/nb14/dihedral_14_cf_atom_energy_kernel.cc
deleted file mode 100644
index cae02b472de..00000000000
--- a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/nb14/dihedral_14_cf_atom_energy_kernel.cc
+++ /dev/null
@@ -1,33 +0,0 @@
-/**
- * Copyright 2021-2022 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "plugin/device/gpu/kernel/sponge/nb14/dihedral_14_cf_atom_energy_kernel.h"
-
-namespace mindspore {
-namespace kernel {
-MS_REG_GPU_KERNEL_TWO(Dihedral14CFAtomEnergy,
-                      KernelAttr()
-                        .AddInputAttr(kNumberTypeUInt32)
-                        .AddInputAttr(kNumberTypeInt32)
-                        .AddInputAttr(kNumberTypeFloat32)
-                        .AddInputAttr(kNumberTypeFloat32)
-                        .AddInputAttr(kNumberTypeInt32)
-                        .AddInputAttr(kNumberTypeInt32)
-                        .AddInputAttr(kNumberTypeFloat32)
-                        .AddOutputAttr(kNumberTypeFloat32),
-                      Dihedral14CFAtomEnergyGpuKernelMod, float, int)
-}  // namespace kernel
-}  // namespace mindspore
diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/nb14/dihedral_14_cf_atom_energy_kernel.h b/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/nb14/dihedral_14_cf_atom_energy_kernel.h
deleted file mode 100644
index 64dd2f34bfb..00000000000
--- a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/nb14/dihedral_14_cf_atom_energy_kernel.h
+++ /dev/null
@@ -1,106 +0,0 @@
-/**
- * Copyright 2019-2022 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_GPU_SPONG_NB14_DIHEDRAL_14_CF_ATOM_ENERGY_KERNEL_H_
-#define MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_GPU_SPONG_NB14_DIHEDRAL_14_CF_ATOM_ENERGY_KERNEL_H_
-
-#include <cuda_runtime_api.h>
-#include <vector>
-#include <string>
-#include <map>
-#include "plugin/device/gpu/kernel/gpu_kernel.h"
-#include "plugin/device/gpu/kernel/gpu_kernel_factory.h"
-#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/cuda_common.h"
-#include "plugin/device/gpu/kernel/cuda_impl/sponge/nb14/dihedral_14_cf_atom_energy_impl.cuh"
-
-namespace mindspore {
-namespace kernel {
-template <typename T, typename T1>
-class Dihedral14CFAtomEnergyGpuKernelMod : public DeprecatedNativeGpuKernelMod {
- public:
-  Dihedral14CFAtomEnergyGpuKernelMod() : ele_uint_crd(1) {}
-  ~Dihedral14CFAtomEnergyGpuKernelMod() override = default;
-
-  bool Init(const CNodePtr &kernel_node) override {
-    kernel_node_ = kernel_node;
-    dihedral_14_numbers = static_cast<int>(GetAttr<int64_t>(kernel_node, "dihedral_14_numbers"));
-    atom_numbers = static_cast<int>(GetAttr<int64_t>(kernel_node, "atom_numbers"));
-
-    auto shape_uint_crd = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 0);
-    auto shape_LJtype = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 1);
-    auto shape_charge = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 2);
-    auto shape_boxlength_f = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 3);
-    auto shape_a_14 = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 4);
-    auto shape_b_14 = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 5);
-    auto shape_cf_scale_factor = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 6);
-
-    ele_uint_crd *= SizeOf(shape_uint_crd);
-    ele_LJtype *= SizeOf(shape_LJtype);
-    ele_charge *= SizeOf(shape_charge);
-    ele_boxlength_f *= SizeOf(shape_boxlength_f);
-    ele_a_14 *= SizeOf(shape_a_14);
-    ele_b_14 *= SizeOf(shape_b_14);
-    ele_cf_scale_factor *= SizeOf(shape_cf_scale_factor);
-
-    InitSizeLists();
-    return true;
-  }
-
-  bool Launch(const std::vector<AddressPtr> &inputs, const std::vector<AddressPtr> &,
-              const std::vector<AddressPtr> &outputs, void *stream_ptr) override {
-    auto uint_crd_f = GetDeviceAddress<T1>(inputs, 0);
-    auto LJtype = GetDeviceAddress<T1>(inputs, 1);
-    auto charge = GetDeviceAddress<T>(inputs, 2);
-    auto boxlength_f = GetDeviceAddress<T>(inputs, 3);
-    auto a_14 = GetDeviceAddress<T1>(inputs, 4);
-    auto b_14 = GetDeviceAddress<T1>(inputs, 5);
-    auto cf_scale_factor = GetDeviceAddress<T>(inputs, 6);
-    auto ene = GetDeviceAddress<T>(outputs, 0);
-
-    Dihedral14CFAtomEnergy(dihedral_14_numbers, atom_numbers, uint_crd_f, LJtype, charge, boxlength_f, a_14, b_14,
-                           cf_scale_factor, ene, reinterpret_cast<cudaStream_t>(stream_ptr));
-
-    return true;
-  }
-
- protected:
-  void InitSizeLists() override {
-    input_size_list_.push_back(ele_uint_crd * sizeof(T1));
-    input_size_list_.push_back(ele_LJtype * sizeof(T1));
-    input_size_list_.push_back(ele_charge * sizeof(T));
-    input_size_list_.push_back(ele_boxlength_f * sizeof(T));
-    input_size_list_.push_back(ele_a_14 * sizeof(T1));
-    input_size_list_.push_back(ele_b_14 * sizeof(T1));
-    input_size_list_.push_back(ele_cf_scale_factor * sizeof(T));
-
-    output_size_list_.push_back(atom_numbers * sizeof(T));
-  }
-
- private:
-  size_t ele_uint_crd = 1;
-  size_t ele_LJtype = 1;
-  size_t ele_charge = 1;
-  size_t ele_boxlength_f = 1;
-  size_t ele_a_14 = 1;
-  size_t ele_b_14 = 1;
-  size_t ele_cf_scale_factor = 1;
-
-  int dihedral_14_numbers;
-  int atom_numbers;
-};
-}  // namespace kernel
-}  // namespace mindspore
-#endif  // MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_GPU_SPONG_NB14_DIHEDRAL_14_CF_ATOM_ENERGY_KERNEL_H_
diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/nb14/dihedral_14_cf_energy_kernel.cc b/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/nb14/dihedral_14_cf_energy_kernel.cc
deleted file mode 100644
index b4142216476..00000000000
--- a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/nb14/dihedral_14_cf_energy_kernel.cc
+++ /dev/null
@@ -1,33 +0,0 @@
-/**
- * Copyright 2021-2022 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "plugin/device/gpu/kernel/sponge/nb14/dihedral_14_cf_energy_kernel.h"
-
-namespace mindspore {
-namespace kernel {
-MS_REG_GPU_KERNEL_TWO(Dihedral14CFEnergy,
-                      KernelAttr()
-                        .AddInputAttr(kNumberTypeUInt32)
-                        .AddInputAttr(kNumberTypeInt32)
-                        .AddInputAttr(kNumberTypeFloat32)
-                        .AddInputAttr(kNumberTypeFloat32)
-                        .AddInputAttr(kNumberTypeInt32)
-                        .AddInputAttr(kNumberTypeInt32)
-                        .AddInputAttr(kNumberTypeFloat32)
-                        .AddOutputAttr(kNumberTypeFloat32),
-                      Dihedral14CFEnergyGpuKernelMod, float, int)
-}  // namespace kernel
-}  // namespace mindspore
diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/nb14/dihedral_14_cf_energy_kernel.h b/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/nb14/dihedral_14_cf_energy_kernel.h
deleted file mode 100644
index 16f3123a95c..00000000000
--- a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/nb14/dihedral_14_cf_energy_kernel.h
+++ /dev/null
@@ -1,114 +0,0 @@
-/**
- * Copyright 2021-2022 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_GPU_SPONG_NB14_DIHEDRAL_14_CF_ENERGY_KERNEL_H_
-#define MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_GPU_SPONG_NB14_DIHEDRAL_14_CF_ENERGY_KERNEL_H_
-
-#include <cuda_runtime_api.h>
-#include <vector>
-#include <string>
-#include <map>
-#include "plugin/device/gpu/kernel/gpu_kernel.h"
-#include "plugin/device/gpu/kernel/gpu_kernel_factory.h"
-#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/cuda_common.h"
-#include "plugin/device/gpu/kernel/cuda_impl/sponge/nb14/dihedral_14_cf_energy_impl.cuh"
-
-namespace mindspore {
-namespace kernel {
-template <typename T, typename T1>
-class Dihedral14CFEnergyGpuKernelMod : public DeprecatedNativeGpuKernelMod {
- public:
-  Dihedral14CFEnergyGpuKernelMod() : ele_uint_crd(1) {}
-  ~Dihedral14CFEnergyGpuKernelMod() override = default;
-
-  bool Init(const CNodePtr &kernel_node) override {
-    kernel_node_ = kernel_node;
-    dihedral_14_numbers = static_cast<int>(GetAttr<int64_t>(kernel_node, "dihedral_14_numbers"));
-    atom_numbers = static_cast<int>(GetAttr<int64_t>(kernel_node, "atom_numbers"));
-
-    auto shape_uint_crd = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 0);
-    auto shape_LJtype = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 1);
-    auto shape_charge = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 2);
-    auto shape_boxlength_f = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 3);
-    auto shape_a_14 = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 4);
-    auto shape_b_14 = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 5);
-    auto shape_cf_scale_factor = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 6);
-
-    ele_uint_crd *= SizeOf(shape_uint_crd);
-    ele_LJtype *= SizeOf(shape_LJtype);
-    ele_charge *= SizeOf(shape_charge);
-    ele_boxlength_f *= SizeOf(shape_boxlength_f);
-    ele_a_14 *= SizeOf(shape_a_14);
-    ele_b_14 *= SizeOf(shape_b_14);
-    ele_cf_scale_factor *= SizeOf(shape_cf_scale_factor);
-
-    InitSizeLists();
-    return true;
-  }
-
-  bool Launch(const std::vector<AddressPtr> &inputs, const std::vector<AddressPtr> &workspace,
-              const std::vector<AddressPtr> &outputs, void *stream_ptr) override {
-    auto uint_crd_f = GetDeviceAddress<T1>(inputs, 0);
-    auto LJtype = GetDeviceAddress<T1>(inputs, 1);
-    auto charge = GetDeviceAddress<T>(inputs, 2);
-    auto boxlength_f = GetDeviceAddress<T>(inputs, 3);
-    auto a_14 = GetDeviceAddress<T1>(inputs, 4);
-    auto b_14 = GetDeviceAddress<T1>(inputs, 5);
-    auto cf_scale_factor = GetDeviceAddress<T>(inputs, 6);
-    auto ene = GetDeviceAddress<T>(outputs, 0);
-    auto uint_crd_with_LJ = GetDeviceAddress<UINT_VECTOR_LJ_TYPE>(workspace, 0);
-
-    Dihedral14CFEnergy(dihedral_14_numbers, atom_numbers, uint_crd_f, LJtype, charge, uint_crd_with_LJ, boxlength_f,
-                       a_14, b_14, cf_scale_factor, ene, reinterpret_cast<cudaStream_t>(stream_ptr));
-
-    return true;
-  }
-
- protected:
-  void InitSizeLists() override {
-    input_size_list_.push_back(ele_uint_crd * sizeof(T1));
-    input_size_list_.push_back(ele_LJtype * sizeof(T1));
-    input_size_list_.push_back(ele_charge * sizeof(T));
-    input_size_list_.push_back(ele_boxlength_f * sizeof(T));
-    input_size_list_.push_back(ele_a_14 * sizeof(T1));
-    input_size_list_.push_back(ele_b_14 * sizeof(T1));
-    input_size_list_.push_back(ele_cf_scale_factor * sizeof(T));
-    workspace_size_list_.push_back(atom_numbers * sizeof(UINT_VECTOR_LJ_TYPE));
-    output_size_list_.push_back(dihedral_14_numbers * sizeof(T));
-  }
-
- private:
-  size_t ele_uint_crd = 1;
-  size_t ele_LJtype = 1;
-  size_t ele_charge = 1;
-  size_t ele_boxlength_f = 1;
-  size_t ele_a_14 = 1;
-  size_t ele_b_14 = 1;
-  size_t ele_cf_scale_factor = 1;
-
-  int dihedral_14_numbers;
-  int atom_numbers;
-  struct UINT_VECTOR_LJ_TYPE {
-    unsigned int uint_x;
-    unsigned int uint_y;
-    unsigned int uint_z;
-    int LJ_type;
-    float charge;
-  };
-};
-}  // namespace kernel
-}  // namespace mindspore
-#endif  // MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_GPU_SPONG_NB14_DIHEDRAL_14_LJ_FORCE_WITH_DIRECT_CF_KERNEL_H_
diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/nb14/dihedral_14_lj_atom_energy_kernel.cc b/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/nb14/dihedral_14_lj_atom_energy_kernel.cc
deleted file mode 100644
index 0954a2df89f..00000000000
--- a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/nb14/dihedral_14_lj_atom_energy_kernel.cc
+++ /dev/null
@@ -1,35 +0,0 @@
-/**
- * Copyright 2021-2022 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "plugin/device/gpu/kernel/sponge/nb14/dihedral_14_lj_atom_energy_kernel.h"
-
-namespace mindspore {
-namespace kernel {
-MS_REG_GPU_KERNEL_TWO(Dihedral14LJAtomEnergy,
-                      KernelAttr()
-                        .AddInputAttr(kNumberTypeUInt32)
-                        .AddInputAttr(kNumberTypeInt32)
-                        .AddInputAttr(kNumberTypeFloat32)
-                        .AddInputAttr(kNumberTypeFloat32)
-                        .AddInputAttr(kNumberTypeInt32)
-                        .AddInputAttr(kNumberTypeInt32)
-                        .AddInputAttr(kNumberTypeFloat32)
-                        .AddInputAttr(kNumberTypeFloat32)
-                        .AddInputAttr(kNumberTypeFloat32)
-                        .AddOutputAttr(kNumberTypeFloat32),
-                      Dihedral14LJAtomEnergyGpuKernelMod, float, int)
-}  // namespace kernel
-}  // namespace mindspore
diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/nb14/dihedral_14_lj_atom_energy_kernel.h b/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/nb14/dihedral_14_lj_atom_energy_kernel.h
deleted file mode 100644
index 8bbb560f739..00000000000
--- a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/nb14/dihedral_14_lj_atom_energy_kernel.h
+++ /dev/null
@@ -1,115 +0,0 @@
-/**
- * Copyright 2021-2022 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_GPU_SPONG_NB14_DIHEDRAL_14_LJ_ATOM_ENERGY_KERNEL_H_
-#define MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_GPU_SPONG_NB14_DIHEDRAL_14_LJ_ATOM_ENERGY_KERNEL_H_
-
-#include <cuda_runtime_api.h>
-#include <vector>
-#include <string>
-#include <map>
-#include "plugin/device/gpu/kernel/gpu_kernel.h"
-#include "plugin/device/gpu/kernel/gpu_kernel_factory.h"
-#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/cuda_common.h"
-#include "plugin/device/gpu/kernel/cuda_impl/sponge/nb14/dihedral_14_lj_atom_energy_impl.cuh"
-
-namespace mindspore {
-namespace kernel {
-template <typename T, typename T1>
-class Dihedral14LJAtomEnergyGpuKernelMod : public DeprecatedNativeGpuKernelMod {
- public:
-  Dihedral14LJAtomEnergyGpuKernelMod() : ele_uint_crd(1) {}
-  ~Dihedral14LJAtomEnergyGpuKernelMod() override = default;
-
-  bool Init(const CNodePtr &kernel_node) override {
-    kernel_node_ = kernel_node;
-    dihedral_14_numbers = static_cast<int>(GetAttr<int64_t>(kernel_node, "dihedral_14_numbers"));
-    atom_numbers = static_cast<int>(GetAttr<int64_t>(kernel_node, "atom_numbers"));
-
-    auto shape_uint_crd = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 0);
-    auto shape_LJtype = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 1);
-    auto shape_charge = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 2);
-    auto shape_boxlength_f = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 3);
-    auto shape_a_14 = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 4);
-    auto shape_b_14 = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 5);
-    auto shape_lj_scale_factor = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 6);
-    auto shape_LJ_type_A = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 7);
-    auto shape_LJ_type_B = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 8);
-
-    ele_uint_crd *= SizeOf(shape_uint_crd);
-    ele_LJtype *= SizeOf(shape_LJtype);
-    ele_charge *= SizeOf(shape_charge);
-    ele_boxlength_f *= SizeOf(shape_boxlength_f);
-    ele_a_14 *= SizeOf(shape_a_14);
-    ele_b_14 *= SizeOf(shape_b_14);
-    ele_lj_scale_factor *= SizeOf(shape_lj_scale_factor);
-    ele_LJ_type_A *= SizeOf(shape_LJ_type_A);
-    ele_LJ_type_B *= SizeOf(shape_LJ_type_B);
-
-    InitSizeLists();
-    return true;
-  }
-
-  bool Launch(const std::vector<AddressPtr> &inputs, const std::vector<AddressPtr> &,
-              const std::vector<AddressPtr> &outputs, void *stream_ptr) override {
-    auto uint_crd_f = GetDeviceAddress<T1>(inputs, 0);
-    auto LJtype = GetDeviceAddress<T1>(inputs, 1);
-    auto charge = GetDeviceAddress<T>(inputs, 2);
-    auto boxlength_f = GetDeviceAddress<T>(inputs, 3);
-    auto a_14 = GetDeviceAddress<T1>(inputs, 4);
-    auto b_14 = GetDeviceAddress<T1>(inputs, 5);
-    auto lj_scale_factor = GetDeviceAddress<T>(inputs, 6);
-    auto LJ_type_A = GetDeviceAddress<T>(inputs, 7);
-    auto LJ_type_B = GetDeviceAddress<T>(inputs, 8);
-    auto ene = GetDeviceAddress<T>(outputs, 0);
-
-    Dihedral14LJAtomEnergy(dihedral_14_numbers, atom_numbers, uint_crd_f, LJtype, charge, boxlength_f, a_14, b_14,
-                           lj_scale_factor, LJ_type_A, LJ_type_B, ene, reinterpret_cast<cudaStream_t>(stream_ptr));
-    return true;
-  }
-
- protected:
-  void InitSizeLists() override {
-    input_size_list_.push_back(ele_uint_crd * sizeof(T1));
-    input_size_list_.push_back(ele_LJtype * sizeof(T1));
-    input_size_list_.push_back(ele_charge * sizeof(T));
-    input_size_list_.push_back(ele_boxlength_f * sizeof(T));
-    input_size_list_.push_back(ele_a_14 * sizeof(T1));
-    input_size_list_.push_back(ele_b_14 * sizeof(T1));
-    input_size_list_.push_back(ele_lj_scale_factor * sizeof(T));
-    input_size_list_.push_back(ele_LJ_type_A * sizeof(T));
-    input_size_list_.push_back(ele_LJ_type_B * sizeof(T));
-
-    output_size_list_.push_back(atom_numbers * sizeof(T));
-  }
-
- private:
-  size_t ele_uint_crd = 1;
-  size_t ele_LJtype = 1;
-  size_t ele_charge = 1;
-  size_t ele_boxlength_f = 1;
-  size_t ele_a_14 = 1;
-  size_t ele_b_14 = 1;
-  size_t ele_lj_scale_factor = 1;
-  size_t ele_LJ_type_A = 1;
-  size_t ele_LJ_type_B = 1;
-
-  int dihedral_14_numbers;
-  int atom_numbers;
-};
-}  // namespace kernel
-}  // namespace mindspore
-#endif  // MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_GPU_SPONG_NB14_DIHEDRAL_14_LJ_ATOM_ENERGY_KERNEL_H_
diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/nb14/dihedral_14_lj_cf_force_with_atom_energy_and_virial_kernel.cc b/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/nb14/dihedral_14_lj_cf_force_with_atom_energy_and_virial_kernel.cc
deleted file mode 100644
index 024d06ea47d..00000000000
--- a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/nb14/dihedral_14_lj_cf_force_with_atom_energy_and_virial_kernel.cc
+++ /dev/null
@@ -1,38 +0,0 @@
-/**
- * Copyright 2021-2022 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */ - -#include "plugin/device/gpu/kernel/sponge/nb14/dihedral_14_lj_cf_force_with_atom_energy_and_virial_kernel.h" - -namespace mindspore { -namespace kernel { -MS_REG_GPU_KERNEL_TWO(Dihedral14ForceWithAtomEnergyVirial, - KernelAttr() - .AddInputAttr(kNumberTypeUInt32) - .AddInputAttr(kNumberTypeInt32) - .AddInputAttr(kNumberTypeFloat32) - .AddInputAttr(kNumberTypeFloat32) - .AddInputAttr(kNumberTypeInt32) - .AddInputAttr(kNumberTypeInt32) - .AddInputAttr(kNumberTypeFloat32) - .AddInputAttr(kNumberTypeFloat32) - .AddInputAttr(kNumberTypeFloat32) - .AddInputAttr(kNumberTypeFloat32) - .AddOutputAttr(kNumberTypeFloat32) - .AddOutputAttr(kNumberTypeFloat32) - .AddOutputAttr(kNumberTypeFloat32), - Dihedral14LJCFForceWithAtomEnergyAndVirialGpuKernelMod, float, int) -} // namespace kernel -} // namespace mindspore diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/nb14/dihedral_14_lj_cf_force_with_atom_energy_and_virial_kernel.h b/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/nb14/dihedral_14_lj_cf_force_with_atom_energy_and_virial_kernel.h deleted file mode 100644 index 7f534620ed3..00000000000 --- a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/nb14/dihedral_14_lj_cf_force_with_atom_energy_and_virial_kernel.h +++ /dev/null @@ -1,138 +0,0 @@ -/** - * Copyright 2021-2022 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
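Every Init() in these files accumulates element counts the same way: each ele_* member starts at 1 and is multiplied by SizeOf(shape), the product of the tensor's inferred dimensions, so that InitSizeLists() can later convert counts to bytes. A minimal stand-in for the MindSpore SizeOf helper (illustration only; the real helper lives in the core utilities and this is not its actual signature):

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    // Stand-in for SizeOf(shape): the element count of a tensor is the
    // product of its dimensions.
    std::size_t SizeOf(const std::vector<int64_t> &shape) {
      std::size_t n = 1;
      for (int64_t dim : shape) {
        n *= static_cast<std::size_t>(dim);
      }
      return n;
    }
    // e.g. ele_uint_crd *= SizeOf({atom_numbers, 3}) leaves 3 * atom_numbers.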
- */ - -#ifndef MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_GPU_SPONG_NB14_DIHEDRAL_14_LJ_CF_FORCE_WITH_ATOM_ENE_VIRIAL_KERNEL_H_ -#define MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_GPU_SPONG_NB14_DIHEDRAL_14_LJ_CF_FORCE_WITH_ATOM_ENE_VIRIAL_KERNEL_H_ - -#include -#include -#include -#include -#include "plugin/device/gpu/kernel/gpu_kernel.h" -#include "plugin/device/gpu/kernel/gpu_kernel_factory.h" -#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/cuda_common.h" -#include "plugin/device/gpu/kernel/cuda_impl/sponge/nb14/dihedral_14_lj_cf_force_with_atom_energy_and_virial_impl.cuh" - -namespace mindspore { -namespace kernel { - -template -class Dihedral14LJCFForceWithAtomEnergyAndVirialGpuKernelMod : public DeprecatedNativeGpuKernelMod { - public: - Dihedral14LJCFForceWithAtomEnergyAndVirialGpuKernelMod() : ele_uint_crd(1) {} - ~Dihedral14LJCFForceWithAtomEnergyAndVirialGpuKernelMod() override = default; - - bool Init(const CNodePtr &kernel_node) override { - kernel_node_ = kernel_node; - dihedral_14_numbers = static_cast(GetAttr(kernel_node, "dihedral_14_numbers")); - atom_numbers = static_cast(GetAttr(kernel_node, "atom_numbers")); - - auto shape_uint_crd = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 0); - auto shape_LJtype = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 1); - auto shape_charge = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 2); - auto shape_boxlength_f = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 3); - auto shape_a_14 = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 4); - auto shape_b_14 = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 5); - auto shape_lj_scale_factor = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 6); - auto shape_cf_scale_factor = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 7); - auto shape_LJ_type_A = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 8); - auto shape_LJ_type_B = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 9); - - ele_uint_crd *= SizeOf(shape_uint_crd); - ele_LJtype *= SizeOf(shape_LJtype); - ele_charge *= SizeOf(shape_charge); - ele_boxlength_f *= SizeOf(shape_boxlength_f); - ele_a_14 *= SizeOf(shape_a_14); - ele_b_14 *= SizeOf(shape_b_14); - ele_lj_scale_factor *= SizeOf(shape_lj_scale_factor); - ele_cf_scale_factor *= SizeOf(shape_cf_scale_factor); - ele_LJ_type_A *= SizeOf(shape_LJ_type_A); - ele_LJ_type_B *= SizeOf(shape_LJ_type_B); - - InitSizeLists(); - return true; - } - - bool Launch(const std::vector &inputs, const std::vector &workspace, - const std::vector &outputs, void *stream_ptr) override { - auto uint_crd_f = GetDeviceAddress(inputs, 0); - auto LJtype = GetDeviceAddress(inputs, 1); - auto charge = GetDeviceAddress(inputs, 2); - auto boxlength_f = GetDeviceAddress(inputs, 3); - auto a_14 = GetDeviceAddress(inputs, 4); - auto b_14 = GetDeviceAddress(inputs, 5); - auto lj_scale_factor = GetDeviceAddress(inputs, 6); - auto cf_scale_factor = GetDeviceAddress(inputs, 7); - auto LJ_type_A = GetDeviceAddress(inputs, 8); - auto LJ_type_B = GetDeviceAddress(inputs, 9); - auto frc_f = GetDeviceAddress(outputs, 0); - auto atom_energy = GetDeviceAddress(outputs, 1); - auto atom_virial = GetDeviceAddress(outputs, 2); - - auto uint_crd_with_LJ = GetDeviceAddress(workspace, 0); - - Dihedral14LJCFForceWithAtomEnergyAndVirial(dihedral_14_numbers, atom_numbers, uint_crd_f, LJtype, charge, - uint_crd_with_LJ, boxlength_f, a_14, b_14, lj_scale_factor, - cf_scale_factor, LJ_type_A, LJ_type_B, frc_f, atom_energy, 
atom_virial, - reinterpret_cast(stream_ptr)); - - return true; - } - - protected: - void InitSizeLists() override { - input_size_list_.push_back(ele_uint_crd * sizeof(T1)); - input_size_list_.push_back(ele_LJtype * sizeof(T1)); - input_size_list_.push_back(ele_charge * sizeof(T)); - input_size_list_.push_back(ele_boxlength_f * sizeof(T)); - input_size_list_.push_back(ele_a_14 * sizeof(T1)); - input_size_list_.push_back(ele_b_14 * sizeof(T1)); - input_size_list_.push_back(ele_lj_scale_factor * sizeof(T)); - input_size_list_.push_back(ele_cf_scale_factor * sizeof(T)); - input_size_list_.push_back(ele_LJ_type_A * sizeof(T)); - input_size_list_.push_back(ele_LJ_type_B * sizeof(T)); - workspace_size_list_.push_back(atom_numbers * sizeof(UINT_VECTOR_LJ_TYPE)); - - output_size_list_.push_back(3 * atom_numbers * sizeof(T)); - output_size_list_.push_back(atom_numbers * sizeof(T)); - output_size_list_.push_back(atom_numbers * sizeof(T)); - } - - private: - size_t ele_uint_crd = 1; - size_t ele_LJtype = 1; - size_t ele_charge = 1; - size_t ele_boxlength_f = 1; - size_t ele_a_14 = 1; - size_t ele_b_14 = 1; - size_t ele_lj_scale_factor = 1; - size_t ele_cf_scale_factor = 1; - size_t ele_LJ_type_A = 1; - size_t ele_LJ_type_B = 1; - - int dihedral_14_numbers; - int atom_numbers; - struct UINT_VECTOR_LJ_TYPE { - unsigned int uint_x; - unsigned int uint_y; - unsigned int uint_z; - int LJ_type; - float charge; - }; -}; -} // namespace kernel -} // namespace mindspore -#endif diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/nb14/dihedral_14_lj_cf_force_with_atom_energy_kernel.cc b/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/nb14/dihedral_14_lj_cf_force_with_atom_energy_kernel.cc deleted file mode 100644 index fa2b36fb501..00000000000 --- a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/nb14/dihedral_14_lj_cf_force_with_atom_energy_kernel.cc +++ /dev/null @@ -1,37 +0,0 @@ -/** - * Copyright 2021-2022 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
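The nb14 kernels that take a workspace all stage the same packed record: each atom's unsigned coordinates, LJ type, and charge are gathered into one UINT_VECTOR_LJ_TYPE so the pair loop reads a single contiguous struct per atom. The sizing logic of the InitSizeLists() above, isolated (the struct is copied verbatim from the deleted header):

    #include <cstddef>

    struct UINT_VECTOR_LJ_TYPE {  // verbatim from the deleted kernel header
      unsigned int uint_x;
      unsigned int uint_y;
      unsigned int uint_z;
      int LJ_type;
      float charge;
    };

    // One packed record per atom for the workspace; the three outputs are
    // the per-atom force (3 floats each), per-atom energy, per-atom virial.
    std::size_t WorkspaceBytes(int atom_numbers) {
      return static_cast<std::size_t>(atom_numbers) * sizeof(UINT_VECTOR_LJ_TYPE);
    }
    std::size_t ForceBytes(int atom_numbers) {
      return 3u * static_cast<std::size_t>(atom_numbers) * sizeof(float);
    }
    std::size_t PerAtomBytes(int atom_numbers) {
      return static_cast<std::size_t>(atom_numbers) * sizeof(float);
    }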
- */ - -#include "plugin/device/gpu/kernel/sponge/nb14/dihedral_14_lj_cf_force_with_atom_energy_kernel.h" - -namespace mindspore { -namespace kernel { -MS_REG_GPU_KERNEL_TWO(Dihedral14LJCFForceWithAtomEnergy, - KernelAttr() - .AddInputAttr(kNumberTypeUInt32) - .AddInputAttr(kNumberTypeInt32) - .AddInputAttr(kNumberTypeFloat32) - .AddInputAttr(kNumberTypeFloat32) - .AddInputAttr(kNumberTypeInt32) - .AddInputAttr(kNumberTypeInt32) - .AddInputAttr(kNumberTypeFloat32) - .AddInputAttr(kNumberTypeFloat32) - .AddInputAttr(kNumberTypeFloat32) - .AddInputAttr(kNumberTypeFloat32) - .AddOutputAttr(kNumberTypeFloat32) - .AddOutputAttr(kNumberTypeFloat32), - Dihedral14LJCFForceWithAtomEnergyGpuKernelMod, float, int) -} // namespace kernel -} // namespace mindspore diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/nb14/dihedral_14_lj_cf_force_with_atom_energy_kernel.h b/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/nb14/dihedral_14_lj_cf_force_with_atom_energy_kernel.h deleted file mode 100644 index 289d4f4f4c4..00000000000 --- a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/nb14/dihedral_14_lj_cf_force_with_atom_energy_kernel.h +++ /dev/null @@ -1,134 +0,0 @@ -/** - * Copyright 2021-2022 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_GPU_SPONG_NB14_DIHEDRAL_14_LJ_CF_FORCE_WITH_ATOM_ENERGY_KERNEL_H_ -#define MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_GPU_SPONG_NB14_DIHEDRAL_14_LJ_CF_FORCE_WITH_ATOM_ENERGY_KERNEL_H_ - -#include -#include -#include -#include -#include "plugin/device/gpu/kernel/gpu_kernel.h" -#include "plugin/device/gpu/kernel/gpu_kernel_factory.h" -#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/cuda_common.h" -#include "plugin/device/gpu/kernel/cuda_impl/sponge/nb14/dihedral_14_lj_cf_force_with_atom_energy_impl.cuh" - -namespace mindspore { -namespace kernel { -template -class Dihedral14LJCFForceWithAtomEnergyGpuKernelMod : public DeprecatedNativeGpuKernelMod { - public: - Dihedral14LJCFForceWithAtomEnergyGpuKernelMod() : ele_uint_crd(1) {} - ~Dihedral14LJCFForceWithAtomEnergyGpuKernelMod() override = default; - - bool Init(const CNodePtr &kernel_node) override { - kernel_node_ = kernel_node; - dihedral_14_numbers = static_cast(GetAttr(kernel_node, "dihedral_14_numbers")); - atom_numbers = static_cast(GetAttr(kernel_node, "atom_numbers")); - - auto shape_uint_crd = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 0); - auto shape_LJtype = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 1); - auto shape_charge = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 2); - auto shape_boxlength_f = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 3); - auto shape_a_14 = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 4); - auto shape_b_14 = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 5); - auto shape_lj_scale_factor = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 6); - auto shape_cf_scale_factor = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 7); - auto shape_LJ_type_A = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 8); - auto shape_LJ_type_B = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 9); - - ele_uint_crd *= SizeOf(shape_uint_crd); - ele_LJtype *= SizeOf(shape_LJtype); - ele_charge *= SizeOf(shape_charge); - ele_boxlength_f *= SizeOf(shape_boxlength_f); - ele_a_14 *= SizeOf(shape_a_14); - ele_b_14 *= SizeOf(shape_b_14); - ele_lj_scale_factor *= SizeOf(shape_lj_scale_factor); - ele_cf_scale_factor *= SizeOf(shape_cf_scale_factor); - ele_LJ_type_A *= SizeOf(shape_LJ_type_A); - ele_LJ_type_B *= SizeOf(shape_LJ_type_B); - - InitSizeLists(); - return true; - } - - bool Launch(const std::vector &inputs, const std::vector &workspace, - const std::vector &outputs, void *stream_ptr) override { - auto uint_crd_f = GetDeviceAddress(inputs, 0); - auto LJtype = GetDeviceAddress(inputs, 1); - auto charge = GetDeviceAddress(inputs, 2); - auto boxlength_f = GetDeviceAddress(inputs, 3); - auto a_14 = GetDeviceAddress(inputs, 4); - auto b_14 = GetDeviceAddress(inputs, 5); - auto lj_scale_factor = GetDeviceAddress(inputs, 6); - auto cf_scale_factor = GetDeviceAddress(inputs, 7); - auto LJ_type_A = GetDeviceAddress(inputs, 8); - auto LJ_type_B = GetDeviceAddress(inputs, 9); - auto frc_f = GetDeviceAddress(outputs, 0); - auto atom_energy = GetDeviceAddress(outputs, 1); - - auto uint_crd_with_LJ = GetDeviceAddress(workspace, 0); - - Dihedral14LJCFForceWithAtomEnergy(dihedral_14_numbers, atom_numbers, uint_crd_f, LJtype, charge, uint_crd_with_LJ, - boxlength_f, a_14, b_14, lj_scale_factor, cf_scale_factor, LJ_type_A, LJ_type_B, - frc_f, atom_energy, reinterpret_cast(stream_ptr)); - - return true; - } - - protected: - void InitSizeLists() override { - 
input_size_list_.push_back(ele_uint_crd * sizeof(T1)); - input_size_list_.push_back(ele_LJtype * sizeof(T1)); - input_size_list_.push_back(ele_charge * sizeof(T)); - input_size_list_.push_back(ele_boxlength_f * sizeof(T)); - input_size_list_.push_back(ele_a_14 * sizeof(T1)); - input_size_list_.push_back(ele_b_14 * sizeof(T1)); - input_size_list_.push_back(ele_lj_scale_factor * sizeof(T)); - input_size_list_.push_back(ele_cf_scale_factor * sizeof(T)); - input_size_list_.push_back(ele_LJ_type_A * sizeof(T)); - input_size_list_.push_back(ele_LJ_type_B * sizeof(T)); - workspace_size_list_.push_back(atom_numbers * sizeof(UINT_VECTOR_LJ_TYPE)); - - output_size_list_.push_back(3 * atom_numbers * sizeof(T)); - output_size_list_.push_back(atom_numbers * sizeof(T)); - } - - private: - size_t ele_uint_crd = 1; - size_t ele_LJtype = 1; - size_t ele_charge = 1; - size_t ele_boxlength_f = 1; - size_t ele_a_14 = 1; - size_t ele_b_14 = 1; - size_t ele_lj_scale_factor = 1; - size_t ele_cf_scale_factor = 1; - size_t ele_LJ_type_A = 1; - size_t ele_LJ_type_B = 1; - - int dihedral_14_numbers; - int atom_numbers; - struct UINT_VECTOR_LJ_TYPE { - unsigned int uint_x; - unsigned int uint_y; - unsigned int uint_z; - int LJ_type; - float charge; - }; -}; -} // namespace kernel -} // namespace mindspore -#endif // MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_GPU_SPONG_NB14_DIHEDRAL_14_LJ_CF_FORCE_WITH_ATOM_ENERGY_KERNEL_H_ diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/nb14/dihedral_14_lj_energy_kernel.cc b/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/nb14/dihedral_14_lj_energy_kernel.cc deleted file mode 100644 index eb47127df5b..00000000000 --- a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/nb14/dihedral_14_lj_energy_kernel.cc +++ /dev/null @@ -1,35 +0,0 @@ -/** - * Copyright 2021-2022 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
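All of these nb14 kernels consume uint_crd_f rather than floating-point positions. The motivation, hedged since the conversion kernels are removed elsewhere in this patch: when positions are scaled so one box length spans the unsigned-integer range, periodic wrapping becomes implicit, because unsigned subtraction already reduces a coordinate difference modulo 2^32, and reinterpreting the difference as signed yields the minimum-image displacement. A sketch of the trick (names taken from these kernels; the exact scaling convention is an assumption):

    #include <cstdint>

    // With positions scaled so one box length maps to the uint32_t range,
    // the wrapped difference b - a, read back as signed, is the
    // minimum-image displacement; uint_dr_to_dr_cof converts it back to
    // simulation units.
    float MinimumImageDr(uint32_t a, uint32_t b, float uint_dr_to_dr_cof) {
      int32_t wrapped = static_cast<int32_t>(b - a);  // modular wrap for free
      return static_cast<float>(wrapped) * uint_dr_to_dr_cof;
    }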
- */ - -#include "plugin/device/gpu/kernel/sponge/nb14/dihedral_14_lj_energy_kernel.h" - -namespace mindspore { -namespace kernel { -MS_REG_GPU_KERNEL_TWO(Dihedral14LJEnergy, - KernelAttr() - .AddInputAttr(kNumberTypeUInt32) - .AddInputAttr(kNumberTypeInt32) - .AddInputAttr(kNumberTypeFloat32) - .AddInputAttr(kNumberTypeFloat32) - .AddInputAttr(kNumberTypeInt32) - .AddInputAttr(kNumberTypeInt32) - .AddInputAttr(kNumberTypeFloat32) - .AddInputAttr(kNumberTypeFloat32) - .AddInputAttr(kNumberTypeFloat32) - .AddOutputAttr(kNumberTypeFloat32), - Dihedral14LJEnergyGpuKernelMod, float, int) -} // namespace kernel -} // namespace mindspore diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/nb14/dihedral_14_lj_energy_kernel.h b/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/nb14/dihedral_14_lj_energy_kernel.h deleted file mode 100644 index ab65bfabf39..00000000000 --- a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/nb14/dihedral_14_lj_energy_kernel.h +++ /dev/null @@ -1,126 +0,0 @@ -/** - * Copyright 2021-2022 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_GPU_SPONG_NB14_DIHEDRAL_14_LJ_ENERGY_KERNEL_H_ -#define MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_GPU_SPONG_NB14_DIHEDRAL_14_LJ_ENERGY_KERNEL_H_ - -#include -#include -#include -#include -#include "plugin/device/gpu/kernel/gpu_kernel.h" -#include "plugin/device/gpu/kernel/gpu_kernel_factory.h" -#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/cuda_common.h" -#include "plugin/device/gpu/kernel/cuda_impl/sponge/nb14/dihedral_14_lj_energy_impl.cuh" - -namespace mindspore { -namespace kernel { -template -class Dihedral14LJEnergyGpuKernelMod : public DeprecatedNativeGpuKernelMod { - public: - Dihedral14LJEnergyGpuKernelMod() : ele_uint_crd(1) {} - ~Dihedral14LJEnergyGpuKernelMod() override = default; - - bool Init(const CNodePtr &kernel_node) override { - kernel_node_ = kernel_node; - dihedral_14_numbers = static_cast(GetAttr(kernel_node, "dihedral_14_numbers")); - atom_numbers = static_cast(GetAttr(kernel_node, "atom_numbers")); - - auto shape_uint_crd = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 0); - auto shape_LJtype = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 1); - auto shape_charge = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 2); - auto shape_boxlength_f = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 3); - auto shape_a_14 = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 4); - auto shape_b_14 = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 5); - auto shape_lj_scale_factor = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 6); - auto shape_LJ_type_A = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 7); - auto shape_LJ_type_B = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 8); - - ele_uint_crd *= SizeOf(shape_uint_crd); - ele_LJtype *= SizeOf(shape_LJtype); - ele_charge *= SizeOf(shape_charge); - ele_boxlength_f *= 
SizeOf(shape_boxlength_f); - ele_a_14 *= SizeOf(shape_a_14); - ele_b_14 *= SizeOf(shape_b_14); - ele_lj_scale_factor *= SizeOf(shape_lj_scale_factor); - ele_LJ_type_A *= SizeOf(shape_LJ_type_A); - ele_LJ_type_B *= SizeOf(shape_LJ_type_B); - - InitSizeLists(); - return true; - } - - bool Launch(const std::vector &inputs, const std::vector &workspace, - const std::vector &outputs, void *stream_ptr) override { - auto uint_crd_f = GetDeviceAddress(inputs, 0); - auto LJtype = GetDeviceAddress(inputs, 1); - auto charge = GetDeviceAddress(inputs, 2); - auto boxlength_f = GetDeviceAddress(inputs, 3); - auto a_14 = GetDeviceAddress(inputs, 4); - auto b_14 = GetDeviceAddress(inputs, 5); - auto lj_scale_factor = GetDeviceAddress(inputs, 6); - auto LJ_type_A = GetDeviceAddress(inputs, 7); - auto LJ_type_B = GetDeviceAddress(inputs, 8); - auto ene = GetDeviceAddress(outputs, 0); - auto uint_crd_with_LJ = GetDeviceAddress(workspace, 0); - - Dihedral14LJEnergy(dihedral_14_numbers, atom_numbers, uint_crd_f, LJtype, charge, uint_crd_with_LJ, boxlength_f, - a_14, b_14, lj_scale_factor, LJ_type_A, LJ_type_B, ene, - reinterpret_cast(stream_ptr)); - - return true; - } - - protected: - void InitSizeLists() override { - input_size_list_.push_back(ele_uint_crd * sizeof(T1)); - input_size_list_.push_back(ele_LJtype * sizeof(T1)); - input_size_list_.push_back(ele_charge * sizeof(T)); - input_size_list_.push_back(ele_boxlength_f * sizeof(T)); - input_size_list_.push_back(ele_a_14 * sizeof(T1)); - input_size_list_.push_back(ele_b_14 * sizeof(T1)); - input_size_list_.push_back(ele_lj_scale_factor * sizeof(T)); - input_size_list_.push_back(ele_LJ_type_A * sizeof(T)); - input_size_list_.push_back(ele_LJ_type_B * sizeof(T)); - workspace_size_list_.push_back(atom_numbers * sizeof(UINT_VECTOR_LJ_TYPE)); - - output_size_list_.push_back(dihedral_14_numbers * sizeof(T)); - } - - private: - size_t ele_uint_crd = 1; - size_t ele_LJtype = 1; - size_t ele_charge = 1; - size_t ele_boxlength_f = 1; - size_t ele_a_14 = 1; - size_t ele_b_14 = 1; - size_t ele_lj_scale_factor = 1; - size_t ele_LJ_type_A = 1; - size_t ele_LJ_type_B = 1; - - int dihedral_14_numbers; - int atom_numbers; - struct UINT_VECTOR_LJ_TYPE { - unsigned int uint_x; - unsigned int uint_y; - unsigned int uint_z; - int LJ_type; - float charge; - }; -}; -} // namespace kernel -} // namespace mindspore -#endif // MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_GPU_SPONG_NB14_DIHEDRAL_14_LJ_ENERGY_KERNEL_H_ diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/nb14/dihedral_14_lj_force_gpu_kernel.cc b/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/nb14/dihedral_14_lj_force_gpu_kernel.cc deleted file mode 100644 index b6cb2639697..00000000000 --- a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/nb14/dihedral_14_lj_force_gpu_kernel.cc +++ /dev/null @@ -1,35 +0,0 @@ -/** - * Copyright 2021-2022 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
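Unlike the per-atom kernels above, Dihedral14LJEnergy reports one energy per 1-4 pair: its output is dihedral_14_numbers floats. The LJ_type_A / LJ_type_B tables suggest the AMBER-style 12-6 form; whether SPONGE folds the conventional 1/12 and 1/6 factors into the stored coefficients is not visible in this patch, so treat the normalization below as an assumption:

    // 12-6 Lennard-Jones energy for one 1-4 pair, scaled by its
    // lj_scale_factor. A and B come from the type tables indexed by the
    // pair's LJ types (normalization is an assumption, see above).
    float LJ14PairEnergy(float r, float A, float B, float lj_scale) {
      float inv_r2 = 1.0f / (r * r);
      float inv_r6 = inv_r2 * inv_r2 * inv_r2;
      return lj_scale * (A * inv_r6 * inv_r6 - B * inv_r6);
    }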
- */ - -#include "plugin/device/gpu/kernel/sponge/nb14/dihedral_14_lj_force_gpu_kernel.h" - -namespace mindspore { -namespace kernel { -MS_REG_GPU_KERNEL_TWO(Dihedral14LJForce, - KernelAttr() - .AddInputAttr(kNumberTypeUInt32) - .AddInputAttr(kNumberTypeInt32) - .AddInputAttr(kNumberTypeFloat32) - .AddInputAttr(kNumberTypeFloat32) - .AddInputAttr(kNumberTypeInt32) - .AddInputAttr(kNumberTypeInt32) - .AddInputAttr(kNumberTypeFloat32) - .AddInputAttr(kNumberTypeFloat32) - .AddInputAttr(kNumberTypeFloat32) - .AddOutputAttr(kNumberTypeFloat32), - Dihedral14LJForceGpuKernelMod, float, int) -} // namespace kernel -} // namespace mindspore diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/nb14/dihedral_14_lj_force_gpu_kernel.h b/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/nb14/dihedral_14_lj_force_gpu_kernel.h deleted file mode 100644 index 082987b7d06..00000000000 --- a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/nb14/dihedral_14_lj_force_gpu_kernel.h +++ /dev/null @@ -1,114 +0,0 @@ -/** - * Copyright 2021-2022 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_GPU_SPONG_NB14_DIHEDRAL_14_LJ_FORCE_KERNEL_H_ -#define MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_GPU_SPONG_NB14_DIHEDRAL_14_LJ_FORCE_KERNEL_H_ - -#include -#include -#include -#include -#include "plugin/device/gpu/kernel/gpu_kernel.h" -#include "plugin/device/gpu/kernel/gpu_kernel_factory.h" -#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/cuda_common.h" -#include "plugin/device/gpu/kernel/cuda_impl/sponge/nb14/dihedral_14_lj_force_impl.cuh" - -namespace mindspore { -namespace kernel { -template -class Dihedral14LJForceGpuKernelMod : public DeprecatedNativeGpuKernelMod { - public: - Dihedral14LJForceGpuKernelMod() : ele_uint_crd(1) {} - ~Dihedral14LJForceGpuKernelMod() override = default; - - bool Init(const CNodePtr &kernel_node) override { - kernel_node_ = kernel_node; - dihedral_14_numbers = static_cast(GetAttr(kernel_node, "dihedral_14_numbers")); - atom_numbers = static_cast(GetAttr(kernel_node, "atom_numbers")); - - auto shape_uint_crd = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 0); - auto shape_LJtype = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 1); - auto shape_charge = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 2); - auto shape_boxlength_f = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 3); - auto shape_a_14 = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 4); - auto shape_b_14 = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 5); - auto shape_lj_scale_factor = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 6); - auto shape_LJ_type_A = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 7); - auto shape_LJ_type_B = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 8); - - ele_uint_crd *= SizeOf(shape_uint_crd); - ele_LJtype *= SizeOf(shape_LJtype); - ele_charge *= SizeOf(shape_charge); - ele_boxlength_f *= 
SizeOf(shape_boxlength_f); - ele_a_14 *= SizeOf(shape_a_14); - ele_b_14 *= SizeOf(shape_b_14); - ele_lj_scale_factor *= SizeOf(shape_lj_scale_factor); - ele_LJ_type_A *= SizeOf(shape_LJ_type_A); - ele_LJ_type_B *= SizeOf(shape_LJ_type_B); - - InitSizeLists(); - return true; - } - - bool Launch(const std::vector &inputs, const std::vector &, - const std::vector &outputs, void *stream_ptr) override { - auto uint_crd_f = GetDeviceAddress(inputs, 0); - auto LJtype = GetDeviceAddress(inputs, 1); - auto charge = GetDeviceAddress(inputs, 2); - auto boxlength_f = GetDeviceAddress(inputs, 3); - auto a_14 = GetDeviceAddress(inputs, 4); - auto b_14 = GetDeviceAddress(inputs, 5); - auto lj_scale_factor = GetDeviceAddress(inputs, 6); - auto LJ_type_A = GetDeviceAddress(inputs, 7); - auto LJ_type_B = GetDeviceAddress(inputs, 8); - auto frc_f = GetDeviceAddress(outputs, 0); - Dihedral14LJForce(dihedral_14_numbers, atom_numbers, uint_crd_f, LJtype, charge, boxlength_f, a_14, b_14, - lj_scale_factor, LJ_type_A, LJ_type_B, frc_f, reinterpret_cast(stream_ptr)); - return true; - } - - protected: - void InitSizeLists() override { - input_size_list_.push_back(ele_uint_crd * sizeof(T1)); - input_size_list_.push_back(ele_LJtype * sizeof(T1)); - input_size_list_.push_back(ele_charge * sizeof(T)); - input_size_list_.push_back(ele_boxlength_f * sizeof(T)); - input_size_list_.push_back(ele_a_14 * sizeof(T1)); - input_size_list_.push_back(ele_b_14 * sizeof(T1)); - input_size_list_.push_back(ele_lj_scale_factor * sizeof(T)); - input_size_list_.push_back(ele_LJ_type_A * sizeof(T)); - input_size_list_.push_back(ele_LJ_type_B * sizeof(T)); - - output_size_list_.push_back(atom_numbers * 3 * sizeof(T)); - } - - private: - size_t ele_uint_crd = 1; - size_t ele_LJtype = 1; - size_t ele_charge = 1; - size_t ele_boxlength_f = 1; - size_t ele_a_14 = 1; - size_t ele_b_14 = 1; - size_t ele_lj_scale_factor = 1; - size_t ele_LJ_type_A = 1; - size_t ele_LJ_type_B = 1; - - int dihedral_14_numbers; - int atom_numbers; -}; -} // namespace kernel -} // namespace mindspore -#endif // MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_GPU_SPONG_NB14_DIHEDRAL_14_LJ_FORCE_KERNEL_H_ diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/nb14/dihedral_14_lj_force_with_direct_cf_kernel.cc b/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/nb14/dihedral_14_lj_force_with_direct_cf_kernel.cc deleted file mode 100644 index d3635b716c7..00000000000 --- a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/nb14/dihedral_14_lj_force_with_direct_cf_kernel.cc +++ /dev/null @@ -1,36 +0,0 @@ -/** - * Copyright 2021-2022 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
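Dihedral14LJForce writes a flat 3 x atom_numbers buffer (frc_f). Differentiating the 12-6 energy above gives the factor it accumulates along each pair's minimum-image displacement; on the GPU the per-atom accumulation needs atomics, since several 1-4 pairs can touch the same atom. A hedged host-side reference:

    // Force from one 1-4 LJ pair, accumulated into a flat
    // [3 * atom_numbers] buffer like frc_f. dr is the minimum-image
    // displacement r_j - r_i; sign conventions are assumptions.
    void AccumulateLJ14Force(const float dr[3], float A, float B, float lj_scale,
                             int i, int j, float *frc_f) {
      float r2 = dr[0] * dr[0] + dr[1] * dr[1] + dr[2] * dr[2];
      float inv_r2 = 1.0f / r2;
      float inv_r6 = inv_r2 * inv_r2 * inv_r2;
      // -dE/dr resolved along dr: coefficient is 12*A*r^-14 - 6*B*r^-8.
      float coef = lj_scale * (12.0f * A * inv_r6 * inv_r6 - 6.0f * B * inv_r6) * inv_r2;
      for (int d = 0; d < 3; ++d) {
        frc_f[3 * i + d] -= coef * dr[d];  // atomicAdd on device (assumption)
        frc_f[3 * j + d] += coef * dr[d];
      }
    }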
- */ - -#include "plugin/device/gpu/kernel/sponge/nb14/dihedral_14_lj_force_with_direct_cf_kernel.h" - -namespace mindspore { -namespace kernel { -MS_REG_GPU_KERNEL_TWO(Dihedral14LJForceWithDirectCF, - KernelAttr() - .AddInputAttr(kNumberTypeUInt32) - .AddInputAttr(kNumberTypeInt32) - .AddInputAttr(kNumberTypeFloat32) - .AddInputAttr(kNumberTypeFloat32) - .AddInputAttr(kNumberTypeInt32) - .AddInputAttr(kNumberTypeInt32) - .AddInputAttr(kNumberTypeFloat32) - .AddInputAttr(kNumberTypeFloat32) - .AddInputAttr(kNumberTypeFloat32) - .AddInputAttr(kNumberTypeFloat32) - .AddOutputAttr(kNumberTypeFloat32), - Dihedral14LJForceWithDirectCFGpuKernelMod, float, int) -} // namespace kernel -} // namespace mindspore diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/nb14/dihedral_14_lj_force_with_direct_cf_kernel.h b/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/nb14/dihedral_14_lj_force_with_direct_cf_kernel.h deleted file mode 100644 index 27b080c00df..00000000000 --- a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/nb14/dihedral_14_lj_force_with_direct_cf_kernel.h +++ /dev/null @@ -1,122 +0,0 @@ -/** - * Copyright 2021-2022 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_GPU_SPONG_NB14_DIHEDRAL_14_LJ_FORCE_WITH_DIRECT_CF_KERNEL_H_ -#define MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_GPU_SPONG_NB14_DIHEDRAL_14_LJ_FORCE_WITH_DIRECT_CF_KERNEL_H_ - -#include -#include -#include -#include -#include "plugin/device/gpu/kernel/gpu_kernel.h" -#include "plugin/device/gpu/kernel/gpu_kernel_factory.h" -#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/cuda_common.h" -#include "plugin/device/gpu/kernel/cuda_impl/sponge/nb14/dihedral_14_lj_force_with_direct_cf_impl.cuh" - -namespace mindspore { -namespace kernel { -template -class Dihedral14LJForceWithDirectCFGpuKernelMod : public DeprecatedNativeGpuKernelMod { - public: - Dihedral14LJForceWithDirectCFGpuKernelMod() : ele_uint_crd(1) {} - ~Dihedral14LJForceWithDirectCFGpuKernelMod() override = default; - - bool Init(const CNodePtr &kernel_node) override { - kernel_node_ = kernel_node; - dihedral_14_numbers = static_cast(GetAttr(kernel_node, "dihedral_14_numbers")); - atom_numbers = static_cast(GetAttr(kernel_node, "atom_numbers")); - - auto shape_uint_crd = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 0); - auto shape_LJtype = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 1); - auto shape_charge = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 2); - auto shape_boxlength_f = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 3); - auto shape_a_14 = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 4); - auto shape_b_14 = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 5); - auto shape_lj_scale_factor = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 6); - auto shape_cf_scale_factor = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 7); - auto shape_LJ_type_A = 
common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 8); - auto shape_LJ_type_B = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 9); - - ele_uint_crd *= SizeOf(shape_uint_crd); - ele_LJtype *= SizeOf(shape_LJtype); - ele_charge *= SizeOf(shape_charge); - ele_boxlength_f *= SizeOf(shape_boxlength_f); - ele_a_14 *= SizeOf(shape_a_14); - ele_b_14 *= SizeOf(shape_b_14); - ele_lj_scale_factor *= SizeOf(shape_lj_scale_factor); - ele_cf_scale_factor *= SizeOf(shape_cf_scale_factor); - ele_LJ_type_A *= SizeOf(shape_LJ_type_A); - ele_LJ_type_B *= SizeOf(shape_LJ_type_B); - - InitSizeLists(); - return true; - } - - bool Launch(const std::vector &inputs, const std::vector &, - const std::vector &outputs, void *stream_ptr) override { - auto uint_crd_f = GetDeviceAddress(inputs, 0); - auto LJtype = GetDeviceAddress(inputs, 1); - auto charge = GetDeviceAddress(inputs, 2); - auto boxlength_f = GetDeviceAddress(inputs, 3); - auto a_14 = GetDeviceAddress(inputs, 4); - auto b_14 = GetDeviceAddress(inputs, 5); - auto lj_scale_factor = GetDeviceAddress(inputs, 6); - auto cf_scale_factor = GetDeviceAddress(inputs, 7); - auto LJ_type_A = GetDeviceAddress(inputs, 8); - auto LJ_type_B = GetDeviceAddress(inputs, 9); - auto frc_f = GetDeviceAddress(outputs, 0); - - Dihedral14LJForceWithDirectCF(dihedral_14_numbers, atom_numbers, uint_crd_f, LJtype, charge, boxlength_f, a_14, - b_14, lj_scale_factor, cf_scale_factor, LJ_type_A, LJ_type_B, frc_f, - reinterpret_cast(stream_ptr)); - - return true; - } - - protected: - void InitSizeLists() override { - input_size_list_.push_back(ele_uint_crd * sizeof(T1)); - input_size_list_.push_back(ele_LJtype * sizeof(T1)); - input_size_list_.push_back(ele_charge * sizeof(T)); - input_size_list_.push_back(ele_boxlength_f * sizeof(T)); - input_size_list_.push_back(ele_a_14 * sizeof(T1)); - input_size_list_.push_back(ele_b_14 * sizeof(T1)); - input_size_list_.push_back(ele_lj_scale_factor * sizeof(T)); - input_size_list_.push_back(ele_cf_scale_factor * sizeof(T)); - input_size_list_.push_back(ele_LJ_type_A * sizeof(T)); - input_size_list_.push_back(ele_LJ_type_B * sizeof(T)); - - output_size_list_.push_back(atom_numbers * 3 * sizeof(T)); - } - - private: - size_t ele_uint_crd = 1; - size_t ele_LJtype = 1; - size_t ele_charge = 1; - size_t ele_boxlength_f = 1; - size_t ele_a_14 = 1; - size_t ele_b_14 = 1; - size_t ele_lj_scale_factor = 1; - size_t ele_cf_scale_factor = 1; - size_t ele_LJ_type_A = 1; - size_t ele_LJ_type_B = 1; - - int dihedral_14_numbers; - int atom_numbers; -}; -} // namespace kernel -} // namespace mindspore -#endif // MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_GPU_SPONG_NB14_DIHEDRAL_14_LJ_FORCE_WITH_DIRECT_CF_KERNEL_H_ diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/neighbor_list/neighbor_list_update_kernel.cc b/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/neighbor_list/neighbor_list_update_kernel.cc deleted file mode 100644 index e3d0d4230cd..00000000000 --- a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/neighbor_list/neighbor_list_update_kernel.cc +++ /dev/null @@ -1,49 +0,0 @@ -/** - * Copyright 2021-2022 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
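Dihedral14LJForceWithDirectCF fuses the two 1-4 corrections into one pass: the same 12-6 term as above plus the direct-space Coulomb contribution, each weighted by its own per-pair scale factor (the lj_scale_factor and cf_scale_factor inputs). In energy terms the fusion is simply the scaled sum; a sketch, with charges assumed to be in the code's internal units so no explicit Coulomb constant appears:

    // Fused 1-4 correction for one pair: scaled 12-6 LJ plus scaled direct
    // Coulomb (normalization of A, B and the charge units are assumptions).
    float Fused14Energy(float r, float A, float B, float qi, float qj,
                        float lj_scale, float cf_scale) {
      float inv_r = 1.0f / r;
      float inv_r6 = inv_r * inv_r;
      inv_r6 = inv_r6 * inv_r6 * inv_r6;
      return lj_scale * (A * inv_r6 * inv_r6 - B * inv_r6) + cf_scale * qi * qj * inv_r;
    }

The force variant of this kernel differentiates both terms and accumulates per atom, as in the sketch after the plain LJ force kernel above.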
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -/** - * Note: - * NeighborListUpdate. This is an experimental interface that is subject to change and/or deletion. - */ - -#include "plugin/device/gpu/kernel/sponge/neighbor_list/neighbor_list_update_kernel.h" - -namespace mindspore { -namespace kernel { -MS_REG_GPU_KERNEL_TWO(NeighborListUpdate, - KernelAttr() - .AddInputAttr(kNumberTypeInt32) - .AddInputAttr(kNumberTypeInt32) - .AddInputAttr(kNumberTypeFloat32) - .AddInputAttr(kNumberTypeFloat32) - .AddInputAttr(kNumberTypeInt32) - .AddInputAttr(kNumberTypeFloat32) - .AddInputAttr(kNumberTypeInt32) - .AddInputAttr(kNumberTypeFloat32) - .AddInputAttr(kNumberTypeFloat32) - .AddInputAttr(kNumberTypeUInt32) - .AddInputAttr(kNumberTypeInt32) - .AddInputAttr(kNumberTypeInt32) - .AddInputAttr(kNumberTypeInt32) - .AddInputAttr(kNumberTypeFloat32) - .AddInputAttr(kNumberTypeInt32) - .AddInputAttr(kNumberTypeInt32) - .AddInputAttr(kNumberTypeInt32) - .AddInputAttr(kNumberTypeInt32) - .AddInputAttr(kNumberTypeInt32) - .AddOutputAttr(kNumberTypeFloat32), - NeighborListUpdateGpuKernelMod, int, float) -} // namespace kernel -} // namespace mindspore diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/neighbor_list/neighbor_list_update_kernel.h b/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/neighbor_list/neighbor_list_update_kernel.h deleted file mode 100644 index 5140b57a609..00000000000 --- a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/neighbor_list/neighbor_list_update_kernel.h +++ /dev/null @@ -1,198 +0,0 @@ -/** - * Copyright 2021-2022 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -/** - * Note: - * NeighborListUpdate. This is an experimental interface that is subject to change and/or deletion. 
- */ - -#ifndef MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_GPU_SPONGE_NEIGHBOR_LIST_UPDATE_KERNEL_H_ -#define MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_GPU_SPONGE_NEIGHBOR_LIST_UPDATE_KERNEL_H_ - -#include -#include -#include -#include -#include "plugin/device/gpu/kernel/gpu_kernel.h" -#include "plugin/device/gpu/kernel/gpu_kernel_factory.h" -#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/cuda_common.h" -#include "plugin/device/gpu/kernel/cuda_impl/sponge/neighbor_list/neighbor_list_impl.cuh" - -namespace mindspore { -namespace kernel { -constexpr float kSkinDef = 2.0; -constexpr float kCutoffDef = 10.0; -constexpr int kMaxAtomInGridNumDef = 64; -constexpr int kMaxNbNumDef = 800; - -constexpr size_t kIdx3 = 3; -constexpr size_t kIdx5 = 5; -constexpr size_t kIdx6 = 6; -constexpr size_t kIdx8 = 8; -constexpr size_t kIdx2 = 2; -constexpr size_t kIdx4 = 4; -constexpr size_t kIdx9 = 9; -constexpr size_t kIdx16 = 16; -constexpr size_t kIdx10 = 10; -constexpr size_t kIdx7 = 7; -constexpr size_t kIdx14 = 14; -constexpr size_t kIdx12 = 12; -constexpr size_t kIdx11 = 11; -constexpr size_t kIdx13 = 13; -constexpr size_t kIdx18 = 18; -constexpr size_t kIdx15 = 15; -constexpr size_t kIdx17 = 17; - -template -class NeighborListUpdateGpuKernelMod : public DeprecatedNativeGpuKernelMod { - public: - NeighborListUpdateGpuKernelMod() - : skin(kSkinDef), - cutoff(kCutoffDef), - max_atom_in_grid_numbers(kMaxAtomInGridNumDef), - max_neighbor_numbers(kMaxNbNumDef) {} - ~NeighborListUpdateGpuKernelMod() override = default; - bool Init(const CNodePtr &kernel_node) override { - kernel_node_ = kernel_node; - grid_numbers = static_cast(GetAttr(kernel_node, "grid_numbers")); - atom_numbers = static_cast(GetAttr(kernel_node, "atom_numbers")); - refresh_interval = static_cast(GetAttr(kernel_node, "refresh_interval")); - not_first_time = static_cast(GetAttr(kernel_node, "not_first_time")); - nxy = static_cast(GetAttr(kernel_node, "nxy")); - excluded_atom_numbers = static_cast(GetAttr(kernel_node, "excluded_atom_numbers")); - - cutoff_square = static_cast(GetAttr(kernel_node, "cutoff_square")); - half_skin_square = static_cast(GetAttr(kernel_node, "half_skin_square")); - cutoff_with_skin = static_cast(GetAttr(kernel_node, "cutoff_with_skin")); - half_cutoff_with_skin = static_cast(GetAttr(kernel_node, "half_cutoff_with_skin")); - cutoff_with_skin_square = static_cast(GetAttr(kernel_node, "cutoff_with_skin_square")); - h_bucket.resize(grid_numbers); - h_gpointer.resize(grid_numbers); - InitSizeLists(); - return true; - } - - bool Launch(const std::vector &inputs, const std::vector &workspaces, - const std::vector &outputs, void *stream_ptr) override { - auto atom_numbers_in_grid_bucket = GetDeviceAddress(inputs, 0); - auto bucket = GetDeviceAddress(inputs, 1); - auto crd = GetDeviceAddress(inputs, kIdx2); - auto box_length = GetDeviceAddress(inputs, kIdx3); - auto grid_n = GetDeviceAddress(inputs, kIdx4); - auto grid_length_inverse = GetDeviceAddress(inputs, kIdx5); - auto atom_in_grid_serial = GetDeviceAddress(inputs, kIdx6); - auto old_crd = GetDeviceAddress(inputs, kIdx7); - auto crd_to_uint_crd_cof = GetDeviceAddress(inputs, kIdx8); - auto uint_crd = GetDeviceAddress(inputs, kIdx9); - auto gpointer = GetDeviceAddress(inputs, kIdx10); - auto nl_atom_numbers = GetDeviceAddress(inputs, kIdx11); - auto nl_atom_serial = GetDeviceAddress(inputs, kIdx12); - auto uint_dr_to_dr_cof = GetDeviceAddress(inputs, kIdx13); - auto excluded_list_start = GetDeviceAddress(inputs, kIdx14); - auto excluded_list = GetDeviceAddress(inputs, 
kIdx15); - auto excluded_numbers = GetDeviceAddress(inputs, kIdx16); - auto need_refresh_flag = GetDeviceAddress(inputs, kIdx17); - auto d_refresh_count = GetDeviceAddress(inputs, kIdx18); - - GRID_BUCKET *d_bucket = reinterpret_cast(GetDeviceAddress(workspaces, 0)); - GRID_POINTER *d_gpointer = reinterpret_cast(GetDeviceAddress(workspaces, 1)); - NEIGHBOR_LIST *nl = GetDeviceAddress(workspaces, kIdx2); - float *half_crd_to_uint_crd_cof = GetDeviceAddress(workspaces, kIdx3); - - for (size_t i = 0; i < h_bucket.size(); i += 1) { - h_bucket[i].atom_serial = bucket + i * max_atom_in_grid_numbers; - } - const size_t kGridSize = 125; - for (size_t i = 0; i < h_gpointer.size(); i += 1) { - h_gpointer[i].grid_serial = gpointer + i * kGridSize; - } - - cudaMemcpyAsync(d_bucket, h_bucket.data(), sizeof(GRID_BUCKET) * grid_numbers, cudaMemcpyHostToDevice, - reinterpret_cast(stream_ptr)); - cudaMemcpyAsync(d_gpointer, h_gpointer.data(), sizeof(GRID_POINTER) * grid_numbers, cudaMemcpyHostToDevice, - reinterpret_cast(stream_ptr)); - ConstructNeighborListHalf(atom_numbers, max_neighbor_numbers, nl_atom_numbers, nl_atom_serial, nl, - reinterpret_cast(stream_ptr)); - NeighborListUpdate(grid_numbers, atom_numbers, d_refresh_count, refresh_interval, not_first_time, skin, nxy, - cutoff_square, cutoff_with_skin_square, grid_n, box_length, atom_numbers_in_grid_bucket, - grid_length_inverse, atom_in_grid_serial, d_bucket, crd, old_crd, crd_to_uint_crd_cof, - half_crd_to_uint_crd_cof, uint_crd, uint_dr_to_dr_cof, d_gpointer, nl, excluded_list_start, - excluded_list, excluded_numbers, half_skin_square, need_refresh_flag, - reinterpret_cast(stream_ptr)); - CopyNeighborListHalf(atom_numbers, nl, nl_atom_numbers, reinterpret_cast(stream_ptr)); - return true; - } - - protected: - void InitSizeLists() override { - input_size_list_.push_back(sizeof(int) * grid_numbers); - input_size_list_.push_back(sizeof(int) * max_atom_in_grid_numbers * grid_numbers); - input_size_list_.push_back(sizeof(VECTOR) * atom_numbers); - input_size_list_.push_back(sizeof(VECTOR)); - - input_size_list_.push_back(sizeof(INT_VECTOR)); - input_size_list_.push_back(sizeof(VECTOR)); - input_size_list_.push_back(sizeof(int) * atom_numbers); - - input_size_list_.push_back(sizeof(VECTOR) * atom_numbers); - input_size_list_.push_back(sizeof(VECTOR)); - input_size_list_.push_back(sizeof(UNSIGNED_INT_VECTOR) * atom_numbers); - - const size_t kGridSize = 125; - input_size_list_.push_back(sizeof(int) * grid_numbers * kGridSize); - input_size_list_.push_back(sizeof(int) * atom_numbers); - input_size_list_.push_back(sizeof(int) * atom_numbers * max_neighbor_numbers); - input_size_list_.push_back(sizeof(VECTOR)); - - input_size_list_.push_back(sizeof(int) * atom_numbers); - input_size_list_.push_back(sizeof(int) * excluded_atom_numbers); - input_size_list_.push_back(sizeof(int) * atom_numbers); - - input_size_list_.push_back(sizeof(int)); - input_size_list_.push_back(sizeof(int)); - - workspace_size_list_.push_back(sizeof(GRID_BUCKET) * grid_numbers); - workspace_size_list_.push_back(sizeof(GRID_POINTER) * grid_numbers); - workspace_size_list_.push_back(sizeof(NEIGHBOR_LIST) * atom_numbers); - const size_t kCrdSize = 3; - workspace_size_list_.push_back(sizeof(float) * kCrdSize); - - output_size_list_.push_back(sizeof(float)); - } - - private: - float skin; - float cutoff; - int not_first_time; - int grid_numbers; - int refresh_interval; - int atom_numbers; - int nxy; - int max_atom_in_grid_numbers; - int max_neighbor_numbers; - int excluded_atom_numbers; - 
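The Launch() above contains the one genuinely subtle piece of host code in this file: GRID_BUCKET and GRID_POINTER are structs of device pointers, assembled on the host by slicing the flat device allocations (bucket, gpointer) and then shipped to the device in one cudaMemcpyAsync per table. The pattern, isolated (struct layout assumed from usage; the real definitions live in neighbor_list_impl.cuh, also deleted by this patch):

    #include <cuda_runtime.h>
    #include <vector>

    struct GRID_BUCKET { int *atom_serial; };  // layout assumed from usage

    // Slice one flat device buffer into per-grid views and upload the
    // pointer table. h_bucket must stay alive until the async copy
    // completes; the kernel keeps it as a member for exactly that reason,
    // so no synchronization is needed there.
    void UploadBuckets(int grid_numbers, int max_atom_in_grid_numbers,
                       int *d_flat_bucket, GRID_BUCKET *d_bucket,
                       std::vector<GRID_BUCKET> *h_bucket, cudaStream_t stream) {
      h_bucket->resize(grid_numbers);
      for (int i = 0; i < grid_numbers; ++i) {
        (*h_bucket)[i].atom_serial = d_flat_bucket + i * max_atom_in_grid_numbers;
      }
      cudaMemcpyAsync(d_bucket, h_bucket->data(), sizeof(GRID_BUCKET) * grid_numbers,
                      cudaMemcpyHostToDevice, stream);
    }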
float half_skin_square; - float cutoff_square; - float cutoff_with_skin; - float half_cutoff_with_skin; - float cutoff_with_skin_square; - - std::vector h_bucket; - std::vector h_gpointer; -}; -} // namespace kernel -} // namespace mindspore - -#endif diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/neighbor_list/neighbor_list_update_new_kernel.cc b/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/neighbor_list/neighbor_list_update_new_kernel.cc deleted file mode 100644 index a224be13136..00000000000 --- a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/neighbor_list/neighbor_list_update_new_kernel.cc +++ /dev/null @@ -1,49 +0,0 @@ -/** - * Copyright 2021-2022 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -/** - * Note: - * NeighborListRefresh. This is an experimental interface that is subject to change and/or deletion. - */ - -#include "plugin/device/gpu/kernel/sponge/neighbor_list/neighbor_list_update_new_kernel.h" - -namespace mindspore { -namespace kernel { -MS_REG_GPU_KERNEL_TWO(NeighborListRefresh, - KernelAttr() - .AddInputAttr(kNumberTypeInt32) - .AddInputAttr(kNumberTypeInt32) - .AddInputAttr(kNumberTypeFloat32) - .AddInputAttr(kNumberTypeFloat32) - .AddInputAttr(kNumberTypeInt32) - .AddInputAttr(kNumberTypeFloat32) - .AddInputAttr(kNumberTypeInt32) - .AddInputAttr(kNumberTypeFloat32) - .AddInputAttr(kNumberTypeFloat32) - .AddInputAttr(kNumberTypeUInt32) - .AddInputAttr(kNumberTypeInt32) - .AddInputAttr(kNumberTypeInt32) - .AddInputAttr(kNumberTypeInt32) - .AddInputAttr(kNumberTypeFloat32) - .AddInputAttr(kNumberTypeInt32) - .AddInputAttr(kNumberTypeInt32) - .AddInputAttr(kNumberTypeInt32) - .AddInputAttr(kNumberTypeInt32) - .AddInputAttr(kNumberTypeInt32) - .AddOutputAttr(kNumberTypeFloat32), - NeighborListUpdateNewGpuKernelMod, int, float) -} // namespace kernel -} // namespace mindspore diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/neighbor_list/neighbor_list_update_new_kernel.h b/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/neighbor_list/neighbor_list_update_new_kernel.h deleted file mode 100644 index bbfdbb671e5..00000000000 --- a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/neighbor_list/neighbor_list_update_new_kernel.h +++ /dev/null @@ -1,203 +0,0 @@ -/** - * Copyright 2021-2022 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -/** - * Note: - * NeighborListRefresh. This is an experimental interface that is subject to change and/or deletion. 
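The cutoff/skin members above encode the classic Verlet-list bargain: the list is built with radius cutoff + skin and only rebuilt once some atom has drifted more than skin/2 from old_crd, which is what half_skin_square (= (skin/2)^2, inferred from the name) is compared against. A host-side sketch of that staleness test (the real check runs on the device):

    #include <cstddef>

    // True when the neighbor list built from old_crd is stale, i.e. some
    // atom moved farther than skin/2; squared comparison avoids the sqrt.
    bool NeedRefresh(std::size_t atom_numbers, const float *crd,
                     const float *old_crd, float half_skin_square) {
      for (std::size_t i = 0; i < 3 * atom_numbers; i += 3) {
        float dx = crd[i] - old_crd[i];
        float dy = crd[i + 1] - old_crd[i + 1];
        float dz = crd[i + 2] - old_crd[i + 2];
        if (dx * dx + dy * dy + dz * dz > half_skin_square) return true;
      }
      return false;
    }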
- */ - -#ifndef MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_GPU_SPONGE_NEIGHBOR_LIST_UPDATE_NEW_KERNEL_H_ -#define MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_GPU_SPONGE_NEIGHBOR_LIST_UPDATE_NEW_KERNEL_H_ - -#include -#include -#include -#include "plugin/device/gpu/kernel/gpu_kernel.h" -#include "plugin/device/gpu/kernel/gpu_kernel_factory.h" -#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/cuda_common.h" -#include "plugin/device/gpu/kernel/cuda_impl/sponge/neighbor_list/neighbor_list_impl.cuh" - -namespace mindspore { -namespace kernel { -constexpr float kSkinDef = 2.0; -constexpr float kCutoffDef = 9.0; -constexpr int kMaxAtomInGridNumDef = 64; -constexpr int kMaxNbNumDef = 800; - -constexpr size_t kIdx2 = 2; -constexpr size_t kIdx3 = 3; -constexpr size_t kIdx4 = 4; -constexpr size_t kIdx5 = 5; -constexpr size_t kIdx6 = 6; -constexpr size_t kIdx7 = 7; -constexpr size_t kIdx8 = 8; -constexpr size_t kIdx9 = 9; -constexpr size_t kIdx10 = 10; -constexpr size_t kIdx11 = 11; -constexpr size_t kIdx12 = 12; -constexpr size_t kIdx13 = 13; -constexpr size_t kIdx14 = 14; -constexpr size_t kIdx15 = 15; -constexpr size_t kIdx16 = 16; -constexpr size_t kIdx17 = 17; -constexpr size_t kIdx18 = 18; - -template -class NeighborListUpdateNewGpuKernelMod : public DeprecatedNativeGpuKernelMod { - public: - NeighborListUpdateNewGpuKernelMod() - : skin(kSkinDef), - cutoff(kCutoffDef), - max_atom_in_grid_numbers(kMaxAtomInGridNumDef), - max_neighbor_numbers(kMaxNbNumDef) {} - ~NeighborListUpdateNewGpuKernelMod() override = default; - bool Init(const CNodePtr &kernel_node) override { - kernel_node_ = kernel_node; - grid_numbers = static_cast(GetAttr(kernel_node, "grid_numbers")); - atom_numbers = static_cast(GetAttr(kernel_node, "atom_numbers")); - refresh_interval = static_cast(GetAttr(kernel_node, "refresh_interval")); - not_first_time = static_cast(GetAttr(kernel_node, "not_first_time")); - nxy = static_cast(GetAttr(kernel_node, "nxy")); - excluded_atom_numbers = static_cast(GetAttr(kernel_node, "excluded_atom_numbers")); - - cutoff_square = static_cast(GetAttr(kernel_node, "cutoff_square")); - half_skin_square = static_cast(GetAttr(kernel_node, "half_skin_square")); - cutoff_with_skin = static_cast(GetAttr(kernel_node, "cutoff_with_skin")); - half_cutoff_with_skin = static_cast(GetAttr(kernel_node, "half_cutoff_with_skin")); - cutoff_with_skin_square = static_cast(GetAttr(kernel_node, "cutoff_with_skin_square")); - forced_update = static_cast(GetAttr(kernel_node, "forced_update")); - forced_check = static_cast(GetAttr(kernel_node, "forced_check")); - h_bucket.resize(grid_numbers); - h_gpointer.resize(grid_numbers); - InitSizeLists(); - return true; - } - - bool Launch(const std::vector &inputs, const std::vector &workspaces, - const std::vector &outputs, void *stream_ptr) override { - auto atom_numbers_in_grid_bucket = GetDeviceAddress(inputs, 0); - auto bucket = GetDeviceAddress(inputs, 1); - auto crd = GetDeviceAddress(inputs, kIdx2); - auto box_length = GetDeviceAddress(inputs, kIdx3); - auto grid_n = GetDeviceAddress(inputs, kIdx4); - auto grid_length_inverse = GetDeviceAddress(inputs, kIdx5); - auto atom_in_grid_serial = GetDeviceAddress(inputs, kIdx6); - auto old_crd = GetDeviceAddress(inputs, kIdx7); - auto crd_to_uint_crd_cof = GetDeviceAddress(inputs, kIdx8); - auto uint_crd = GetDeviceAddress(inputs, kIdx9); - auto gpointer = GetDeviceAddress(inputs, kIdx10); - auto nl_atom_numbers = GetDeviceAddress(inputs, kIdx11); - auto nl_atom_serial = GetDeviceAddress(inputs, kIdx12); - auto uint_dr_to_dr_cof = 
GetDeviceAddress(inputs, kIdx13); - auto excluded_list_start = GetDeviceAddress(inputs, kIdx14); - auto excluded_list = GetDeviceAddress(inputs, kIdx15); - auto excluded_numbers = GetDeviceAddress(inputs, kIdx16); - auto need_refresh_flag = GetDeviceAddress(inputs, kIdx17); - auto d_refresh_count = GetDeviceAddress(inputs, kIdx18); - - GRID_BUCKET *d_bucket = reinterpret_cast(GetDeviceAddress(workspaces, 0)); - GRID_POINTER *d_gpointer = reinterpret_cast(GetDeviceAddress(workspaces, 1)); - NEIGHBOR_LIST *nl = GetDeviceAddress(workspaces, kIdx2); - float *half_crd_to_uint_crd_cof = GetDeviceAddress(workspaces, kIdx3); - - for (size_t i = 0; i < h_bucket.size(); i += 1) { - h_bucket[i].atom_serial = bucket + i * max_atom_in_grid_numbers; - } - const int kGridSize = 125; - for (size_t i = 0; i < h_gpointer.size(); i += 1) { - h_gpointer[i].grid_serial = gpointer + i * kGridSize; - } - - cudaMemcpyAsync(d_bucket, h_bucket.data(), sizeof(GRID_BUCKET) * grid_numbers, cudaMemcpyHostToDevice, - reinterpret_cast(stream_ptr)); - cudaMemcpyAsync(d_gpointer, h_gpointer.data(), sizeof(GRID_POINTER) * grid_numbers, cudaMemcpyHostToDevice, - reinterpret_cast(stream_ptr)); - ConstructNeighborList(atom_numbers, max_neighbor_numbers, nl_atom_numbers, nl_atom_serial, nl, - reinterpret_cast(stream_ptr)); - - NeighborListRefresh(grid_numbers, atom_numbers, d_refresh_count, refresh_interval, not_first_time, skin, nxy, - cutoff_square, cutoff_with_skin_square, grid_n, box_length, atom_numbers_in_grid_bucket, - grid_length_inverse, atom_in_grid_serial, d_bucket, crd, old_crd, crd_to_uint_crd_cof, - half_crd_to_uint_crd_cof, uint_crd, uint_dr_to_dr_cof, d_gpointer, nl, excluded_list_start, - excluded_list, excluded_numbers, half_skin_square, need_refresh_flag, forced_update, - forced_check, reinterpret_cast(stream_ptr)); - CopyNeighborList(atom_numbers, max_neighbor_numbers, nl, nl_atom_numbers, nl_atom_serial, - reinterpret_cast(stream_ptr)); - return true; - } - - protected: - void InitSizeLists() override { - input_size_list_.push_back(sizeof(int) * grid_numbers); - input_size_list_.push_back(sizeof(int) * max_atom_in_grid_numbers * grid_numbers); - input_size_list_.push_back(sizeof(VECTOR) * atom_numbers); - input_size_list_.push_back(sizeof(VECTOR)); - - input_size_list_.push_back(sizeof(INT_VECTOR)); - input_size_list_.push_back(sizeof(VECTOR)); - input_size_list_.push_back(sizeof(int) * atom_numbers); - - input_size_list_.push_back(sizeof(VECTOR) * atom_numbers); - input_size_list_.push_back(sizeof(VECTOR)); - input_size_list_.push_back(sizeof(UNSIGNED_INT_VECTOR) * atom_numbers); - - const size_t kGridSize = 125; - input_size_list_.push_back(sizeof(int) * grid_numbers * kGridSize); - input_size_list_.push_back(sizeof(int) * atom_numbers); - input_size_list_.push_back(sizeof(int) * atom_numbers * max_neighbor_numbers); - input_size_list_.push_back(sizeof(VECTOR)); - - input_size_list_.push_back(sizeof(int) * atom_numbers); - input_size_list_.push_back(sizeof(int) * excluded_atom_numbers); - input_size_list_.push_back(sizeof(int) * atom_numbers); - - input_size_list_.push_back(sizeof(int)); - input_size_list_.push_back(sizeof(int)); - - workspace_size_list_.push_back(sizeof(GRID_BUCKET) * grid_numbers); - workspace_size_list_.push_back(sizeof(GRID_POINTER) * grid_numbers); - workspace_size_list_.push_back(sizeof(NEIGHBOR_LIST) * atom_numbers); - const size_t kCrtSize = 3; - workspace_size_list_.push_back(sizeof(float) * kCrtSize); - - output_size_list_.push_back(sizeof(float)); - } - - private: - float skin; 
- float cutoff; - int not_first_time; - int atom_numbers; - int grid_numbers; - int refresh_interval; - int nxy; - int max_atom_in_grid_numbers; - int max_neighbor_numbers; - int excluded_atom_numbers; - float half_skin_square; - float cutoff_square; - float cutoff_with_skin; - float half_cutoff_with_skin; - float cutoff_with_skin_square; - int forced_update; - int forced_check; - - std::vector h_bucket; - std::vector h_gpointer; -}; -} // namespace kernel -} // namespace mindspore - -#endif diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/nvtit/md_iteration_gradient_descent_kernel.cc b/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/nvtit/md_iteration_gradient_descent_kernel.cc deleted file mode 100644 index 4bcfb32bf3f..00000000000 --- a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/nvtit/md_iteration_gradient_descent_kernel.cc +++ /dev/null @@ -1,26 +0,0 @@ -/** - * Copyright 2021-2022 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "plugin/device/gpu/kernel/sponge/nvtit/md_iteration_gradient_descent_kernel.h" - -namespace mindspore { -namespace kernel { -MS_REG_GPU_KERNEL_ONE( - MDIterationGradientDescent, - KernelAttr().AddInputAttr(kNumberTypeFloat32).AddInputAttr(kNumberTypeFloat32).AddOutputAttr(kNumberTypeFloat32), - MDIterationGradientDescentGpuKernelMod, float) -} // namespace kernel -} // namespace mindspore diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/nvtit/md_iteration_gradient_descent_kernel.h b/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/nvtit/md_iteration_gradient_descent_kernel.h deleted file mode 100644 index 08cf221881f..00000000000 --- a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/nvtit/md_iteration_gradient_descent_kernel.h +++ /dev/null @@ -1,70 +0,0 @@ -/** - * Copyright 2021-2022 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */
-
-#ifndef MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_GPU_MD_ITERATION_GRADIENT_DESCENT_KERNEL_H_
-#define MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_GPU_MD_ITERATION_GRADIENT_DESCENT_KERNEL_H_
-
-#include "plugin/device/gpu/kernel/cuda_impl/sponge/nvtit/md_iteration_gradient_descent_impl.cuh"
-#include
-#include
-#include
-#include
-
-#include "plugin/device/gpu/kernel/gpu_kernel.h"
-#include "plugin/device/gpu/kernel/gpu_kernel_factory.h"
-#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/cuda_common.h"
-
-namespace mindspore {
-namespace kernel {
-template <typename T>
-class MDIterationGradientDescentGpuKernelMod : public DeprecatedNativeGpuKernelMod {
- public:
-  MDIterationGradientDescentGpuKernelMod() {}
-  ~MDIterationGradientDescentGpuKernelMod() override = default;
-
-  bool Init(const CNodePtr &kernel_node) override {
-    // get bond_numbers
-    kernel_node_ = kernel_node;
-    atom_numbers = static_cast<int>(GetAttr<int64_t>(kernel_node, "atom_numbers"));
-    learning_rate = static_cast<float>(GetAttr<float>(kernel_node, "learning_rate"));
-    InitSizeLists();
-    return true;
-  }
-
-  bool Launch(const std::vector<AddressPtr> &inputs, const std::vector<AddressPtr> &workspace,
-              const std::vector<AddressPtr> &outputs, void *stream_ptr) override {
-    auto crd = GetDeviceAddress<T>(inputs, 0);
-    auto frc = GetDeviceAddress<T>(inputs, 1);
-
-    MDIterationGradientDescent(atom_numbers, crd, frc, learning_rate, reinterpret_cast<cudaStream_t>(stream_ptr));
-    return true;
-  }
-
- protected:
-  void InitSizeLists() override {
-    input_size_list_.push_back(atom_numbers * 3 * sizeof(T));
-    input_size_list_.push_back(atom_numbers * 3 * sizeof(T));
-
-    output_size_list_.push_back(sizeof(T));
-  }
-
- private:
-  int atom_numbers;
-  float learning_rate;
-};
-}  // namespace kernel
-}  // namespace mindspore
-#endif
diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/nvtit/md_iteration_leap_frog_kernel.cc b/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/nvtit/md_iteration_leap_frog_kernel.cc
deleted file mode 100644
index 422e18a285a..00000000000
--- a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/nvtit/md_iteration_leap_frog_kernel.cc
+++ /dev/null
@@ -1,31 +0,0 @@
-/**
- * Copyright 2021-2022 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "plugin/device/gpu/kernel/sponge/nvtit/md_iteration_leap_frog_kernel.h"
-
-namespace mindspore {
-namespace kernel {
-MS_REG_GPU_KERNEL_ONE(MDIterationLeapFrog,
-                      KernelAttr()
-                        .AddInputAttr(kNumberTypeFloat32)
-                        .AddInputAttr(kNumberTypeFloat32)
-                        .AddInputAttr(kNumberTypeFloat32)
-                        .AddInputAttr(kNumberTypeFloat32)
-                        .AddInputAttr(kNumberTypeFloat32)
-                        .AddOutputAttr(kNumberTypeFloat32),
-                      MDIterationLeapFrogGpuKernelMod, float)
-}  // namespace kernel
-}  // namespace mindspore
diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/nvtit/md_iteration_leap_frog_kernel.h b/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/nvtit/md_iteration_leap_frog_kernel.h
deleted file mode 100644
index ff694288559..00000000000
--- a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/nvtit/md_iteration_leap_frog_kernel.h
+++ /dev/null
@@ -1,79 +0,0 @@
-/**
- * Copyright 2021-2022 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-/**
- * Note:
- * MDIterationLeapFrog. This is an experimental interface that is subject to change and/or deletion.
- */
-
-#ifndef MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_GPU_MD_ITERATION_LEAP_FROG_KERNEL_H_
-#define MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_GPU_MD_ITERATION_LEAP_FROG_KERNEL_H_
-
-#include
-#include
-#include
-#include
-
-#include "plugin/device/gpu/kernel/gpu_kernel.h"
-#include "plugin/device/gpu/kernel/gpu_kernel_factory.h"
-#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/cuda_common.h"
-#include "plugin/device/gpu/kernel/cuda_impl/sponge/nvtit/md_iteration_leap_frog_impl.cuh"
-
-namespace mindspore {
-namespace kernel {
-template <typename T>
-class MDIterationLeapFrogGpuKernelMod : public DeprecatedNativeGpuKernelMod {
- public:
-  MDIterationLeapFrogGpuKernelMod() {}
-  ~MDIterationLeapFrogGpuKernelMod() override = default;
-
-  bool Init(const CNodePtr &kernel_node) override {
-    kernel_node_ = kernel_node;
-    atom_numbers = static_cast<int>(GetAttr<int64_t>(kernel_node, "atom_numbers"));
-    dt = static_cast<float>(GetAttr<float>(kernel_node, "dt"));
-    InitSizeLists();
-    return true;
-  }
-
-  bool Launch(const std::vector<AddressPtr> &inputs, const std::vector<AddressPtr> &workspace,
-              const std::vector<AddressPtr> &outputs, void *stream_ptr) override {
-    auto vel = GetDeviceAddress<T>(inputs, 0);
-    auto crd = GetDeviceAddress<T>(inputs, 1);
-    auto frc = GetDeviceAddress<T>(inputs, 2);
-    auto acc = GetDeviceAddress<T>(inputs, 3);
-    auto inverse_mass = GetDeviceAddress<T>(inputs, 4);
-
-    MDIterationLeapFrog(atom_numbers, vel, crd, frc, acc, inverse_mass, dt, reinterpret_cast<cudaStream_t>(stream_ptr));
-    return true;
-  }
-
- protected:
-  void InitSizeLists() override {
-    input_size_list_.push_back(atom_numbers * 3 * sizeof(T));
-    input_size_list_.push_back(atom_numbers * 3 * sizeof(T));
-    input_size_list_.push_back(atom_numbers * 3 * sizeof(T));
-    input_size_list_.push_back(atom_numbers * 3 * sizeof(T));
-    input_size_list_.push_back(atom_numbers * sizeof(T));
-
-    output_size_list_.push_back(sizeof(T));
-  }
-
- private:
-  int atom_numbers;
-  float dt;
-};
-}  // namespace kernel
-}  // namespace mindspore
-#endif
diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/nvtit/md_iteration_leap_frog_liujian_gpu_kernel.cc b/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/nvtit/md_iteration_leap_frog_liujian_gpu_kernel.cc
deleted file mode 100644
index 327bb97a5ab..00000000000
--- a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/nvtit/md_iteration_leap_frog_liujian_gpu_kernel.cc
+++ /dev/null
@@ -1,34 +0,0 @@
-/**
- * Copyright 2021-2022 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "plugin/device/gpu/kernel/sponge/nvtit/md_iteration_leap_frog_liujian_gpu_kernel.h"
-
-namespace mindspore {
-namespace kernel {
-MS_REG_GPU_KERNEL_TWO(MDIterationLeapFrogLiujian,
-                      KernelAttr()
-                        .AddInputAttr(kNumberTypeFloat32)
-                        .AddInputAttr(kNumberTypeFloat32)
-                        .AddInputAttr(kNumberTypeFloat32)
-                        .AddInputAttr(kNumberTypeFloat32)
-                        .AddInputAttr(kNumberTypeFloat32)
-                        .AddInputAttr(kNumberTypeFloat32)
-                        .AddInputAttr(kNumberTypeFloat32)
-                        .AddInputAttr(kNumberTypeFloat32)
-                        .AddOutputAttr(kNumberTypeFloat32),
-                      MDIterationLeapFrogLiujianCudaGpuKernelMod, float, int)
-}  // namespace kernel
-}  // namespace mindspore
diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/nvtit/md_iteration_leap_frog_liujian_gpu_kernel.h b/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/nvtit/md_iteration_leap_frog_liujian_gpu_kernel.h
deleted file mode 100644
index ed2bebf05c7..00000000000
--- a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/nvtit/md_iteration_leap_frog_liujian_gpu_kernel.h
+++ /dev/null
@@ -1,104 +0,0 @@
-/**
- * Copyright 2021-2022 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_GPU_MD_ITERATION_LEAP_FROG_LIUJIAN_GPU_KERNEL_H_
-#define MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_GPU_MD_ITERATION_LEAP_FROG_LIUJIAN_GPU_KERNEL_H_
-
-#include "plugin/device/gpu/kernel/cuda_impl/sponge/nvtit/md_iteration_leap_frog_liujian_gpu_impl.cuh"
-#include
-#include
-#include
-#include
-
-#include "plugin/device/gpu/kernel/gpu_kernel_factory.h"
-#include "plugin/device/gpu/kernel/gpu_kernel.h"
-#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/cuda_common.h"
-
-namespace mindspore {
-namespace kernel {
-constexpr size_t kIdx5 = 5;
-constexpr size_t kIdx4 = 4;
-constexpr size_t kIdx2 = 2;
-constexpr size_t kIdx6 = 6;
-constexpr size_t kIdx7 = 7;
-constexpr size_t kIdx3 = 3;
-
-template <typename T, typename T1>
-class MDIterationLeapFrogLiujianCudaGpuKernelMod : public DeprecatedNativeGpuKernelMod {
- public:
-  MDIterationLeapFrogLiujianCudaGpuKernelMod() {}
-  ~MDIterationLeapFrogLiujianCudaGpuKernelMod() override = default;
-
-  bool Init(const CNodePtr &kernel_node) override {
-    // get bond_numbers
-    kernel_node_ = kernel_node;
-    half_dt = static_cast<float>(GetAttr<float>(kernel_node, "half_dt"));
-    atom_numbers = static_cast<int>(GetAttr<int64_t>(kernel_node, "atom_numbers"));
-    exp_gamma = static_cast<float>(GetAttr<float>(kernel_node, "exp_gamma"));
-    dt = static_cast<float>(GetAttr<float>(kernel_node, "dt"));
-    const double kCoef1 = 3.;
-    const double kCoef2 = 4.;
-    float4_numbers = ceil(kCoef1 * static_cast<double>(atom_numbers) / kCoef2);
-    InitSizeLists();
-    return true;
-  }
-
-  bool Launch(const std::vector<AddressPtr> &inputs, const std::vector<AddressPtr> &workspace,
-              const std::vector<AddressPtr> &outputs, void *stream_ptr) override {
-    auto inverse_mass = GetDeviceAddress<float>(inputs, 0);
-    auto sqrt_mass_inverse = GetDeviceAddress<float>(inputs, 1);
-    auto vel = GetDeviceAddress<float>(inputs, kIdx2);
-    auto frc = GetDeviceAddress<float>(inputs, kIdx4);
-    auto crd = GetDeviceAddress<float>(inputs, kIdx3);
-    auto acc = GetDeviceAddress<float>(inputs, kIdx5);
-    auto rand_frc = GetDeviceAddress<float>(inputs, kIdx7);
-    auto rand_state = GetDeviceAddress<float>(inputs, kIdx6);
-
-    auto output = GetDeviceAddress<T>(outputs, 0);
-
-    MD_Iteration_Leap_Frog_With_LiuJian(atom_numbers, half_dt, dt, exp_gamma, float4_numbers, inverse_mass,
-                                        sqrt_mass_inverse, vel, crd, frc, acc,
-                                        reinterpret_cast<curandStatePhilox4_32_10_t *>(rand_state), rand_frc, output,
-                                        reinterpret_cast<cudaStream_t>(stream_ptr));
-    return true;
-  }
-
- protected:
-  void InitSizeLists() override {
-    const int kAtomSize = 3;
-
-    input_size_list_.push_back(atom_numbers * sizeof(float));
-    input_size_list_.push_back(atom_numbers * sizeof(float));
-    input_size_list_.push_back(atom_numbers * kAtomSize * sizeof(float));
-    input_size_list_.push_back(atom_numbers * kAtomSize * sizeof(float));
-    input_size_list_.push_back(atom_numbers * kAtomSize * sizeof(float));
-    input_size_list_.push_back(atom_numbers * kAtomSize * sizeof(float));
-    input_size_list_.push_back(float4_numbers * sizeof(curandStatePhilox4_32_10_t));
-    input_size_list_.push_back(atom_numbers * kAtomSize * sizeof(float));
-
-    output_size_list_.push_back(atom_numbers * kAtomSize * sizeof(T));
-  }
-
- private:
-  float half_dt;
-  float exp_gamma;
-  float dt;
-  int atom_numbers;
-  int float4_numbers;
-};
-}  // namespace kernel
-}  // namespace mindspore
-#endif
diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/nvtit/md_iteration_leap_frog_liujian_gpu_with_max_vel_kernel.cc b/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/nvtit/md_iteration_leap_frog_liujian_gpu_with_max_vel_kernel.cc
deleted file mode 100644
index 8931ab5793d..00000000000
--- a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/nvtit/md_iteration_leap_frog_liujian_gpu_with_max_vel_kernel.cc
+++ /dev/null
@@ -1,34 +0,0 @@
-/**
- * Copyright 2021-2022 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "plugin/device/gpu/kernel/sponge/nvtit/md_iteration_leap_frog_liujian_gpu_with_max_vel_kernel.h"
-
-namespace mindspore {
-namespace kernel {
-MS_REG_GPU_KERNEL_TWO(MDIterationLeapFrogLiujianWithMaxVel,
-                      KernelAttr()
-                        .AddInputAttr(kNumberTypeFloat32)
-                        .AddInputAttr(kNumberTypeFloat32)
-                        .AddInputAttr(kNumberTypeFloat32)
-                        .AddInputAttr(kNumberTypeFloat32)
-                        .AddInputAttr(kNumberTypeFloat32)
-                        .AddInputAttr(kNumberTypeFloat32)
-                        .AddInputAttr(kNumberTypeFloat32)
-                        .AddInputAttr(kNumberTypeFloat32)
-                        .AddOutputAttr(kNumberTypeFloat32),
-                      MDIterationLeapFrogLiujianMaxVelCudaGpuKernelMod, float, int)
-}  // namespace kernel
-}  // namespace mindspore
diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/nvtit/md_iteration_leap_frog_liujian_gpu_with_max_vel_kernel.h b/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/nvtit/md_iteration_leap_frog_liujian_gpu_with_max_vel_kernel.h
deleted file mode 100644
index ac511bb13c7..00000000000
--- a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/nvtit/md_iteration_leap_frog_liujian_gpu_with_max_vel_kernel.h
+++ /dev/null
@@ -1,105 +0,0 @@
-/**
- * Copyright 2021-2022 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_GPU_SPONG_MD_ITERATION_LEAP_FROG_LIUJIAN_GPU_WITH_MAX_VEL_KERNEL_H_
-#define MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_GPU_SPONG_MD_ITERATION_LEAP_FROG_LIUJIAN_GPU_WITH_MAX_VEL_KERNEL_H_
-
-#include "plugin/device/gpu/kernel/cuda_impl/sponge/nvtit/md_iteration_leap_frog_liujian_with_max_vel_impl.cuh"
-#include
-#include
-#include
-#include
-
-#include "plugin/device/gpu/kernel/gpu_kernel.h"
-#include "plugin/device/gpu/kernel/gpu_kernel_factory.h"
-#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/cuda_common.h"
-
-namespace mindspore {
-namespace kernel {
-constexpr size_t kIdx2 = 2;
-constexpr size_t kIdx3 = 3;
-constexpr size_t kIdx4 = 4;
-constexpr size_t kIdx5 = 5;
-constexpr size_t kIdx6 = 6;
-constexpr size_t kIdx7 = 7;
-template <typename T, typename T1>
-class MDIterationLeapFrogLiujianMaxVelCudaGpuKernelMod : public DeprecatedNativeGpuKernelMod {
- public:
-  MDIterationLeapFrogLiujianMaxVelCudaGpuKernelMod() {}
-  ~MDIterationLeapFrogLiujianMaxVelCudaGpuKernelMod() override = default;
-
-  bool Init(const CNodePtr &kernel_node) override {
-    // get bond_numbers
-    kernel_node_ = kernel_node;
-    atom_numbers = static_cast<int>(GetAttr<int64_t>(kernel_node, "atom_numbers"));
-    half_dt = static_cast<float>(GetAttr<float>(kernel_node, "half_dt"));
-    dt = static_cast<float>(GetAttr<float>(kernel_node, "dt"));
-    exp_gamma = static_cast<float>(GetAttr<float>(kernel_node, "exp_gamma"));
-    const double kCoef1 = 3.;
-    const double kCoef2 = 4.;
-    float4_numbers = ceil(kCoef1 * static_cast<double>(atom_numbers) / kCoef2);
-    max_vel = static_cast<float>(GetAttr<float>(kernel_node, "max_vel"));
-    InitSizeLists();
-    return true;
-  }
-
-  bool Launch(const std::vector<AddressPtr> &inputs, const std::vector<AddressPtr> &workspace,
-              const std::vector<AddressPtr> &outputs, void *stream_ptr) override {
-    auto inverse_mass = GetDeviceAddress<float>(inputs, 0);
-    auto sqrt_mass_inverse = GetDeviceAddress<float>(inputs, 1);
-    auto vel = GetDeviceAddress<float>(inputs, kIdx2);
-    auto crd = GetDeviceAddress<float>(inputs, kIdx3);
-    auto frc = GetDeviceAddress<float>(inputs, kIdx4);
-    auto acc = GetDeviceAddress<float>(inputs, kIdx5);
-    auto rand_state = GetDeviceAddress<float>(inputs, kIdx6);
-    auto rand_frc = GetDeviceAddress<float>(inputs, kIdx7);
-
-    auto output = GetDeviceAddress<T>(outputs, 0);
-
-    MD_Iteration_Leap_Frog_With_LiuJian_With_Max_Vel(
-      atom_numbers, half_dt, dt, exp_gamma, float4_numbers, inverse_mass, sqrt_mass_inverse, vel, crd, frc, acc,
-      reinterpret_cast<curandStatePhilox4_32_10_t *>(rand_state), rand_frc, output, max_vel,
-      reinterpret_cast<cudaStream_t>(stream_ptr));
-    return true;
-  }
-
- protected:
-  void InitSizeLists() override {
-    const int kAtomSize = 3;
-
-    input_size_list_.push_back(atom_numbers * sizeof(float));
-    input_size_list_.push_back(atom_numbers * sizeof(float));
-    input_size_list_.push_back(atom_numbers * kAtomSize * sizeof(float));
-    input_size_list_.push_back(atom_numbers * kAtomSize * sizeof(float));
-    input_size_list_.push_back(atom_numbers * kAtomSize * sizeof(float));
-    input_size_list_.push_back(atom_numbers * kAtomSize * sizeof(float));
-    input_size_list_.push_back(float4_numbers * sizeof(curandStatePhilox4_32_10_t));
-    input_size_list_.push_back(atom_numbers * kAtomSize * sizeof(float));
-
-    output_size_list_.push_back(atom_numbers * kAtomSize * sizeof(T));
-  }
-
- private:
-  int atom_numbers;
-  float half_dt;
-  float dt;
-  float exp_gamma;
-  int float4_numbers;
-  float max_vel;
-};
-}  // namespace kernel
-}  // namespace mindspore
-#endif
diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/nvtit/md_iteration_leap_frog_with_max_vel_kernel.cc b/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/nvtit/md_iteration_leap_frog_with_max_vel_kernel.cc
deleted file mode 100644
index 4ab86af2906..00000000000
--- a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/nvtit/md_iteration_leap_frog_with_max_vel_kernel.cc
+++ /dev/null
@@ -1,31 +0,0 @@
-/**
- * Copyright 2021-2022 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "plugin/device/gpu/kernel/sponge/nvtit/md_iteration_leap_frog_with_max_vel_kernel.h"
-
-namespace mindspore {
-namespace kernel {
-MS_REG_GPU_KERNEL_ONE(MDIterationLeapFrogWithMaxVel,
-                      KernelAttr()
-                        .AddInputAttr(kNumberTypeFloat32)
-                        .AddInputAttr(kNumberTypeFloat32)
-                        .AddInputAttr(kNumberTypeFloat32)
-                        .AddInputAttr(kNumberTypeFloat32)
-                        .AddInputAttr(kNumberTypeFloat32)
-                        .AddOutputAttr(kNumberTypeFloat32),
-                      MDIterationLeapFrogWithMaxVelGpuKernelMod, float)
-}  // namespace kernel
-}  // namespace mindspore
diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/nvtit/md_iteration_leap_frog_with_max_vel_kernel.h b/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/nvtit/md_iteration_leap_frog_with_max_vel_kernel.h
deleted file mode 100644
index cca0bd568da..00000000000
--- a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/nvtit/md_iteration_leap_frog_with_max_vel_kernel.h
+++ /dev/null
@@ -1,79 +0,0 @@
-/**
- * Copyright 2021-2022 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_GPU_MD_ITERATION_LEAP_FROG_WITH_MAX_VEL_KERNEL_H_
-#define MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_GPU_MD_ITERATION_LEAP_FROG_WITH_MAX_VEL_KERNEL_H_
-
-#include "plugin/device/gpu/kernel/cuda_impl/sponge/nvtit/md_iteration_leap_frog_with_max_vel_impl.cuh"
-#include
-#include
-#include
-#include
-
-#include "plugin/device/gpu/kernel/gpu_kernel.h"
-#include "plugin/device/gpu/kernel/gpu_kernel_factory.h"
-#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/cuda_common.h"
-
-namespace mindspore {
-namespace kernel {
-template <typename T>
-class MDIterationLeapFrogWithMaxVelGpuKernelMod : public DeprecatedNativeGpuKernelMod {
- public:
-  MDIterationLeapFrogWithMaxVelGpuKernelMod() {}
-  ~MDIterationLeapFrogWithMaxVelGpuKernelMod() override = default;
-
-  bool Init(const CNodePtr &kernel_node) override {
-    // get bond_numbers
-    kernel_node_ = kernel_node;
-    atom_numbers = static_cast<int>(GetAttr<int64_t>(kernel_node, "atom_numbers"));
-    dt = static_cast<float>(GetAttr<float>(kernel_node, "dt"));
-    max_velocity = static_cast<float>(GetAttr<float>(kernel_node, "max_velocity"));
-    InitSizeLists();
-    return true;
-  }
-
-  bool Launch(const std::vector<AddressPtr> &inputs, const std::vector<AddressPtr> &workspace,
-              const std::vector<AddressPtr> &outputs, void *stream_ptr) override {
-    auto vel = GetDeviceAddress<T>(inputs, 0);
-    auto crd = GetDeviceAddress<T>(inputs, 1);
-    auto frc = GetDeviceAddress<T>(inputs, 2);
-    auto acc = GetDeviceAddress<T>(inputs, 3);
-    auto inverse_mass = GetDeviceAddress<T>(inputs, 4);
-
-    MDIterationLeapFrogWithMaxVelocity(atom_numbers, vel, crd, frc, acc, inverse_mass, dt, max_velocity,
-                                       reinterpret_cast<cudaStream_t>(stream_ptr));
-    return true;
-  }
-
- protected:
-  void InitSizeLists() override {
-    input_size_list_.push_back(atom_numbers * 3 * sizeof(T));
-    input_size_list_.push_back(atom_numbers * 3 * sizeof(T));
-    input_size_list_.push_back(atom_numbers * 3 * sizeof(T));
-    input_size_list_.push_back(atom_numbers * 3 * sizeof(T));
-    input_size_list_.push_back(atom_numbers * sizeof(T));
-
-    output_size_list_.push_back(sizeof(T));
-  }
-
- private:
-  int atom_numbers;
-  float dt;
-  float max_velocity;
-};
-}  // namespace kernel
-}  // namespace mindspore
-#endif
diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/nvtit/md_iteration_setup_random_state.cc b/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/nvtit/md_iteration_setup_random_state.cc
deleted file mode 100644
index 0dec3545eb0..00000000000
--- a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/nvtit/md_iteration_setup_random_state.cc
+++ /dev/null
@@ -1,24 +0,0 @@
-/**
- * Copyright 2021-2022 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "plugin/device/gpu/kernel/sponge/nvtit/md_iteration_setup_random_state.h"
-
-namespace mindspore {
-namespace kernel {
-MS_REG_GPU_KERNEL_TWO(MDIterationSetupRandState, KernelAttr().AddOutputAttr(kNumberTypeFloat32),
-                      MDIterationSetupRandStateGpuKernelMod, float, int)
-}  // namespace kernel
-}  // namespace mindspore
diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/nvtit/md_iteration_setup_random_state.h b/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/nvtit/md_iteration_setup_random_state.h
deleted file mode 100644
index 8dea9ef84a2..00000000000
--- a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/nvtit/md_iteration_setup_random_state.h
+++ /dev/null
@@ -1,67 +0,0 @@
-/**
- * Copyright 2021-2022 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_GPU_MD_ITERATION_SETUP_RANDOM_STATE_GPU_KERNEL_H_
-#define MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_GPU_MD_ITERATION_SETUP_RANDOM_STATE_GPU_KERNEL_H_
-
-#include "plugin/device/gpu/kernel/cuda_impl/sponge/nvtit/md_iteration_setup_random_state_gpu_impl.cuh"
-#include
-#include
-#include
-#include
-
-#include "plugin/device/gpu/kernel/gpu_kernel.h"
-#include "plugin/device/gpu/kernel/gpu_kernel_factory.h"
-#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/cuda_common.h"
-
-namespace mindspore {
-namespace kernel {
-template <typename T, typename T1>
-class MDIterationSetupRandStateGpuKernelMod : public DeprecatedNativeGpuKernelMod {
- public:
-  MDIterationSetupRandStateGpuKernelMod() {}
-  ~MDIterationSetupRandStateGpuKernelMod() override = default;
-
-  bool Init(const CNodePtr &kernel_node) override {
-    // get bond_numbers
-    kernel_node_ = kernel_node;
-    atom_numbers = static_cast<int>(GetAttr<int64_t>(kernel_node, "atom_numbers"));
-    seed = static_cast<int>(GetAttr<int64_t>(kernel_node, "seed"));
-    float4_numbers = ceil(3. * static_cast<double>(atom_numbers) / 4.);
-    InitSizeLists();
-    return true;
-  }
-
-  bool Launch(const std::vector<AddressPtr> &inputs, const std::vector<AddressPtr> &,
-              const std::vector<AddressPtr> &outputs, void *stream_ptr) override {
-    auto output = GetDeviceAddress<T>(outputs, 0);
-    curandStatePhilox4_32_10_t *rand_state = reinterpret_cast<curandStatePhilox4_32_10_t *>(output);
-    MD_Iteration_Setup_Random_State(float4_numbers, rand_state, seed, reinterpret_cast<cudaStream_t>(stream_ptr));
-
-    return true;
-  }
-
- protected:
-  void InitSizeLists() override { output_size_list_.push_back(sizeof(curandStatePhilox4_32_10_t) * float4_numbers); }
-
- private:
-  int atom_numbers;
-  int seed;
-  int float4_numbers;
-};
-}  // namespace kernel
-}  // namespace mindspore
-#endif
diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/pme/fft_3d_kernel.cc b/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/pme/fft_3d_kernel.cc
deleted file mode 100644
index d7d664a2c28..00000000000
--- a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/pme/fft_3d_kernel.cc
+++ /dev/null
@@ -1,23 +0,0 @@
-/**
- * Copyright 2021-2022 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-#include "plugin/device/gpu/kernel/sponge/pme/fft_3d_kernel.h"
-
-namespace mindspore {
-namespace kernel {
-MS_REG_GPU_KERNEL_ONE(FFT3D, KernelAttr().AddInputAttr(kNumberTypeFloat32).AddOutputAttr(kNumberTypeComplex64),
-                      FFT3DGpuKernelMod, float)
-}  // namespace kernel
-}  // namespace mindspore
diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/pme/fft_3d_kernel.h b/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/pme/fft_3d_kernel.h
deleted file mode 100644
index e861ee2f76b..00000000000
--- a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/pme/fft_3d_kernel.h
+++ /dev/null
@@ -1,77 +0,0 @@
-/**
- * Copyright 2021-2022 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-#ifndef MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_GPU_SPONGE_PME_FFT_3D_KERNEL_H_
-#define MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_GPU_SPONGE_PME_FFT_3D_KERNEL_H_
-#include
-#include
-#include
-#include
-#include
-#include "plugin/device/gpu/kernel/gpu_kernel.h"
-#include "plugin/device/gpu/kernel/gpu_kernel_factory.h"
-#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/cuda_common.h"
-#include "plugin/device/gpu/kernel/cuda_impl/sponge/pme/fft_3d_impl.cuh"
-namespace mindspore {
-namespace kernel {
-template <typename T>
-using Complex = mindspore::utils::Complex<T>;
-template <typename T>
-class FFT3DGpuKernelMod : public DeprecatedNativeGpuKernelMod {
- public:
-  FFT3DGpuKernelMod() = default;
-  ~FFT3DGpuKernelMod() override = default;
-
-  bool Init(const CNodePtr &kernel_node) override {
-    kernel_node_ = kernel_node;
-    fftx = static_cast<int>(GetAttr<int64_t>(kernel_node, "fftx"));
-    ffty = static_cast<int>(GetAttr<int64_t>(kernel_node, "ffty"));
-    fftz = static_cast<int>(GetAttr<int64_t>(kernel_node, "fftz"));
-    Nall = fftx * ffty * fftz;
-    Nfft = fftx * ffty * (fftz / 2 + 1);
-
-    cufftPlan3d(&FFT_plan_r2c, fftx, ffty, fftz, CUFFT_R2C);
-
-    InitSizeLists();
-    return true;
-  }
-
-  bool Launch(const std::vector<AddressPtr> &inputs, const std::vector<AddressPtr> &workspace,
-              const std::vector<AddressPtr> &outputs, void *stream_ptr) override {
-    auto input_tensor = GetDeviceAddress<T>(inputs, 0);
-    auto output_tensor = GetDeviceAddress<Complex<T>>(outputs, 0);
-
-    cufftSetStream(FFT_plan_r2c, reinterpret_cast<cudaStream_t>(stream_ptr));
-    FFT3D(Nfft, input_tensor, output_tensor, FFT_plan_r2c, reinterpret_cast<cudaStream_t>(stream_ptr));
-    return true;
-  }
-
- protected:
-  void InitSizeLists() override {
-    input_size_list_.push_back(Nall * sizeof(T));
-    output_size_list_.push_back(Nfft * sizeof(Complex<T>));
-  }
-
- private:
-  int fftx;
-  int ffty;
-  int fftz;
-  int Nall;
-  int Nfft;
-  cufftHandle FFT_plan_r2c;
-};
-}  // namespace kernel
-}  // namespace mindspore
-#endif
diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/pme/ifft_3d_kernel.cc b/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/pme/ifft_3d_kernel.cc
deleted file mode 100644
index d7c8f064ddb..00000000000
--- a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/pme/ifft_3d_kernel.cc
+++ /dev/null
@@ -1,23 +0,0 @@
-/**
- * Copyright 2021-2022 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-#include "plugin/device/gpu/kernel/sponge/pme/ifft_3d_kernel.h"
-
-namespace mindspore {
-namespace kernel {
-MS_REG_GPU_KERNEL_ONE(IFFT3D, KernelAttr().AddInputAttr(kNumberTypeComplex64).AddOutputAttr(kNumberTypeFloat32),
-                      IFFT3DGpuKernelMod, float)
-}  // namespace kernel
-}  // namespace mindspore
diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/pme/ifft_3d_kernel.h b/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/pme/ifft_3d_kernel.h
deleted file mode 100644
index d60570fc1a8..00000000000
--- a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/pme/ifft_3d_kernel.h
+++ /dev/null
@@ -1,77 +0,0 @@
-/**
- * Copyright 2021-2022 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-#ifndef MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_GPU_SPONGE_PME_IFFT_3D_KERNEL_H_
-#define MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_GPU_SPONGE_PME_IFFT_3D_KERNEL_H_
-#include
-#include
-#include
-#include
-#include
-#include "plugin/device/gpu/kernel/gpu_kernel.h"
-#include "plugin/device/gpu/kernel/gpu_kernel_factory.h"
-#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/cuda_common.h"
-#include "plugin/device/gpu/kernel/cuda_impl/sponge/pme/ifft_3d_impl.cuh"
-namespace mindspore {
-namespace kernel {
-template <typename T>
-using Complex = mindspore::utils::Complex<T>;
-template <typename T>
-class IFFT3DGpuKernelMod : public DeprecatedNativeGpuKernelMod {
- public:
-  IFFT3DGpuKernelMod() = default;
-  ~IFFT3DGpuKernelMod() override = default;
-
-  bool Init(const CNodePtr &kernel_node) override {
-    kernel_node_ = kernel_node;
-    fftx = static_cast<int>(GetAttr<int64_t>(kernel_node, "fftx"));
-    ffty = static_cast<int>(GetAttr<int64_t>(kernel_node, "ffty"));
-    fftz = static_cast<int>(GetAttr<int64_t>(kernel_node, "fftz"));
-    Nfft = fftx * ffty * fftz;
-    Nall = fftx * ffty * (fftz - 1) * 2;
-
-    cufftPlan3d(&FFT_plan_c2r, fftx, ffty, (fftz - 1) * 2, CUFFT_C2R);
-
-    InitSizeLists();
-    return true;
-  }
-
-  bool Launch(const std::vector<AddressPtr> &inputs, const std::vector<AddressPtr> &workspace,
-              const std::vector<AddressPtr> &outputs, void *stream_ptr) override {
-    auto input_tensor = GetDeviceAddress<Complex<T>>(inputs, 0);
-    auto output_tensor = GetDeviceAddress<T>(outputs, 0);
-
-    cufftSetStream(FFT_plan_c2r, reinterpret_cast<cudaStream_t>(stream_ptr));
-    IFFT3D(Nfft, input_tensor, output_tensor, FFT_plan_c2r, reinterpret_cast<cudaStream_t>(stream_ptr));
-    return true;
-  }
-
- protected:
-  void InitSizeLists() override {
-    input_size_list_.push_back(Nfft * sizeof(Complex<T>));
-    output_size_list_.push_back(Nall * sizeof(T));
-  }
-
- private:
-  int fftx;
-  int ffty;
-  int fftz;
-  int Nall;
-  int Nfft;
-  cufftHandle FFT_plan_c2r;
-};
-}  // namespace kernel
-}  // namespace mindspore
-#endif
diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/pme/pme_batched_fft_2d_kernel.cc b/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/pme/pme_batched_fft_2d_kernel.cc
deleted file mode 100644
index e8991b98f4c..00000000000
--- a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/pme/pme_batched_fft_2d_kernel.cc
+++ /dev/null
@@ -1,24 +0,0 @@
-/**
- * Copyright 2021-2022 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-#include "plugin/device/gpu/kernel/sponge/pme/pme_batched_fft_2d_kernel.h"
-
-namespace mindspore {
-namespace kernel {
-MS_REG_GPU_KERNEL_ONE(PMEBatchedFFT2D,
-                      KernelAttr().AddInputAttr(kNumberTypeComplex64).AddOutputAttr(kNumberTypeComplex64),
-                      PMEBatchedFFT2DGpuKernelMod, float)
-}  // namespace kernel
-}  // namespace mindspore
diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/pme/pme_batched_fft_2d_kernel.h b/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/pme/pme_batched_fft_2d_kernel.h
deleted file mode 100644
index d4f68f9be7d..00000000000
--- a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/pme/pme_batched_fft_2d_kernel.h
+++ /dev/null
@@ -1,82 +0,0 @@
-/**
- * Copyright 2021-2022 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-#ifndef MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_GPU_SPONGE_PME_PME_BATCHED_FFT_2D_KERNEL_H_
-#define MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_GPU_SPONGE_PME_PME_BATCHED_FFT_2D_KERNEL_H_
-#include
-#include
-#include
-#include
-#include
-#include "plugin/device/gpu/kernel/gpu_kernel.h"
-#include "plugin/device/gpu/kernel/gpu_kernel_factory.h"
-#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/cuda_common.h"
-#include "plugin/device/gpu/kernel/cuda_impl/sponge/pme/pme_batched_fft_2d_impl.cuh"
-namespace mindspore {
-namespace kernel {
-template <typename T>
-using Complex = mindspore::utils::Complex<T>;
-template <typename T>
-class PMEBatchedFFT2DGpuKernelMod : public DeprecatedNativeGpuKernelMod {
- public:
-  PMEBatchedFFT2DGpuKernelMod() = default;
-  ~PMEBatchedFFT2DGpuKernelMod() override = default;
-
-  bool Init(const CNodePtr &kernel_node) override {
-    // direction: forward 1, inverse -1
-    direction = static_cast<int>(GetAttr<int64_t>(kernel_node, "direction"));
-    fftx = static_cast<int>(GetAttr<int64_t>(kernel_node, "fftx"));
-    ffty = static_cast<int>(GetAttr<int64_t>(kernel_node, "ffty"));
-    fftz = static_cast<int>(GetAttr<int64_t>(kernel_node, "fftz"));
-    Nall = fftx * ffty * fftz;
-
-    // config plan
-    int batch = fftx;
-    int dimSize[] = {ffty, fftz};
-    int rank = sizeof(dimSize) / sizeof(dimSize[0]);
-    cufftPlanMany(&FFT_plan_c2c, rank, dimSize, NULL, 0, 0, NULL, 0, 0, CUFFT_C2C, batch);
-
-    InitSizeLists();
-    return true;
-  }
-
-  bool Launch(const std::vector<AddressPtr> &inputs, const std::vector<AddressPtr> &workspace,
-              const std::vector<AddressPtr> &outputs, void *stream_ptr) override {
-    auto input_tensor = GetDeviceAddress<Complex<T>>(inputs, 0);
-    auto output_tensor = GetDeviceAddress<Complex<T>>(outputs, 0);
-
-    cufftSetStream(FFT_plan_c2c, reinterpret_cast<cudaStream_t>(stream_ptr));
-    PMEBatchedFFT2D(input_tensor, output_tensor, FFT_plan_c2c, direction,
-                    reinterpret_cast<cudaStream_t>(stream_ptr));
-    return true;
-  }
-
- protected:
-  void InitSizeLists() override {
-    input_size_list_.push_back(Nall * sizeof(Complex<T>));
-    output_size_list_.push_back(Nall * sizeof(Complex<T>));
-  }
-
- private:
-  int direction;
-  int fftx;
-  int ffty;
-  int fftz;
-  int Nall;
-  cufftHandle FFT_plan_c2c;
-};
-}  // namespace kernel
-}  // namespace mindspore
-#endif
diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/pme/pme_energy_kernel.cc b/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/pme/pme_energy_kernel.cc
deleted file mode 100644
index e26e5bcfef0..00000000000
--- a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/pme/pme_energy_kernel.cc
+++ /dev/null
@@ -1,36 +0,0 @@
-/**
- * Copyright 2021-2022 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-#include "plugin/device/gpu/kernel/sponge/pme/pme_energy_kernel.h"
-
-namespace mindspore {
-namespace kernel {
-MS_REG_GPU_KERNEL_TWO(PMEEnergy,
-                      KernelAttr()
-                        .AddInputAttr(kNumberTypeUInt32)
-                        .AddInputAttr(kNumberTypeFloat32)
-                        .AddInputAttr(kNumberTypeInt32)
-                        .AddInputAttr(kNumberTypeInt32)
-                        .AddInputAttr(kNumberTypeFloat32)
-                        .AddInputAttr(kNumberTypeInt32)
-                        .AddInputAttr(kNumberTypeInt32)
-                        .AddInputAttr(kNumberTypeInt32)
-                        .AddOutputAttr(kNumberTypeFloat32)
-                        .AddOutputAttr(kNumberTypeFloat32)
-                        .AddOutputAttr(kNumberTypeFloat32)
-                        .AddOutputAttr(kNumberTypeFloat32),
-                      PMEEnergyGpuKernelMod, float, int)
-}  // namespace kernel
-}  // namespace mindspore
diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/pme/pme_energy_kernel.h b/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/pme/pme_energy_kernel.h
deleted file mode 100644
index 73633c65848..00000000000
--- a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/pme/pme_energy_kernel.h
+++ /dev/null
@@ -1,272 +0,0 @@
-/**
- * Copyright 2021-2022 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-#ifndef MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_GPU_SPONGE_PME_PME_ENERGY_KERNEL_H_
-#define MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_GPU_SPONGE_PME_PME_ENERGY_KERNEL_H_
-#include
-#include
-#include
-#include "plugin/device/gpu/kernel/gpu_kernel.h"
-#include "plugin/device/gpu/kernel/gpu_kernel_factory.h"
-#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/cuda_common.h"
-#include "plugin/device/gpu/kernel/cuda_impl/sponge/pme/pme_energy_impl.cuh"
-namespace mindspore {
-namespace kernel {
-template <typename T, typename T1>
-class PMEEnergyGpuKernelMod : public DeprecatedNativeGpuKernelMod {
- public:
-  PMEEnergyGpuKernelMod() : ele_uint_crd(1) {}
-  ~PMEEnergyGpuKernelMod() override = default;
-
-  bool Init(const CNodePtr &kernel_node) override {
-    kernel_node_ = kernel_node;
-    atom_numbers = static_cast<int>(GetAttr<int64_t>(kernel_node, "atom_numbers"));
-    excluded_numbers = static_cast<int>(GetAttr<int64_t>(kernel_node, "excluded_numbers"));
-    beta = static_cast<float>(GetAttr<float>(kernel_node, "beta"));
-    fftx = static_cast<int>(GetAttr<int64_t>(kernel_node, "fftx"));
-    ffty = static_cast<int>(GetAttr<int64_t>(kernel_node, "ffty"));
-    fftz = static_cast<int>(GetAttr<int64_t>(kernel_node, "fftz"));
-
-    float box_length_0 = static_cast<float>(GetAttr<float>(kernel_node, "box_length_0"));
-    float box_length_1 = static_cast<float>(GetAttr<float>(kernel_node, "box_length_1"));
-    float box_length_2 = static_cast<float>(GetAttr<float>(kernel_node, "box_length_2"));
-    std::vector<float> h_box_length{box_length_0, box_length_1, box_length_2};
-    VECTOR *box_length = reinterpret_cast<VECTOR *>(h_box_length.data());
-    cufftPlan3d(&PME_plan_r2c, fftx, ffty, fftz, CUFFT_R2C);
-    cufftPlan3d(&PME_plan_c2r, fftx, ffty, fftz, CUFFT_C2R);
-    _thread_PME.x = 8;
-    _thread_PME.y = 8;
-    PME_Nin = ffty * fftz;
-    PME_Nfft = fftx * ffty * (fftz / 2 + 1);
-    PME_Nall = fftx * ffty * fftz;
-    PME_kxyz_cpu.resize(64);
-    volume = box_length[0].x * box_length[0].y * box_length[0].z;
-    int kx, ky, kz, kxrp, kyrp, kzrp, index;
-    for (kx = 0; kx < 4; kx++) {
-      for (ky = 0; ky < 4; ky++) {
-        for (kz = 0; kz < 4; kz++) {
-          index = kx * 16 + ky * 4 + kz;
-          PME_kxyz_cpu[index].uint_x = kx;
-          PME_kxyz_cpu[index].uint_y = ky;
-          PME_kxyz_cpu[index].uint_z = kz;
-        }
-      }
-    }
-
-    B1.resize(fftx);
-    B2.resize(ffty);
-    B3.resize(fftz);
-    PME_BC0.resize(PME_Nfft);
-    for (kx = 0; kx < fftx; kx++) {
-      B1[kx] = getb(kx, fftx, 4);
-    }
-
-    for (ky = 0; ky < ffty; ky++) {
-      B2[ky] = getb(ky, ffty, 4);
-    }
-
-    for (kz = 0; kz < fftz; kz++) {
-      B3[kz] = getb(kz, fftz, 4);
-    }
-    float mprefactor = PI * PI / -beta / beta;
-
-    float msq;
-    for (kx = 0; kx < fftx; kx++) {
-      kxrp = kx;
-      if (kx > fftx / 2) kxrp = fftx - kx;
-      for (ky = 0; ky < ffty; ky++) {
-        kyrp = ky;
-        if (ky > ffty / 2) kyrp = ffty - ky;
-        for (kz = 0; kz <= fftz / 2; kz++) {
-          kzrp = kz;
-
-          msq = kxrp * kxrp / box_length[0].x / box_length[0].x + kyrp * kyrp / box_length[0].y / box_length[0].y +
-                kzrp * kzrp / box_length[0].z / box_length[0].z;
-          index = kx * ffty * (fftz / 2 + 1) + ky * (fftz / 2 + 1) + kz;
-          if ((kx + ky + kz) == 0) {
-            PME_BC0[index] = 0;
-          } else {
-            PME_BC0[index] = 1.0 / PI / msq * exp(mprefactor * msq) / volume;
-          }
-
-          PME_BC0[index] *= B1[kx] * B2[ky] * B3[kz];
-        }
-      }
-    }
-
-    InitSizeLists();
-    return true;
-  }
-
-  bool Launch(const std::vector<AddressPtr> &inputs, const std::vector<AddressPtr> &workspace,
-              const std::vector<AddressPtr> &outputs, void *stream_ptr) override {
-    auto uint_crd = GetDeviceAddress<unsigned int>(inputs, 0);
-    auto charge = GetDeviceAddress<T>(inputs, 1);
-    auto nl_numbers = GetDeviceAddress<T1>(inputs, 2);
-    auto nl_serial = GetDeviceAddress<T1>(inputs, 3);
-    auto scaler = GetDeviceAddress<T>(inputs, 4);
-    auto excluded_list_start = GetDeviceAddress<T1>(inputs, 5);
-    auto excluded_list = GetDeviceAddress<T1>(inputs, 6);
-    auto excluded_atom_numbers = GetDeviceAddress<T1>(inputs, 7);
-
-    auto pme_uxyz = GetDeviceAddress<UNSIGNED_INT_VECTOR>(workspace, 0);  // workspace
-    auto pme_frxyz = GetDeviceAddress<VECTOR>(workspace, 1);              // workspace
-    auto pme_q = GetDeviceAddress<T>(workspace, 2);                       // workspace
-    auto pme_fq = GetDeviceAddress<cufftComplex>(workspace, 3);           // workspace
-    auto pme_atom_near = GetDeviceAddress<int>(workspace, 4);             // workspace
-    auto pme_bc = GetDeviceAddress<float>(workspace, 5);                  // workspace
-    auto pme_kxyz = GetDeviceAddress<UNSIGNED_INT_VECTOR>(workspace, 6);  // workspace
-    auto nl = GetDeviceAddress<NEIGHBOR_LIST>(workspace, 7);
-
-    auto reciprocal_ene = GetDeviceAddress<T>(outputs, 0);
-    auto self_ene = GetDeviceAddress<T>(outputs, 1);
-    auto direct_ene = GetDeviceAddress<T>(outputs, 2);
-    auto correction_ene = GetDeviceAddress<T>(outputs, 3);
-
-    cufftSetStream(PME_plan_r2c, reinterpret_cast<cudaStream_t>(stream_ptr));
-    cufftSetStream(PME_plan_c2r, reinterpret_cast<cudaStream_t>(stream_ptr));
-    cudaMemcpyAsync(pme_kxyz, PME_kxyz_cpu.data(), sizeof(UNSIGNED_INT_VECTOR) * 64, cudaMemcpyHostToDevice,
-                    reinterpret_cast<cudaStream_t>(stream_ptr));
-    cudaMemcpyAsync(pme_bc, PME_BC0.data(), sizeof(float) * PME_Nfft, cudaMemcpyHostToDevice,
-                    reinterpret_cast<cudaStream_t>(stream_ptr));
-
-    PMEEnergy(fftx, ffty, fftz, atom_numbers, beta, pme_bc, pme_uxyz, pme_frxyz, pme_q, pme_fq, pme_atom_near, pme_kxyz,
-              uint_crd, charge, nl_numbers, nl_serial, nl, scaler, excluded_list_start, excluded_list,
-              excluded_atom_numbers, reciprocal_ene, self_ene, direct_ene, correction_ene, _thread_PME, PME_Nin,
-              PME_Nfft, PME_Nall, PME_plan_r2c, PME_plan_c2r, reinterpret_cast<cudaStream_t>(stream_ptr));
-    return true;
-  }
-
- protected:
-  void InitSizeLists() override {
-    input_size_list_.push_back(atom_numbers * sizeof(UNSIGNED_INT_VECTOR));
-    input_size_list_.push_back(atom_numbers * sizeof(VECTOR));
-    input_size_list_.push_back(atom_numbers * sizeof(T1));
-    input_size_list_.push_back(max_nl_numbers * sizeof(T1));
-    input_size_list_.push_back(sizeof(VECTOR));
-
-    input_size_list_.push_back(atom_numbers * sizeof(T1));
-    input_size_list_.push_back(excluded_numbers * sizeof(T1));
-    input_size_list_.push_back(atom_numbers * sizeof(T1));
-
-    workspace_size_list_.push_back(atom_numbers * sizeof(UNSIGNED_INT_VECTOR));
-    workspace_size_list_.push_back(atom_numbers * sizeof(VECTOR));
-    workspace_size_list_.push_back(PME_Nall * sizeof(T));
-    workspace_size_list_.push_back(PME_Nfft * sizeof(cufftComplex));
-    workspace_size_list_.push_back(atom_numbers * 64 * sizeof(int));
-    workspace_size_list_.push_back(PME_Nfft * sizeof(float));
-    workspace_size_list_.push_back(64 * sizeof(UNSIGNED_INT_VECTOR));
-    workspace_size_list_.push_back(atom_numbers * max_nl_numbers * sizeof(T1));
-
-    output_size_list_.push_back(sizeof(T));
-    output_size_list_.push_back(sizeof(T));
-    output_size_list_.push_back(sizeof(T));
-    output_size_list_.push_back(sizeof(T));
-  }
-
-  cufftComplex expc(cufftComplex z) {
-    cufftComplex res;
-    float t = expf(z.x);
-    mindspore::kernel::math::SinCosf(z.y, &res.y, &res.x);
-    res.x *= t;
-    res.y *= t;
-    return res;
-  }
-
-  float M_(float u, int n) {
-    if (n == 2) {
-      if (u > 2 || u < 0) return 0;
-      return 1 - abs(u - 1);
-    } else {
-      return u / (n - 1) * M_(u, n - 1) + (n - u) / (n - 1) * M_(u - 1, n - 1);
-    }
-  }
-
-  float getb(int k, int NFFT, int B_order) {
-    cufftComplex tempc, tempc2, res;
-    float tempf;
-    tempc2.x = 0;
-    tempc2.y = 0;
-
-    tempc.x = 0;
-    if (NFFT == 0) {
-      MS_LOG(ERROR) << "Divide by zero.";
-    } else {
-      tempc.y = 2 * (B_order - 1) * PI * k / NFFT;
-    }
-    res = expc(tempc);
-
-    for (int kk = 0; kk < (B_order - 1); kk++) {
-      tempc.x = 0;
-      if (NFFT == 0) {
-        MS_LOG(ERROR) << "Divide by zero.";
-        break;
-      } else {
-        tempc.y = 2 * PI * k / NFFT * kk;
-      }
-      tempc = expc(tempc);
-      tempf = M_(kk + 1, B_order);
-      tempc2.x += tempf * tempc.x;
-      tempc2.y += tempf * tempc.y;
-    }
-    res = cuCdivf(res, tempc2);
-    return res.x * res.x + res.y * res.y;
-  }
-
- private:
-  size_t ele_uint_crd = 1;
-
-  std::vector<float> B1;
-  std::vector<float> B2;
-  std::vector<float> B3;
-  std::vector<float> PME_BC0;
-
-  int atom_numbers;
-  int excluded_numbers;
-  int max_nl_numbers = 800;
-  int fftx;
-  int ffty;
-  int fftz;
-  float beta;
-  int PME_Nin;
-  int PME_Nall;
-  int PME_Nfft;
-  float volume;
-  float PI = 3.1415926;
-  cufftHandle PME_plan_r2c;
-  cufftHandle PME_plan_c2r;
-
-  dim3 _thread_PME;
-
-  struct VECTOR {
-    float x;
-    float y;
-    float z;
-  };
-
-  struct UNSIGNED_INT_VECTOR {
-    unsigned int uint_x;
-    unsigned int uint_y;
-    unsigned int uint_z;
-  };
-  std::vector<UNSIGNED_INT_VECTOR> PME_kxyz_cpu;
-  struct NEIGHBOR_LIST {
-    int atom_numbers;
-    int *atom_serial;
-  };
-};
-}  // namespace kernel
-}  // namespace mindspore
-#endif
diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/pme/pme_energy_update_kernel.cc b/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/pme/pme_energy_update_kernel.cc
deleted file mode 100644
index ed53552993a..00000000000
--- a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/pme/pme_energy_update_kernel.cc
+++ /dev/null
@@ -1,42 +0,0 @@
-/**
- * Copyright 2021-2022 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-/**
- * Note:
- * PMEEnergyUpdate. This is an experimental interface that is subject to change and/or deletion.
- */
-#include "plugin/device/gpu/kernel/sponge/pme/pme_energy_update_kernel.h"
-
-namespace mindspore {
-namespace kernel {
-MS_REG_GPU_KERNEL_TWO(PMEEnergyUpdate,
-                      KernelAttr()
-                        .AddInputAttr(kNumberTypeUInt32)
-                        .AddInputAttr(kNumberTypeFloat32)
-                        .AddInputAttr(kNumberTypeInt32)
-                        .AddInputAttr(kNumberTypeInt32)
-                        .AddInputAttr(kNumberTypeFloat32)
-                        .AddInputAttr(kNumberTypeInt32)
-                        .AddInputAttr(kNumberTypeInt32)
-                        .AddInputAttr(kNumberTypeInt32)
-                        .AddInputAttr(kNumberTypeFloat32)
-                        .AddInputAttr(kNumberTypeFloat32)
-                        .AddOutputAttr(kNumberTypeFloat32)
-                        .AddOutputAttr(kNumberTypeFloat32)
-                        .AddOutputAttr(kNumberTypeFloat32)
-                        .AddOutputAttr(kNumberTypeFloat32),
-                      PMEEnergyUpdateGpuKernelMod, float, int)
-}  // namespace kernel
-}  // namespace mindspore
diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/pme/pme_energy_update_kernel.h b/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/pme/pme_energy_update_kernel.h
deleted file mode 100644
index 226d45c5c22..00000000000
--- a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/pme/pme_energy_update_kernel.h
+++ /dev/null
@@ -1,305 +0,0 @@
-/**
- * Copyright 2021-2022 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-/**
- * Note:
- * PMEEnergyUpdate. This is an experimental interface that is subject to change and/or deletion.
- */
-#ifndef MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_GPU_SPONGE_PME_PME_ENERGY_UPDATE_KERNEL_H_
-#define MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_GPU_SPONGE_PME_PME_ENERGY_UPDATE_KERNEL_H_
-#include
-#include
-#include
-#include "plugin/device/gpu/kernel/gpu_kernel.h"
-#include "plugin/device/gpu/kernel/gpu_kernel_factory.h"
-#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/cuda_common.h"
-#include "plugin/device/gpu/kernel/cuda_impl/sponge/pme/pme_energy_update_impl.cuh"
-
-namespace mindspore {
-namespace kernel {
-template <typename T, typename T1>
-class PMEEnergyUpdateGpuKernelMod : public DeprecatedNativeGpuKernelMod {
- public:
-  PMEEnergyUpdateGpuKernelMod() : ele_uint_crd(1) {}
-  ~PMEEnergyUpdateGpuKernelMod() override = default;
-
-  bool Init(const CNodePtr &kernel_node) override {
-    kernel_node_ = kernel_node;
-    atom_numbers = static_cast<int>(GetAttr<int64_t>(kernel_node, "atom_numbers"));
-    excluded_numbers = static_cast<int>(GetAttr<int64_t>(kernel_node, "excluded_numbers"));
-    beta = static_cast<float>(GetAttr<float>(kernel_node, "beta"));
-    fftx = static_cast<int>(GetAttr<int64_t>(kernel_node, "fftx"));
-    ffty = static_cast<int>(GetAttr<int64_t>(kernel_node, "ffty"));
-    fftz = static_cast<int>(GetAttr<int64_t>(kernel_node, "fftz"));
-
-    float box_length_0 = static_cast<float>(GetAttr<float>(kernel_node, "box_length_0"));
-    float box_length_1 = static_cast<float>(GetAttr<float>(kernel_node, "box_length_1"));
-    float box_length_2 = static_cast<float>(GetAttr<float>(kernel_node, "box_length_2"));
-    max_neighbor_numbers = static_cast<int>(GetAttr<int64_t>(kernel_node, "max_neighbor_numbers"));
-    need_update = static_cast<int>(GetAttr<int64_t>(kernel_node, "need_update"));
-
-    cufftPlan3d(&PME_plan_r2c, fftx, ffty, fftz, CUFFT_R2C);
-    cufftPlan3d(&PME_plan_c2r, fftx, ffty, fftz, CUFFT_C2R);
-    _thread_PME.x = 8;
-    _thread_PME.y = 8;
-    PME_Nin = ffty * fftz;
-    PME_Nfft = fftx * ffty * (fftz / 2 + 1);
-    PME_Nall = fftx * ffty * fftz;
-    PME_kxyz_cpu.resize(64);
-
-    std::vector<float> h_box_length{box_length_0, box_length_1, box_length_2};
-    VECTOR *box_length = reinterpret_cast<VECTOR *>(h_box_length.data());
-
-    volume = box_length[0].x * box_length[0].y * box_length[0].z;
-    int kx, ky, kz, kxrp, kyrp, kzrp, index;
-    for (kx = 0; kx < 4; kx++) {
-      for (ky = 0; ky < 4; ky++) {
-        for (kz = 0; kz < 4; kz++) {
-          index = kx * 16 + ky * 4 + kz;
-          PME_kxyz_cpu[index].uint_x = kx;
-          PME_kxyz_cpu[index].uint_y = ky;
-          PME_kxyz_cpu[index].uint_z = kz;
-        }
-      }
-    }
-
-    B1.resize(fftx);
-    B2.resize(ffty);
-    B3.resize(fftz);
-    PME_BC0.resize(PME_Nfft);
-    for (kx = 0; kx < fftx; kx++) {
-      B1[kx] = getb(kx, fftx, 4);
-    }
-
-    for (ky = 0; ky < ffty; ky++) {
-      B2[ky] = getb(ky, ffty, 4);
-    }
-
-    for (kz = 0; kz < fftz; kz++) {
-      B3[kz] = getb(kz, fftz, 4);
-    }
-    float mprefactor = PI * PI / -beta / beta;
-
-    float msq;
-    for (kx = 0; kx < fftx; kx++) {
-      kxrp = kx;
-      if (kx > fftx / 2) kxrp = fftx - kx;
-      for (ky = 0; ky < ffty; ky++) {
-        kyrp = ky;
-        if (ky > ffty / 2) kyrp = ffty - ky;
-        for (kz = 0; kz <= fftz / 2; kz++) {
-          kzrp = kz;
-
-          msq = kxrp * kxrp / box_length[0].x / box_length[0].x + kyrp * kyrp / box_length[0].y / box_length[0].y +
-                kzrp * kzrp / box_length[0].z / box_length[0].z;
-          index = kx * ffty * (fftz / 2 + 1) + ky * (fftz / 2 + 1) + kz;
-          if ((kx + ky + kz) == 0) {
-            PME_BC0[index] = 0;
-          } else {
-            PME_BC0[index] = 1.0 / PI / msq * exp(mprefactor * msq) / volume;
-          }
-
-          PME_BC0[index] *= B1[kx] * B2[ky] * B3[kz];
-        }
-      }
-    }
-
-    InitSizeLists();
-    return true;
-  }
-
-  bool Launch(const std::vector<AddressPtr> &inputs, const std::vector<AddressPtr> &workspace,
-              const std::vector<AddressPtr> &outputs, void *stream_ptr) override {
-    auto uint_crd = GetDeviceAddress<unsigned int>(inputs, 0);
GetDeviceAddress(inputs, 1); - auto nl_numbers = GetDeviceAddress(inputs, 2); - auto nl_serial = GetDeviceAddress(inputs, 3); - auto scaler = GetDeviceAddress(inputs, 4); - auto excluded_list_start = GetDeviceAddress(inputs, 5); - auto excluded_list = GetDeviceAddress(inputs, 6); - auto excluded_atom_numbers = GetDeviceAddress(inputs, 7); - auto neutralizing_factor = GetDeviceAddress(inputs, 8); - auto d_beta = GetDeviceAddress(inputs, 9); - - auto pme_uxyz = GetDeviceAddress(workspace, 0); // workspace - auto pme_frxyz = GetDeviceAddress(workspace, 1); // workspace - auto pme_q = GetDeviceAddress(workspace, 2); // workspace - auto pme_fq = GetDeviceAddress(workspace, 3); // workspace - auto pme_atom_near = GetDeviceAddress(workspace, 4); // workspace - auto pme_bc = GetDeviceAddress(workspace, 5); // workspace - auto pme_kxyz = GetDeviceAddress(workspace, 6); // workspace - auto nl = GetDeviceAddress(workspace, 7); - auto charge_sum = GetDeviceAddress(workspace, 8); - - auto reciprocal_ene = GetDeviceAddress(outputs, 0); - auto self_ene = GetDeviceAddress(outputs, 1); - auto direct_ene = GetDeviceAddress(outputs, 2); - auto correction_ene = GetDeviceAddress(outputs, 3); - - h_beta = beta; - if (need_update) { - cudaMemcpyAsync(&h_beta, d_beta, sizeof(float), cudaMemcpyDeviceToHost, - reinterpret_cast(stream_ptr)); - cudaStreamSynchronize(reinterpret_cast(stream_ptr)); - factor = h_beta / beta; - double factor_inverse = 1.0 / factor; - - for (int i = 0; i < PME_Nfft; i++) { - PME_BC0[i] *= factor_inverse; // update PME_BC0 - } - } - - cufftSetStream(PME_plan_r2c, reinterpret_cast(stream_ptr)); - cufftSetStream(PME_plan_c2r, reinterpret_cast(stream_ptr)); - cudaMemcpyAsync(pme_kxyz, PME_kxyz_cpu.data(), sizeof(UNSIGNED_INT_VECTOR) * 64, cudaMemcpyHostToDevice, - reinterpret_cast(stream_ptr)); - cudaMemcpyAsync(pme_bc, PME_BC0.data(), sizeof(float) * PME_Nfft, cudaMemcpyHostToDevice, - reinterpret_cast(stream_ptr)); - - PMEEnergyUpdate(fftx, ffty, fftz, atom_numbers, h_beta, pme_bc, pme_uxyz, pme_frxyz, pme_q, pme_fq, pme_atom_near, - pme_kxyz, uint_crd, charge, nl_numbers, nl_serial, nl, scaler, excluded_list_start, excluded_list, - excluded_atom_numbers, reciprocal_ene, self_ene, direct_ene, correction_ene, _thread_PME, PME_Nin, - PME_Nfft, PME_Nall, PME_plan_r2c, PME_plan_c2r, neutralizing_factor, charge_sum, - max_neighbor_numbers, reinterpret_cast(stream_ptr)); - return true; - } - - protected: - void InitSizeLists() override { - input_size_list_.push_back(atom_numbers * sizeof(UNSIGNED_INT_VECTOR)); - input_size_list_.push_back(atom_numbers * sizeof(VECTOR)); - input_size_list_.push_back(atom_numbers * sizeof(T1)); - input_size_list_.push_back(max_neighbor_numbers * sizeof(T1)); - input_size_list_.push_back(sizeof(VECTOR)); - - input_size_list_.push_back(atom_numbers * sizeof(T1)); - input_size_list_.push_back(excluded_numbers * sizeof(T1)); - input_size_list_.push_back(atom_numbers * sizeof(T1)); - input_size_list_.push_back(sizeof(T)); - input_size_list_.push_back(sizeof(T)); - - workspace_size_list_.push_back(atom_numbers * sizeof(UNSIGNED_INT_VECTOR)); - workspace_size_list_.push_back(atom_numbers * sizeof(VECTOR)); - workspace_size_list_.push_back(PME_Nall * sizeof(T)); - workspace_size_list_.push_back(PME_Nfft * sizeof(cufftComplex)); - workspace_size_list_.push_back(atom_numbers * 64 * sizeof(int)); - workspace_size_list_.push_back(PME_Nfft * sizeof(float)); - workspace_size_list_.push_back(64 * sizeof(UNSIGNED_INT_VECTOR)); - workspace_size_list_.push_back(atom_numbers * 
max_neighbor_numbers * sizeof(T1)); - workspace_size_list_.push_back(sizeof(T)); - - output_size_list_.push_back(sizeof(T)); - output_size_list_.push_back(sizeof(T)); - output_size_list_.push_back(sizeof(T)); - output_size_list_.push_back(sizeof(T)); - } - - cufftComplex expc(cufftComplex z) { - cufftComplex res; - float t = expf(z.x); - mindspore::kernel::math::SinCosf(z.y, &res.y, &res.x); - res.x *= t; - res.y *= t; - return res; - } - - float M_(float u, int n) { - if (n == 2) { - if (u > 2 || u < 0) return 0; - return 1 - abs(u - 1); - } else { - return u / (n - 1) * M_(u, n - 1) + (n - u) / (n - 1) * M_(u - 1, n - 1); - } - } - - float getb(int k, int NFFT, int B_order) { - cufftComplex tempc, tempc2, res; - float tempf; - tempc2.x = 0; - tempc2.y = 0; - - tempc.x = 0; - if (NFFT == 0) { - MS_LOG(ERROR) << "Divide by zero."; - } else { - tempc.y = 2 * (B_order - 1) * PI * k / NFFT; - } - res = expc(tempc); - - for (int kk = 0; kk < (B_order - 1); kk++) { - tempc.x = 0; - if (NFFT == 0) { - MS_LOG(ERROR) << "Divide by zero."; - break; - } else { - tempc.y = 2 * PI * k / NFFT * kk; - } - tempc = expc(tempc); - tempf = M_(kk + 1, B_order); - tempc2.x += tempf * tempc.x; - tempc2.y += tempf * tempc.y; - } - res = cuCdivf(res, tempc2); - return res.x * res.x + res.y * res.y; - } - - private: - size_t ele_uint_crd = 1; - - std::vector B1; - std::vector B2; - std::vector B3; - std::vector PME_BC0; - - int atom_numbers; - int excluded_numbers; - int max_neighbor_numbers; - int fftx; - int ffty; - int fftz; - float beta; - float factor; - float h_beta; - int PME_Nin; - int PME_Nall; - int PME_Nfft; - float volume; - float PI = 3.1415926; - int need_update; - cufftHandle PME_plan_r2c; - cufftHandle PME_plan_c2r; - - dim3 _thread_PME; - - struct VECTOR { - float x; - float y; - float z; - }; - - struct UNSIGNED_INT_VECTOR { - unsigned int uint_x; - unsigned int uint_y; - unsigned int uint_z; - }; - std::vector PME_kxyz_cpu; - struct NEIGHBOR_LIST { - int atom_numbers; - int *atom_serial; - }; -}; -} // namespace kernel -} // namespace mindspore -#endif diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/pme/pme_excluded_force_kernel.cc b/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/pme/pme_excluded_force_kernel.cc deleted file mode 100644 index 21864bbe06d..00000000000 --- a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/pme/pme_excluded_force_kernel.cc +++ /dev/null @@ -1,31 +0,0 @@ -/** - * Copyright 2021-2022 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
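[Editor's note on the getb()/M_()/expc() helpers removed just above: they implement the per-axis B-spline structure factors of smooth PME (Essmann et al., 1995) — M_ is the cardinal B-spline recursion and getb evaluates |b(k)|^2 for one grid axis. Below is a minimal standalone host-side sketch of the same computation using std::complex instead of cufftComplex; the function names are mine, not MindSpore's, and N > 0 is assumed rather than checked as the kernel does.]

    #include <cmath>
    #include <complex>

    // Cardinal B-spline M_n(u) via the usual recursion; n == 2 is the base case.
    float BSpline(float u, int n) {
      if (n == 2) return (u < 0 || u > 2) ? 0.0f : 1.0f - std::fabs(u - 1.0f);
      return u / (n - 1) * BSpline(u, n - 1) + (n - u) / (n - 1) * BSpline(u - 1.0f, n - 1);
    }

    // |b(k)|^2 for an order-`order` spline on an N-point grid axis.
    float BFactorSquared(int k, int N, int order) {
      const float kPi = 3.1415926f;
      std::complex<float> num =
          std::exp(std::complex<float>(0.0f, 2 * kPi * (order - 1) * k / N));
      std::complex<float> den(0.0f, 0.0f);
      for (int j = 0; j < order - 1; ++j) {
        den += BSpline(j + 1.0f, order) *
               std::exp(std::complex<float>(0.0f, 2 * kPi * k * j / N));
      }
      return std::norm(num / den);  // std::norm(z) == |z|^2
    }

[The kernel calls this once per axis in Init() with order 4 (cubic splines) and caches the products in PME_BC0, so the recursion's cost never reaches the launch path.]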
- */ -#include "plugin/device/gpu/kernel/sponge/pme/pme_excluded_force_kernel.h" - -namespace mindspore { -namespace kernel { -MS_REG_GPU_KERNEL_TWO(PMEExcludedForce, - KernelAttr() - .AddInputAttr(kNumberTypeUInt32) - .AddInputAttr(kNumberTypeFloat32) - .AddInputAttr(kNumberTypeFloat32) - .AddInputAttr(kNumberTypeInt32) - .AddInputAttr(kNumberTypeInt32) - .AddInputAttr(kNumberTypeInt32) - .AddOutputAttr(kNumberTypeFloat32), - PMEExcludedForceGpuKernelMod, float, int) -} // namespace kernel -} // namespace mindspore diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/pme/pme_excluded_force_kernel.h b/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/pme/pme_excluded_force_kernel.h deleted file mode 100644 index a98d67d3478..00000000000 --- a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/pme/pme_excluded_force_kernel.h +++ /dev/null @@ -1,93 +0,0 @@ -/** - * Copyright 2021-2022 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -/** - * Note: - * PMEExcludedForce. This is an experimental interface that is subject to change and/or deletion. - */ -#ifndef MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_GPU_SPONGE_PME_PME_EXCLUDED_FORCE_KERNEL_H_ -#define MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_GPU_SPONGE_PME_PME_EXCLUDED_FORCE_KERNEL_H_ -#include -#include -#include -#include "plugin/device/gpu/kernel/gpu_kernel.h" -#include "plugin/device/gpu/kernel/gpu_kernel_factory.h" -#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/cuda_common.h" -#include "plugin/device/gpu/kernel/cuda_impl/sponge/pme/pme_excluded_force_impl.cuh" - -namespace mindspore { -namespace kernel { -template -class PMEExcludedForceGpuKernelMod : public DeprecatedNativeGpuKernelMod { - public: - PMEExcludedForceGpuKernelMod() : ele_uint_crd(1) {} - ~PMEExcludedForceGpuKernelMod() override = default; - - bool Init(const CNodePtr &kernel_node) override { - kernel_node_ = kernel_node; - atom_numbers = static_cast(GetAttr(kernel_node, "atom_numbers")); - excluded_numbers = static_cast(GetAttr(kernel_node, "excluded_numbers")); - beta = static_cast(GetAttr(kernel_node, "beta")); - InitSizeLists(); - return true; - } - - bool Launch(const std::vector &inputs, const std::vector &workspace, - const std::vector &outputs, void *stream_ptr) override { - auto uint_crd = GetDeviceAddress(inputs, 0); - auto sacler = GetDeviceAddress(inputs, 1); - auto charge = GetDeviceAddress(inputs, 2); - auto excluded_list_start = GetDeviceAddress(inputs, 3); - auto excluded_list = GetDeviceAddress(inputs, 4); - auto excluded_atom_numbers = GetDeviceAddress(inputs, 5); - - auto force = GetDeviceAddress(outputs, 0); - PMEExcludedForce(atom_numbers, beta, uint_crd, sacler, charge, excluded_list_start, excluded_list, - excluded_atom_numbers, force, reinterpret_cast(stream_ptr)); - return true; - } - - protected: - void InitSizeLists() override { - input_size_list_.push_back(atom_numbers * sizeof(UNSIGNED_INT_VECTOR)); - input_size_list_.push_back(sizeof(VECTOR)); - input_size_list_.push_back(atom_numbers * 
sizeof(T)); - input_size_list_.push_back(atom_numbers * sizeof(T1)); - input_size_list_.push_back(excluded_numbers * sizeof(T1)); - input_size_list_.push_back(atom_numbers * sizeof(T1)); - - output_size_list_.push_back(atom_numbers * 3 * sizeof(T)); - } - - private: - size_t ele_uint_crd = 1; - int atom_numbers; - int excluded_numbers; - float beta; - struct VECTOR { - float x; - float y; - float z; - }; - - struct UNSIGNED_INT_VECTOR { - unsigned int uint_x; - unsigned int uint_y; - unsigned int uint_z; - }; -}; -} // namespace kernel -} // namespace mindspore -#endif diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/pme/pme_excluded_force_update_kernel.cc b/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/pme/pme_excluded_force_update_kernel.cc deleted file mode 100644 index de0ec83a10c..00000000000 --- a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/pme/pme_excluded_force_update_kernel.cc +++ /dev/null @@ -1,36 +0,0 @@ -/** - * Copyright 2021-2022 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -/** - * Note: - * PMEExcludedForceUpdate. This is an experimental interface that is subject to change and/or deletion. - */ -#include "plugin/device/gpu/kernel/sponge/pme/pme_excluded_force_update_kernel.h" - -namespace mindspore { -namespace kernel { -MS_REG_GPU_KERNEL_TWO(PMEExcludedForceUpdate, - KernelAttr() - .AddInputAttr(kNumberTypeUInt32) - .AddInputAttr(kNumberTypeFloat32) - .AddInputAttr(kNumberTypeFloat32) - .AddInputAttr(kNumberTypeInt32) - .AddInputAttr(kNumberTypeInt32) - .AddInputAttr(kNumberTypeInt32) - .AddInputAttr(kNumberTypeFloat32) - .AddOutputAttr(kNumberTypeFloat32), - PMEExcludedForceUpdateGpuKernelMod, float, int) -} // namespace kernel -} // namespace mindspore diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/pme/pme_excluded_force_update_kernel.h b/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/pme/pme_excluded_force_update_kernel.h deleted file mode 100644 index 2792be490e9..00000000000 --- a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/pme/pme_excluded_force_update_kernel.h +++ /dev/null @@ -1,106 +0,0 @@ -/** - * Copyright 2021-2022 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -/** - * Note: - * PMEExcludedForceUpdate. This is an experimental interface that is subject to change and/or deletion. 
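[Editor's note: PMEExcludedForce above walks, per atom, the excluded_list slice starting at excluded_list_start[i] with excluded_atom_numbers[i] entries, and removes the smooth Coulomb contribution that the reciprocal-space sum wrongly counted for bonded/excluded pairs. The sketch below is the textbook per-pair correction under the usual Ewald convention, derived from E = -q_i q_j erf(beta r)/r; the removed CUDA kernel's exact sign and unit conventions are not asserted here.]

    #include <cmath>

    struct Vec3 { float x, y, z; };

    // Correction force on atom i from one excluded neighbor j at displacement
    // dr = r_i - r_j; accumulate the negative of this on j.
    Vec3 ExcludedPairForce(Vec3 dr, float qi, float qj, float beta) {
      const float kTwoOverSqrtPi = 1.1283791671f;  // 2 / sqrt(pi)
      float r2 = dr.x * dr.x + dr.y * dr.y + dr.z * dr.z;
      float r = std::sqrt(r2);
      // d/dr [erf(beta r)/r], divided by a further r to scale the vector dr.
      float fr = qi * qj * (kTwoOverSqrtPi * beta * std::exp(-beta * beta * r2) / r2 -
                            std::erf(beta * r) / (r2 * r));
      return {fr * dr.x, fr * dr.y, fr * dr.z};
    }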
- */ -#ifndef MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_GPU_SPONGE_PME_PME_EXCLUDED_FORCE_KERNEL_H_ -#define MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_GPU_SPONGE_PME_PME_EXCLUDED_FORCE_KERNEL_H_ - -#include -#include -#include -#include -#include -#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/cuda_common.h" -#include "plugin/device/gpu/kernel/gpu_kernel.h" -#include "plugin/device/gpu/kernel/gpu_kernel_factory.h" -#include "plugin/device/gpu/kernel/cuda_impl/sponge/pme/pme_excluded_force_impl.cuh" - -namespace mindspore { -namespace kernel { -template -class PMEExcludedForceUpdateGpuKernelMod : public DeprecatedNativeGpuKernelMod { - public: - PMEExcludedForceUpdateGpuKernelMod() : ele_uint_crd(1) {} - ~PMEExcludedForceUpdateGpuKernelMod() override = default; - - bool Init(const CNodePtr &kernel_node) override { - kernel_node_ = kernel_node; - atom_numbers = static_cast(GetAttr(kernel_node, "atom_numbers")); - excluded_numbers = static_cast(GetAttr(kernel_node, "excluded_numbers")); - beta = static_cast(GetAttr(kernel_node, "beta")); - need_update = static_cast(GetAttr(kernel_node, "need_update")); - InitSizeLists(); - return true; - } - - bool Launch(const std::vector &inputs, const std::vector &workspace, - const std::vector &outputs, void *stream_ptr) override { - auto uint_crd = GetDeviceAddress(inputs, 0); - auto sacler = GetDeviceAddress(inputs, 1); - auto charge = GetDeviceAddress(inputs, 2); - auto excluded_list_start = GetDeviceAddress(inputs, 3); - auto excluded_list = GetDeviceAddress(inputs, 4); - auto excluded_atom_numbers = GetDeviceAddress(inputs, 5); - auto d_beta = GetDeviceAddress(inputs, 6); - if (need_update) { - cudaMemcpyAsync(&beta, d_beta, sizeof(float), cudaMemcpyDeviceToHost, reinterpret_cast(stream_ptr)); - cudaStreamSynchronize(reinterpret_cast(stream_ptr)); - } - - auto force = GetDeviceAddress(outputs, 0); - PMEExcludedForce(atom_numbers, beta, uint_crd, sacler, charge, excluded_list_start, excluded_list, - excluded_atom_numbers, force, reinterpret_cast(stream_ptr)); - return true; - } - - protected: - void InitSizeLists() override { - input_size_list_.push_back(atom_numbers * sizeof(UNSIGNED_INT_VECTOR)); - input_size_list_.push_back(sizeof(VECTOR)); - input_size_list_.push_back(atom_numbers * sizeof(T)); - input_size_list_.push_back(atom_numbers * sizeof(T1)); - input_size_list_.push_back(excluded_numbers * sizeof(T1)); - input_size_list_.push_back(atom_numbers * sizeof(T1)); - input_size_list_.push_back(sizeof(T)); - - output_size_list_.push_back(atom_numbers * 3 * sizeof(T)); - } - - private: - size_t ele_uint_crd = 1; - - int atom_numbers; - int excluded_numbers; - float beta; - float factor; - int need_update; - struct VECTOR { - float x; - float y; - float z; - }; - - struct UNSIGNED_INT_VECTOR { - unsigned int uint_x; - unsigned int uint_y; - unsigned int uint_z; - }; -}; -} // namespace kernel -} // namespace mindspore -#endif diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/pme/pme_fft_1d_kernel.cc b/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/pme/pme_fft_1d_kernel.cc deleted file mode 100644 index d3d81f1f706..00000000000 --- a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/pme/pme_fft_1d_kernel.cc +++ /dev/null @@ -1,23 +0,0 @@ -/** - * Copyright 2021-2022 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
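[Editor's note: the Update variant above reads the possibly-changed beta back from device memory before launching, using the standard async-copy-plus-synchronize pair for fetching a single scalar mid-graph. A minimal sketch of that pattern, with error checking omitted:]

    #include <cuda_runtime.h>

    float ReadDeviceScalar(const float *d_value, cudaStream_t stream) {
      float h_value = 0.0f;
      cudaMemcpyAsync(&h_value, d_value, sizeof(float), cudaMemcpyDeviceToHost, stream);
      cudaStreamSynchronize(stream);  // the host must wait before using h_value
      return h_value;
    }

[The synchronize is what makes need_update costly: the host stalls the stream every launch to decide whether to rescale, which is presumably why the non-update kernels skip the round trip entirely.]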
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -#include "plugin/device/gpu/kernel/sponge/pme/pme_fft_1d_kernel.h" - -namespace mindspore { -namespace kernel { -MS_REG_GPU_KERNEL_ONE(PMEFFT1D, KernelAttr().AddInputAttr(kNumberTypeComplex64).AddOutputAttr(kNumberTypeComplex64), - PMEFFT1DGpuKernelMod, float) -} // namespace kernel -} // namespace mindspore diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/pme/pme_fft_1d_kernel.h b/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/pme/pme_fft_1d_kernel.h deleted file mode 100644 index 55a692252ec..00000000000 --- a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/pme/pme_fft_1d_kernel.h +++ /dev/null @@ -1,73 +0,0 @@ -/** - * Copyright 2021-2022 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -#ifndef MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_GPU_SPONGE_PME_PME_FFT_1D_KERNEL_H_ -#define MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_GPU_SPONGE_PME_PME_FFT_1D_KERNEL_H_ -#include -#include -#include -#include -#include -#include "plugin/device/gpu/kernel/gpu_kernel.h" -#include "plugin/device/gpu/kernel/gpu_kernel_factory.h" -#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/cuda_common.h" -#include "plugin/device/gpu/kernel/cuda_impl/sponge/pme/pme_fft_1d_impl.cuh" -namespace mindspore { -namespace kernel { -template -using Complex = mindspore::utils::Complex; -template -class PMEFFT1DGpuKernelMod : public DeprecatedNativeGpuKernelMod { - public: - PMEFFT1DGpuKernelMod() = default; - ~PMEFFT1DGpuKernelMod() override = default; - - bool Init(const CNodePtr &kernel_node) override { - fftx = static_cast(GetAttr(kernel_node, "fftx")); - Nall = fftx; - Nfft = fftx; - - cufftPlan1d(&FFT_plan_c2c, fftx, CUFFT_C2C, batch); - - InitSizeLists(); - return true; - } - - bool Launch(const std::vector &inputs, const std::vector &workspace, - const std::vector &outputs, void *stream_ptr) override { - auto input_tensor = GetDeviceAddress>(inputs, 0); - auto output_tensor = GetDeviceAddress>(outputs, 0); - - cufftSetStream(FFT_plan_c2c, reinterpret_cast(stream_ptr)); - PMEFFT1D(Nfft, input_tensor, output_tensor, FFT_plan_c2c, reinterpret_cast(stream_ptr)); - return true; - } - - protected: - void InitSizeLists() override { - input_size_list_.push_back(Nall * sizeof(Complex)); - output_size_list_.push_back(Nfft * sizeof(Complex)); - } - - private: - int batch = 1; - int fftx; - int Nall; - int Nfft; - cufftHandle FFT_plan_c2c; -}; -} // namespace kernel -} // namespace mindspore -#endif diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/pme/pme_fft_2d_kernel.cc 
b/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/pme/pme_fft_2d_kernel.cc deleted file mode 100644 index 588bc98d5ae..00000000000 --- a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/pme/pme_fft_2d_kernel.cc +++ /dev/null @@ -1,23 +0,0 @@ -/** - * Copyright 2021-2022 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -#include "plugin/device/gpu/kernel/sponge/pme/pme_fft_2d_kernel.h" - -namespace mindspore { -namespace kernel { -MS_REG_GPU_KERNEL_ONE(PMEFFT2D, KernelAttr().AddInputAttr(kNumberTypeComplex64).AddOutputAttr(kNumberTypeComplex64), - PMEFFT2DGpuKernelMod, float) -} // namespace kernel -} // namespace mindspore diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/pme/pme_fft_2d_kernel.h b/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/pme/pme_fft_2d_kernel.h deleted file mode 100644 index 8e486a0c53f..00000000000 --- a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/pme/pme_fft_2d_kernel.h +++ /dev/null @@ -1,74 +0,0 @@ -/** - * Copyright 2021-2022 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#ifndef MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_GPU_SPONGE_PME_PME_FFT_2D_KERNEL_H_ -#define MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_GPU_SPONGE_PME_PME_FFT_2D_KERNEL_H_ -#include -#include -#include -#include -#include -#include "plugin/device/gpu/kernel/gpu_kernel.h" -#include "plugin/device/gpu/kernel/gpu_kernel_factory.h" -#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/cuda_common.h" -#include "plugin/device/gpu/kernel/cuda_impl/sponge/pme/pme_fft_2d_impl.cuh" -namespace mindspore { -namespace kernel { -template -using Complex = mindspore::utils::Complex; -template -class PMEFFT2DGpuKernelMod : public DeprecatedNativeGpuKernelMod { - public: - PMEFFT2DGpuKernelMod() = default; - ~PMEFFT2DGpuKernelMod() override = default; - - bool Init(const CNodePtr &kernel_node) override { - fftx = static_cast(GetAttr(kernel_node, "fftx")); - ffty = static_cast(GetAttr(kernel_node, "ffty")); - Nall = fftx * ffty; - Nfft = fftx * ffty; - - cufftPlan2d(&FFT_plan_c2c, fftx, ffty, CUFFT_C2C); - - InitSizeLists(); - return true; - } - - bool Launch(const std::vector &inputs, const std::vector &workspace, - const std::vector &outputs, void *stream_ptr) override { - auto input_tensor = GetDeviceAddress>(inputs, 0); - auto output_tensor = GetDeviceAddress>(outputs, 0); - - cufftSetStream(FFT_plan_c2c, reinterpret_cast(stream_ptr)); - PMEFFT2D(Nfft, input_tensor, output_tensor, FFT_plan_c2c, reinterpret_cast(stream_ptr)); - return true; - } - - protected: - void InitSizeLists() override { - input_size_list_.push_back(Nall * sizeof(Complex)); - output_size_list_.push_back(Nfft * sizeof(Complex)); - } - - private: - int fftx; - int ffty; - int Nall; - int Nfft; - cufftHandle FFT_plan_c2c; -}; -} // namespace kernel -} // namespace mindspore -#endif diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/pme/pme_ifft_1d_kernel.cc b/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/pme/pme_ifft_1d_kernel.cc deleted file mode 100644 index 137ce5bcf6c..00000000000 --- a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/pme/pme_ifft_1d_kernel.cc +++ /dev/null @@ -1,23 +0,0 @@ -/** - * Copyright 2021-2022 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
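[Editor's note: the PMEFFT1D/PMEIFFT1D mods in this area are thin wrappers over four cuFFT calls. A self-contained round-trip sketch follows; note that cuFFT's inverse transform is unnormalized, so a forward+inverse pair returns N times the input — whether the deleted impl files rescale afterwards is not shown in this patch.]

    #include <cufft.h>

    void Fft1dRoundTrip(cufftComplex *d_buf, int nx, cudaStream_t stream) {
      cufftHandle plan;
      cufftPlan1d(&plan, nx, CUFFT_C2C, /*batch=*/1);
      cufftSetStream(plan, stream);                     // run on the launch stream
      cufftExecC2C(plan, d_buf, d_buf, CUFFT_FORWARD);  // in-place is supported
      cufftExecC2C(plan, d_buf, d_buf, CUFFT_INVERSE);  // yields nx * original data
      cufftDestroy(plan);
    }

[The removed kernels instead plan once in Init() and rebind the stream on every Launch(), amortizing plan creation across simulation steps; the handle is never destroyed, a small leak these deprecated mods share.]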
- */ -#include "plugin/device/gpu/kernel/sponge/pme/pme_ifft_1d_kernel.h" - -namespace mindspore { -namespace kernel { -MS_REG_GPU_KERNEL_ONE(PMEIFFT1D, KernelAttr().AddInputAttr(kNumberTypeComplex64).AddOutputAttr(kNumberTypeComplex64), - PMEIFFT1DGpuKernelMod, float) -} // namespace kernel -} // namespace mindspore diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/pme/pme_ifft_1d_kernel.h b/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/pme/pme_ifft_1d_kernel.h deleted file mode 100644 index 5aedb6cecee..00000000000 --- a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/pme/pme_ifft_1d_kernel.h +++ /dev/null @@ -1,73 +0,0 @@ -/** - * Copyright 2021-2022 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -#ifndef MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_GPU_SPONGE_PME_PME_IFFT_1D_KERNEL_H_ -#define MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_GPU_SPONGE_PME_PME_IFFT_1D_KERNEL_H_ -#include -#include -#include -#include -#include -#include "plugin/device/gpu/kernel/gpu_kernel.h" -#include "plugin/device/gpu/kernel/gpu_kernel_factory.h" -#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/cuda_common.h" -#include "plugin/device/gpu/kernel/cuda_impl/sponge/pme/pme_ifft_1d_impl.cuh" -namespace mindspore { -namespace kernel { -template -using Complex = mindspore::utils::Complex; -template -class PMEIFFT1DGpuKernelMod : public DeprecatedNativeGpuKernelMod { - public: - PMEIFFT1DGpuKernelMod() = default; - ~PMEIFFT1DGpuKernelMod() override = default; - - bool Init(const CNodePtr &kernel_node) override { - fftx = static_cast(GetAttr(kernel_node, "fftx")); - Nfft = fftx; - Nall = fftx; - - cufftPlan1d(&FFT_plan_c2c, fftx, CUFFT_C2C, batch); - - InitSizeLists(); - return true; - } - - bool Launch(const std::vector &inputs, const std::vector &workspace, - const std::vector &outputs, void *stream_ptr) override { - auto input_tensor = GetDeviceAddress>(inputs, 0); - auto output_tensor = GetDeviceAddress>(outputs, 0); - - cufftSetStream(FFT_plan_c2c, reinterpret_cast(stream_ptr)); - PMEIFFT1D(Nfft, input_tensor, output_tensor, FFT_plan_c2c, reinterpret_cast(stream_ptr)); - return true; - } - - protected: - void InitSizeLists() override { - input_size_list_.push_back(Nfft * sizeof(Complex)); - output_size_list_.push_back(Nall * sizeof(Complex)); - } - - private: - int batch = 1; - int fftx; - int Nall; - int Nfft; - cufftHandle FFT_plan_c2c; -}; -} // namespace kernel -} // namespace mindspore -#endif diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/pme/pme_ifft_2d_kernel.cc b/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/pme/pme_ifft_2d_kernel.cc deleted file mode 100644 index a7554a6fbb4..00000000000 --- a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/pme/pme_ifft_2d_kernel.cc +++ /dev/null @@ -1,23 +0,0 @@ -/** - * Copyright 2021-2022 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -#include "plugin/device/gpu/kernel/sponge/pme/pme_ifft_2d_kernel.h" - -namespace mindspore { -namespace kernel { -MS_REG_GPU_KERNEL_ONE(PMEIFFT2D, KernelAttr().AddInputAttr(kNumberTypeComplex64).AddOutputAttr(kNumberTypeComplex64), - PMEIFFT2DGpuKernelMod, float) -} // namespace kernel -} // namespace mindspore diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/pme/pme_ifft_2d_kernel.h b/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/pme/pme_ifft_2d_kernel.h deleted file mode 100644 index 30dd017f041..00000000000 --- a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/pme/pme_ifft_2d_kernel.h +++ /dev/null @@ -1,74 +0,0 @@ -/** - * Copyright 2021-2022 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -#ifndef MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_GPU_SPONGE_PME_PME_IFFT_2D_KERNEL_H_ -#define MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_GPU_SPONGE_PME_PME_IFFT_2D_KERNEL_H_ -#include -#include -#include -#include -#include -#include "plugin/device/gpu/kernel/gpu_kernel.h" -#include "plugin/device/gpu/kernel/gpu_kernel_factory.h" -#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/cuda_common.h" -#include "plugin/device/gpu/kernel/cuda_impl/sponge/pme/pme_ifft_2d_impl.cuh" -namespace mindspore { -namespace kernel { -template -using Complex = mindspore::utils::Complex; -template -class PMEIFFT2DGpuKernelMod : public DeprecatedNativeGpuKernelMod { - public: - PMEIFFT2DGpuKernelMod() = default; - ~PMEIFFT2DGpuKernelMod() override = default; - - bool Init(const CNodePtr &kernel_node) override { - fftx = static_cast(GetAttr(kernel_node, "fftx")); - ffty = static_cast(GetAttr(kernel_node, "ffty")); - Nfft = fftx * ffty; - Nall = fftx * ffty; - - cufftPlan2d(&FFT_plan_c2c, fftx, ffty, CUFFT_C2C); - - InitSizeLists(); - return true; - } - - bool Launch(const std::vector &inputs, const std::vector &workspace, - const std::vector &outputs, void *stream_ptr) override { - auto input_tensor = GetDeviceAddress>(inputs, 0); - auto output_tensor = GetDeviceAddress>(outputs, 0); - - cufftSetStream(FFT_plan_c2c, reinterpret_cast(stream_ptr)); - PMEIFFT2D(Nfft, input_tensor, output_tensor, FFT_plan_c2c, reinterpret_cast(stream_ptr)); - return true; - } - - protected: - void InitSizeLists() override { - input_size_list_.push_back(Nfft * sizeof(Complex)); - output_size_list_.push_back(Nall * sizeof(Complex)); - } - - private: - int fftx; - int ffty; - int Nall; - int Nfft; - cufftHandle FFT_plan_c2c; -}; -} // namespace kernel -} // namespace mindspore -#endif diff --git 
a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/pme/pme_irfft_2d_kernel.cc b/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/pme/pme_irfft_2d_kernel.cc deleted file mode 100644 index a827e44058f..00000000000 --- a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/pme/pme_irfft_2d_kernel.cc +++ /dev/null @@ -1,23 +0,0 @@ -/** - * Copyright 2021-2022 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -#include "plugin/device/gpu/kernel/sponge/pme/pme_irfft_2d_kernel.h" - -namespace mindspore { -namespace kernel { -MS_REG_GPU_KERNEL_ONE(PMEIRFFT2D, KernelAttr().AddInputAttr(kNumberTypeComplex64).AddOutputAttr(kNumberTypeFloat32), - PMEIRFFT2DGpuKernelMod, float) -} // namespace kernel -} // namespace mindspore diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/pme/pme_irfft_2d_kernel.h b/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/pme/pme_irfft_2d_kernel.h deleted file mode 100644 index 9c0bad02f4a..00000000000 --- a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/pme/pme_irfft_2d_kernel.h +++ /dev/null @@ -1,75 +0,0 @@ -/** - * Copyright 2021-2022 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#ifndef MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_GPU_SPONGE_PME_PME_IRFFT_2D_KERNEL_H_ -#define MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_GPU_SPONGE_PME_PME_IRFFT_2D_KERNEL_H_ -#include -#include -#include -#include -#include -#include "plugin/device/gpu/kernel/gpu_kernel.h" -#include "plugin/device/gpu/kernel/gpu_kernel_factory.h" -#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/cuda_common.h" -#include "plugin/device/gpu/kernel/cuda_impl/sponge/pme/pme_irfft_2d_impl.cuh" -namespace mindspore { -namespace kernel { -template -using Complex = mindspore::utils::Complex; -template -class PMEIRFFT2DGpuKernelMod : public DeprecatedNativeGpuKernelMod { - public: - PMEIRFFT2DGpuKernelMod() = default; - ~PMEIRFFT2DGpuKernelMod() override = default; - - bool Init(const CNodePtr &kernel_node) override { - constexpr int NUM_2 = 2; - fftx = static_cast(GetAttr(kernel_node, "fftx")); - ffty = static_cast(GetAttr(kernel_node, "ffty")); - Nfft = fftx * ffty; - Nall = fftx * (ffty - 1) * NUM_2; - - cufftPlan2d(&FFT_plan_c2r, fftx, (ffty - 1) * NUM_2, CUFFT_C2R); - - InitSizeLists(); - return true; - } - - bool Launch(const std::vector &inputs, const std::vector &workspace, - const std::vector &outputs, void *stream_ptr) override { - auto input_tensor = GetDeviceAddress>(inputs, 0); - auto output_tensor = GetDeviceAddress(outputs, 0); - - cufftSetStream(FFT_plan_c2r, reinterpret_cast(stream_ptr)); - PMEIRFFT2D(Nfft, input_tensor, output_tensor, FFT_plan_c2r, reinterpret_cast(stream_ptr)); - return true; - } - - protected: - void InitSizeLists() override { - input_size_list_.push_back(Nfft * sizeof(Complex)); - output_size_list_.push_back(Nall * sizeof(T)); - } - - private: - int fftx; - int ffty; - int Nall; - int Nfft; - cufftHandle FFT_plan_c2r; -}; -} // namespace kernel -} // namespace mindspore -#endif diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/pme/pme_reciprocal_force_kernel.cc b/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/pme/pme_reciprocal_force_kernel.cc deleted file mode 100644 index d8ed969b054..00000000000 --- a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/pme/pme_reciprocal_force_kernel.cc +++ /dev/null @@ -1,25 +0,0 @@ -/** - * Copyright 2021-2022 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
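[Editor's note: PMEIRFFT2D above sizes its buffers from the Hermitian half-spectrum convention — a real (nx, ny) array transforms under R2C into (nx, ny/2 + 1) complex values — so given a spectrum width ffty it recovers the real width as (ffty - 1) * 2 and plans CUFFT_C2R accordingly. The arithmetic, with a concrete example:]

    // fftx = 16, ffty = 9:  16 * 9 = 144 complex in,  16 * (9 - 1) * 2 = 256 floats out.
    int RealWidthFromSpectrum(int ffty) { return (ffty - 1) * 2; }      // Nall / fftx
    int SpectrumWidthFromReal(int ny_real) { return ny_real / 2 + 1; }  // inverse map

[Note the round trip is exact only for even real widths; an odd width ny maps to spectrum width (ny - 1)/2 + 1 and back to ny - 1, so it does not fit this (ffty - 1) * 2 convention.]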
- */ -#include "plugin/device/gpu/kernel/sponge/pme/pme_reciprocal_force_kernel.h" - -namespace mindspore { -namespace kernel { -MS_REG_GPU_KERNEL_TWO( - PMEReciprocalForce, - KernelAttr().AddInputAttr(kNumberTypeUInt32).AddInputAttr(kNumberTypeFloat32).AddOutputAttr(kNumberTypeFloat32), - PMEReciprocalForceGpuKernelMod, float, int) -} // namespace kernel -} // namespace mindspore diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/pme/pme_reciprocal_force_kernel.h b/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/pme/pme_reciprocal_force_kernel.h deleted file mode 100644 index a2fa1a76dea..00000000000 --- a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/pme/pme_reciprocal_force_kernel.h +++ /dev/null @@ -1,243 +0,0 @@ -/** - * Copyright 2021-2022 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -/** - * Note: - * PMEReciprocalForce. This is an experimental interface that is subject to change and/or deletion. - */ -#ifndef MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_GPU_SPONGE_PME_PME_RECIPROCAL_FORCE_KERNEL_H_ -#define MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_GPU_SPONGE_PME_PME_RECIPROCAL_FORCE_KERNEL_H_ -#include -#include -#include -#include -#include -#include "plugin/device/gpu/kernel/gpu_kernel.h" -#include "plugin/device/gpu/kernel/gpu_kernel_factory.h" -#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/cuda_common.h" -#include "plugin/device/gpu/kernel/cuda_impl/sponge/pme/pme_reciprocal_force_impl.cuh" -namespace mindspore { -namespace kernel { -template -class PMEReciprocalForceGpuKernelMod : public DeprecatedNativeGpuKernelMod { - public: - PMEReciprocalForceGpuKernelMod() : ele_uint_crd(1) {} - ~PMEReciprocalForceGpuKernelMod() override = default; - - bool Init(const CNodePtr &kernel_node) override { - kernel_node_ = kernel_node; - atom_numbers = static_cast(GetAttr(kernel_node, "atom_numbers")); - beta = static_cast(GetAttr(kernel_node, "beta")); - fftx = static_cast(GetAttr(kernel_node, "fftx")); - ffty = static_cast(GetAttr(kernel_node, "ffty")); - fftz = static_cast(GetAttr(kernel_node, "fftz")); - PME_Nall = fftx * ffty * fftz; - PME_Nfft = fftx * ffty * (fftz / 2 + 1); - PME_Nin = ffty * fftz; - - float box_length_0 = static_cast(GetAttr(kernel_node, "box_length_0")); - float box_length_1 = static_cast(GetAttr(kernel_node, "box_length_1")); - float box_length_2 = static_cast(GetAttr(kernel_node, "box_length_2")); - std::vector h_box_length{box_length_0, box_length_1, box_length_2}; - VECTOR *box_length = reinterpret_cast(h_box_length.data()); - PME_inverse_box_vector.x = static_cast(fftx) / box_length[0].x; - PME_inverse_box_vector.y = static_cast(ffty) / box_length[0].y; - PME_inverse_box_vector.z = static_cast(fftz) / box_length[0].z; - cufftPlan3d(&PME_plan_r2c, fftx, ffty, fftz, CUFFT_R2C); - cufftPlan3d(&PME_plan_c2r, fftx, ffty, fftz, CUFFT_C2R); - - float volume = box_length[0].x * box_length[0].y * box_length[0].z; - PME_kxyz_cpu.resize(64); - int kx, ky, kz, kxrp, kyrp, kzrp, index; - for (kx = 0; kx < 4; kx++) { 
- for (ky = 0; ky < 4; ky++) { - for (kz = 0; kz < 4; kz++) { - index = kx * 16 + ky * 4 + kz; - PME_kxyz_cpu[index].uint_x = kx; - PME_kxyz_cpu[index].uint_y = ky; - PME_kxyz_cpu[index].uint_z = kz; - } - } - } - B1.resize(fftx); - B2.resize(ffty); - B3.resize(fftz); - PME_BC0.resize(PME_Nfft); - - for (kx = 0; kx < fftx; kx++) { - B1[kx] = getb(kx, fftx, 4); - } - - for (ky = 0; ky < ffty; ky++) { - B2[ky] = getb(ky, ffty, 4); - } - - for (kz = 0; kz < fftz; kz++) { - B3[kz] = getb(kz, fftz, 4); - } - float mprefactor = PI * PI / -beta / beta; - float msq; - for (kx = 0; kx < fftx; kx++) { - kxrp = kx; - if (kx > fftx / 2) kxrp = fftx - kx; - for (ky = 0; ky < ffty; ky++) { - kyrp = ky; - if (ky > ffty / 2) kyrp = ffty - ky; - for (kz = 0; kz <= fftz / 2; kz++) { - kzrp = kz; - - msq = kxrp * kxrp / box_length[0].x / box_length[0].x + kyrp * kyrp / box_length[0].y / box_length[0].y + - kzrp * kzrp / box_length[0].z / box_length[0].z; - index = kx * ffty * (fftz / 2 + 1) + ky * (fftz / 2 + 1) + kz; - if ((kx + ky + kz) == 0) { - PME_BC0[index] = 0; - } else { - PME_BC0[index] = 1.0 / PI / msq * exp(mprefactor * msq) / volume; - } - - PME_BC0[index] *= B1[kx] * B2[ky] * B3[kz]; - } - } - } - - InitSizeLists(); - return true; - } - - bool Launch(const std::vector &inputs, const std::vector &workspace, - const std::vector &outputs, void *stream_ptr) override { - auto uint_crd = GetDeviceAddress(inputs, 0); - auto charge = GetDeviceAddress(inputs, 1); - - auto pme_uxyz = GetDeviceAddress(workspace, 0); // workspace - auto pme_frxyz = GetDeviceAddress(workspace, 1); // workspace - auto pme_q = GetDeviceAddress(workspace, 2); // workspace - auto pme_fq = GetDeviceAddress(workspace, 3); // workspace - auto pme_atom_near = GetDeviceAddress(workspace, 4); // workspace - auto pme_bc = GetDeviceAddress(workspace, 5); // workspace - auto pme_kxyz = GetDeviceAddress(workspace, 6); // workspace - - auto force = GetDeviceAddress(outputs, 0); - cufftSetStream(PME_plan_r2c, reinterpret_cast(stream_ptr)); - cufftSetStream(PME_plan_c2r, reinterpret_cast(stream_ptr)); - cudaMemcpyAsync(pme_kxyz, PME_kxyz_cpu.data(), sizeof(UNSIGNED_INT_VECTOR) * 64, cudaMemcpyHostToDevice, - reinterpret_cast(stream_ptr)); - cudaMemcpyAsync(pme_bc, PME_BC0.data(), sizeof(float) * PME_Nfft, cudaMemcpyHostToDevice, - reinterpret_cast(stream_ptr)); - PMEReciprocalForce(fftx, ffty, fftz, atom_numbers, beta, pme_bc, pme_uxyz, pme_frxyz, pme_q, pme_fq, pme_atom_near, - pme_kxyz, uint_crd, charge, force, PME_Nin, PME_Nall, PME_Nfft, PME_plan_r2c, PME_plan_c2r, - PME_inverse_box_vector, reinterpret_cast(stream_ptr)); - return true; - } - - protected: - void InitSizeLists() override { - input_size_list_.push_back(atom_numbers * sizeof(UNSIGNED_INT_VECTOR)); - input_size_list_.push_back(atom_numbers * sizeof(float)); - - workspace_size_list_.push_back(atom_numbers * sizeof(UNSIGNED_INT_VECTOR)); - workspace_size_list_.push_back(atom_numbers * sizeof(VECTOR)); - workspace_size_list_.push_back(PME_Nall * sizeof(T)); - workspace_size_list_.push_back(PME_Nfft * sizeof(cufftComplex)); - workspace_size_list_.push_back(atom_numbers * 64 * sizeof(int)); - workspace_size_list_.push_back(PME_Nfft * sizeof(float)); - workspace_size_list_.push_back(64 * sizeof(UNSIGNED_INT_VECTOR)); - - output_size_list_.push_back(atom_numbers * sizeof(VECTOR)); - } - - cufftComplex expc(cufftComplex z) { - cufftComplex res; - float t = expf(z.x); - mindspore::kernel::math::SinCosf(z.y, &res.y, &res.x); - res.x *= t; - res.y *= t; - return res; - } - - float 
M_(float u, int n) { - if (n == 2) { - if (u > 2 || u < 0) return 0; - return 1 - abs(u - 1); - } else { - return u / (n - 1) * M_(u, n - 1) + (n - u) / (n - 1) * M_(u - 1, n - 1); - } - } - - float getb(int k, int NFFT, int B_order) { - cufftComplex tempc, tempc2, res; - float tempf; - tempc2.x = 0; - tempc2.y = 0; - - tempc.x = 0; - if (NFFT == 0) { - MS_LOG(ERROR) << "Divide by zero."; - } else { - tempc.y = 2 * (B_order - 1) * PI * k / NFFT; - } - res = expc(tempc); - - for (int kk = 0; kk < (B_order - 1); kk++) { - tempc.x = 0; - if (NFFT == 0) { - MS_LOG(ERROR) << "Divide by zero."; - break; - } else { - tempc.y = 2 * PI * k / NFFT * kk; - } - tempc = expc(tempc); - tempf = M_(kk + 1, B_order); - tempc2.x += tempf * tempc.x; - tempc2.y += tempf * tempc.y; - } - res = cuCdivf(res, tempc2); - return res.x * res.x + res.y * res.y; - } - - private: - size_t ele_uint_crd = 1; - int atom_numbers; - int fftx; - int ffty; - int fftz; - float beta; - int PME_Nall; - int PME_Nfft; - int PME_Nin; - float PI = 3.1415926; - std::vector B1; - std::vector B2; - std::vector B3; - std::vector PME_BC0; - - cufftHandle PME_plan_r2c; - cufftHandle PME_plan_c2r; - struct VECTOR { - float x; - float y; - float z; - }; - _VECTOR PME_inverse_box_vector; - struct UNSIGNED_INT_VECTOR { - unsigned int uint_x; - unsigned int uint_y; - unsigned int uint_z; - }; - std::vector PME_kxyz_cpu; -}; -} // namespace kernel -} // namespace mindspore -#endif diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/pme/pme_reciprocal_force_update_kernel.cc b/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/pme/pme_reciprocal_force_update_kernel.cc deleted file mode 100644 index d25884edb97..00000000000 --- a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/pme/pme_reciprocal_force_update_kernel.cc +++ /dev/null @@ -1,32 +0,0 @@ -/** - * Copyright 2021-2022 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -/** - * Note: - * PMEReciprocalForceUpdate. This is an experimental interface that is subject to change and/or deletion. 
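[Editor's note: the triple loop in Init() above — shared by PMEReciprocalForce, PMEEnergyUpdate, and the Update variant that follows — fills PME_BC0 with the reciprocal-space influence function. Read back into formula form, with k'_i the index folded to the nearest image (k' = k for k <= N/2, else N - k), V the box volume, and B_i the |b|^2 spline factors from getb:]

    BC_0(\mathbf{k}) = \frac{B_1(k_x)\,B_2(k_y)\,B_3(k_z)}{\pi V m^2}
                       \exp\!\left(-\frac{\pi^2 m^2}{\beta^2}\right),
    \qquad
    m^2 = \left(\frac{k'_x}{L_x}\right)^2 + \left(\frac{k'_y}{L_y}\right)^2
        + \left(\frac{k'_z}{L_z}\right)^2,

[with BC_0(0) = 0 (the conducting-boundary term is dropped). This also explains the Update path's uniform rescaling of the cached table: scaling beta and the box edges by the same factor leaves m^2/beta^2 invariant while the 1/(pi V m^2) prefactor picks up exactly 1/factor, and the B_i factors are beta-independent, so the table can be scaled in place instead of rebuilt.]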
- */ -#include "plugin/device/gpu/kernel/sponge/pme/pme_reciprocal_force_update_kernel.h" - -namespace mindspore { -namespace kernel { -MS_REG_GPU_KERNEL_TWO(PMEReciprocalForceUpdate, - KernelAttr() - .AddInputAttr(kNumberTypeUInt32) - .AddInputAttr(kNumberTypeFloat32) - .AddInputAttr(kNumberTypeFloat32) - .AddOutputAttr(kNumberTypeFloat32), - PMEReciprocalForceUpdateGpuKernelMod, float, int) -} // namespace kernel -} // namespace mindspore diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/pme/pme_reciprocal_force_update_kernel.h b/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/pme/pme_reciprocal_force_update_kernel.h deleted file mode 100644 index 80e73c41cb5..00000000000 --- a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/pme/pme_reciprocal_force_update_kernel.h +++ /dev/null @@ -1,273 +0,0 @@ -/** - * Copyright 2021-2022 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -/** - * Note: - * PMEReciprocalForceUpdate. This is an experimental interface that is subject to change and/or deletion. - */ -#ifndef MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_GPU_SPONGE_PME_PME_RECIPROCAL_FORCE_UPDATE_KERNEL_H_ -#define MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_GPU_SPONGE_PME_PME_RECIPROCAL_FORCE_UPDATE_KERNEL_H_ -#include -#include -#include -#include "plugin/device/gpu/kernel/gpu_kernel.h" -#include "plugin/device/gpu/kernel/gpu_kernel_factory.h" -#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/cuda_common.h" -#include "plugin/device/gpu/kernel/cuda_impl/sponge/pme/pme_reciprocal_force_impl.cuh" - -namespace mindspore { -namespace kernel { -template -class PMEReciprocalForceUpdateGpuKernelMod : public DeprecatedNativeGpuKernelMod { - public: - PMEReciprocalForceUpdateGpuKernelMod() : ele_uint_crd(1) {} - ~PMEReciprocalForceUpdateGpuKernelMod() override = default; - - bool Init(const CNodePtr &kernel_node) override { - kernel_node_ = kernel_node; - atom_numbers = static_cast(GetAttr(kernel_node, "atom_numbers")); - beta = static_cast(GetAttr(kernel_node, "beta")); - fftx = static_cast(GetAttr(kernel_node, "fftx")); - ffty = static_cast(GetAttr(kernel_node, "ffty")); - fftz = static_cast(GetAttr(kernel_node, "fftz")); - - float box_length_0 = static_cast(GetAttr(kernel_node, "box_length_0")); - float box_length_1 = static_cast(GetAttr(kernel_node, "box_length_1")); - float box_length_2 = static_cast(GetAttr(kernel_node, "box_length_2")); - - need_update = static_cast(GetAttr(kernel_node, "need_update")); - PME_Nall = fftx * ffty * fftz; - PME_Nfft = fftx * ffty * (fftz / 2 + 1); - PME_Nin = ffty * fftz; - cufftPlan3d(&PME_plan_r2c, fftx, ffty, fftz, CUFFT_R2C); - cufftPlan3d(&PME_plan_c2r, fftx, ffty, fftz, CUFFT_C2R); - - std::vector h_box_length{box_length_0, box_length_1, box_length_2}; - VECTOR *box_length = reinterpret_cast(h_box_length.data()); - - PME_inverse_box_vector0.x = static_cast(fftx) / box_length[0].x; - PME_inverse_box_vector0.y = static_cast(ffty) / box_length[0].y; - PME_inverse_box_vector0.z = static_cast(fftz) / 
box_length[0].z; - PME_inverse_box_vector.x = PME_inverse_box_vector0.x; - PME_inverse_box_vector.y = PME_inverse_box_vector0.y; - PME_inverse_box_vector.z = PME_inverse_box_vector0.z; - float volume = box_length[0].x * box_length[0].y * box_length[0].z; - PME_kxyz_cpu.resize(64); - int kx, ky, kz, kxrp, kyrp, kzrp, index; - for (kx = 0; kx < 4; kx++) { - for (ky = 0; ky < 4; ky++) { - for (kz = 0; kz < 4; kz++) { - index = kx * 16 + ky * 4 + kz; - PME_kxyz_cpu[index].uint_x = kx; - PME_kxyz_cpu[index].uint_y = ky; - PME_kxyz_cpu[index].uint_z = kz; - } - } - } - B1.resize(fftx); - B2.resize(ffty); - B3.resize(fftz); - PME_BC0.resize(PME_Nfft); - - for (kx = 0; kx < fftx; kx++) { - B1[kx] = getb(kx, fftx, 4); - } - - for (ky = 0; ky < ffty; ky++) { - B2[ky] = getb(ky, ffty, 4); - } - - for (kz = 0; kz < fftz; kz++) { - B3[kz] = getb(kz, fftz, 4); - } - float mprefactor = PI * PI / -beta / beta; - float msq; - for (kx = 0; kx < fftx; kx++) { - kxrp = kx; - if (kx > fftx / 2) kxrp = fftx - kx; - for (ky = 0; ky < ffty; ky++) { - kyrp = ky; - if (ky > ffty / 2) kyrp = ffty - ky; - for (kz = 0; kz <= fftz / 2; kz++) { - kzrp = kz; - - msq = kxrp * kxrp / box_length[0].x / box_length[0].x + kyrp * kyrp / box_length[0].y / box_length[0].y + - kzrp * kzrp / box_length[0].z / box_length[0].z; - index = kx * ffty * (fftz / 2 + 1) + ky * (fftz / 2 + 1) + kz; - if ((kx + ky + kz) == 0) { - PME_BC0[index] = 0; - } else { - PME_BC0[index] = 1.0 / PI / msq * exp(mprefactor * msq) / volume; - } - - PME_BC0[index] *= B1[kx] * B2[ky] * B3[kz]; - } - } - } - PME_BC = PME_BC0; - InitSizeLists(); - return true; - } - - bool Launch(const std::vector &inputs, const std::vector &workspace, - const std::vector &outputs, void *stream_ptr) override { - auto uint_crd = GetDeviceAddress(inputs, 0); - auto charge = GetDeviceAddress(inputs, 1); - auto d_beta = GetDeviceAddress(inputs, 2); - - auto pme_uxyz = GetDeviceAddress(workspace, 0); // workspace - auto pme_frxyz = GetDeviceAddress(workspace, 1); // workspace - auto pme_q = GetDeviceAddress(workspace, 2); // workspace - auto pme_fq = GetDeviceAddress(workspace, 3); // workspace - auto pme_atom_near = GetDeviceAddress(workspace, 4); // workspace - auto pme_bc = GetDeviceAddress(workspace, 5); // workspace - auto pme_kxyz = GetDeviceAddress(workspace, 6); // workspace - - auto force = GetDeviceAddress(outputs, 0); - h_beta = beta; - if (need_update) { - cudaMemcpyAsync(&h_beta, d_beta, sizeof(float), cudaMemcpyDeviceToHost, - reinterpret_cast(stream_ptr)); - cudaStreamSynchronize(reinterpret_cast(stream_ptr)); - factor = h_beta / beta; - double factor_inverse = 1.0 / factor; - - PME_inverse_box_vector.x = PME_inverse_box_vector0.x * factor_inverse; - PME_inverse_box_vector.y = PME_inverse_box_vector0.y * factor_inverse; - PME_inverse_box_vector.z = PME_inverse_box_vector0.z * factor_inverse; - - for (int i = 0; i < PME_Nfft; i++) { - PME_BC[i] = PME_BC0[i] * factor_inverse; - } - } - - cufftSetStream(PME_plan_r2c, reinterpret_cast(stream_ptr)); - cufftSetStream(PME_plan_c2r, reinterpret_cast(stream_ptr)); - cudaMemcpyAsync(pme_kxyz, PME_kxyz_cpu.data(), sizeof(UNSIGNED_INT_VECTOR) * 64, cudaMemcpyHostToDevice, - reinterpret_cast(stream_ptr)); - cudaMemcpyAsync(pme_bc, PME_BC.data(), sizeof(float) * PME_Nfft, cudaMemcpyHostToDevice, - reinterpret_cast(stream_ptr)); - PMEReciprocalForce(fftx, ffty, fftz, atom_numbers, beta, pme_bc, pme_uxyz, pme_frxyz, pme_q, pme_fq, pme_atom_near, - pme_kxyz, uint_crd, charge, force, PME_Nin, PME_Nall, PME_Nfft, PME_plan_r2c, 
PME_plan_c2r, - PME_inverse_box_vector, reinterpret_cast(stream_ptr)); - return true; - } - - protected: - void InitSizeLists() override { - input_size_list_.push_back(atom_numbers * sizeof(UNSIGNED_INT_VECTOR)); - input_size_list_.push_back(atom_numbers * sizeof(float)); - input_size_list_.push_back(sizeof(float)); - - workspace_size_list_.push_back(atom_numbers * sizeof(UNSIGNED_INT_VECTOR)); - workspace_size_list_.push_back(atom_numbers * sizeof(VECTOR)); - workspace_size_list_.push_back(PME_Nall * sizeof(T)); - workspace_size_list_.push_back(PME_Nfft * sizeof(cufftComplex)); - workspace_size_list_.push_back(atom_numbers * 64 * sizeof(int)); - workspace_size_list_.push_back(PME_Nfft * sizeof(float)); - workspace_size_list_.push_back(64 * sizeof(UNSIGNED_INT_VECTOR)); - - output_size_list_.push_back(atom_numbers * sizeof(VECTOR)); - } - - cufftComplex expc(cufftComplex z) { - cufftComplex res; - float t = expf(z.x); - mindspore::kernel::math::SinCosf(z.y, &res.y, &res.x); - res.x *= t; - res.y *= t; - return res; - } - - float M_(float u, int n) { - if (n == 2) { - if (u > 2 || u < 0) return 0; - return 1 - abs(u - 1); - } else { - return u / (n - 1) * M_(u, n - 1) + (n - u) / (n - 1) * M_(u - 1, n - 1); - } - } - - float getb(int k, int NFFT, int B_order) { - cufftComplex tempc, tempc2, res; - float tempf; - tempc2.x = 0; - tempc2.y = 0; - - tempc.x = 0; - if (NFFT == 0) { - MS_LOG(ERROR) << "Divide by zero."; - } else { - tempc.y = 2 * (B_order - 1) * PI * k / NFFT; - } - res = expc(tempc); - - for (int kk = 0; kk < (B_order - 1); kk++) { - tempc.x = 0; - if (NFFT == 0) { - MS_LOG(ERROR) << "Divide by zero."; - break; - } else { - tempc.y = 2 * PI * k / NFFT * kk; - } - tempc = expc(tempc); - tempf = M_(kk + 1, B_order); - tempc2.x += tempf * tempc.x; - tempc2.y += tempf * tempc.y; - } - res = cuCdivf(res, tempc2); - return res.x * res.x + res.y * res.y; - } - - private: - size_t ele_uint_crd = 1; - - int atom_numbers; - int fftx; - int ffty; - int fftz; - float beta; - float h_beta; - float factor; - int PME_Nall; - int PME_Nfft; - int PME_Nin; - float PI = 3.1415926; - int need_update; - std::vector B1; - std::vector B2; - std::vector B3; - std::vector PME_BC0; - std::vector PME_BC; - - cufftHandle PME_plan_r2c; - cufftHandle PME_plan_c2r; - struct VECTOR { - float x; - float y; - float z; - }; - _VECTOR PME_inverse_box_vector0; - _VECTOR PME_inverse_box_vector; - struct UNSIGNED_INT_VECTOR { - unsigned int uint_x; - unsigned int uint_y; - unsigned int uint_z; - }; - std::vector PME_kxyz_cpu; -}; -} // namespace kernel -} // namespace mindspore -#endif diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/pme/pme_rfft_2d_kernel.cc b/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/pme/pme_rfft_2d_kernel.cc deleted file mode 100644 index 47d86f609ce..00000000000 --- a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/pme/pme_rfft_2d_kernel.cc +++ /dev/null @@ -1,23 +0,0 @@ -/** - * Copyright 2021-2022 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -#include "plugin/device/gpu/kernel/sponge/pme/pme_rfft_2d_kernel.h" - -namespace mindspore { -namespace kernel { -MS_REG_GPU_KERNEL_ONE(PMERFFT2D, KernelAttr().AddInputAttr(kNumberTypeFloat32).AddOutputAttr(kNumberTypeComplex64), - PMERFFT2DGpuKernelMod, float) -} // namespace kernel -} // namespace mindspore diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/pme/pme_rfft_2d_kernel.h b/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/pme/pme_rfft_2d_kernel.h deleted file mode 100644 index e13f2e9109a..00000000000 --- a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/pme/pme_rfft_2d_kernel.h +++ /dev/null @@ -1,75 +0,0 @@ -/** - * Copyright 2021-2022 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -#ifndef MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_GPU_SPONGE_PME_PME_RFFT_2D_KERNEL_H_ -#define MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_GPU_SPONGE_PME_PME_RFFT_2D_KERNEL_H_ -#include -#include -#include -#include -#include -#include "plugin/device/gpu/kernel/gpu_kernel.h" -#include "plugin/device/gpu/kernel/gpu_kernel_factory.h" -#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/cuda_common.h" -#include "plugin/device/gpu/kernel/cuda_impl/sponge/pme/pme_rfft_2d_impl.cuh" -namespace mindspore { -namespace kernel { -template -using Complex = mindspore::utils::Complex; -template -class PMERFFT2DGpuKernelMod : public DeprecatedNativeGpuKernelMod { - public: - PMERFFT2DGpuKernelMod() = default; - ~PMERFFT2DGpuKernelMod() override = default; - - bool Init(const CNodePtr &kernel_node) override { - constexpr int NUM_2 = 2; - fftx = static_cast(GetAttr(kernel_node, "fftx")); - ffty = static_cast(GetAttr(kernel_node, "ffty")); - Nall = fftx * ffty; - Nfft = fftx * (ffty / NUM_2 + 1); - - cufftPlan2d(&FFT_plan_r2c, fftx, ffty, CUFFT_R2C); - - InitSizeLists(); - return true; - } - - bool Launch(const std::vector &inputs, const std::vector &workspace, - const std::vector &outputs, void *stream_ptr) override { - auto input_tensor = GetDeviceAddress(inputs, 0); - auto output_tensor = GetDeviceAddress>(outputs, 0); - - cufftSetStream(FFT_plan_r2c, reinterpret_cast(stream_ptr)); - PMERFFT2D(Nfft, input_tensor, output_tensor, FFT_plan_r2c, reinterpret_cast(stream_ptr)); - return true; - } - - protected: - void InitSizeLists() override { - input_size_list_.push_back(Nall * sizeof(T)); - output_size_list_.push_back(Nfft * sizeof(Complex)); - } - - private: - int fftx; - int ffty; - int Nall; - int Nfft; - cufftHandle FFT_plan_r2c; -}; -} // namespace kernel -} // namespace mindspore -#endif diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/restrain/restrain_energy_kernel.cc b/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/restrain/restrain_energy_kernel.cc deleted file mode 100644 index c75fd71b850..00000000000 --- a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/restrain/restrain_energy_kernel.cc +++ /dev/null @@ -1,34 +0,0 @@ -/** - * 
Copyright 2021-2022 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-/**
- * Note:
- * RestrainEnergy. This is an experimental interface that is subject to change and/or deletion.
- */
-
-#include "plugin/device/gpu/kernel/sponge/restrain/restrain_energy_kernel.h"
-
-namespace mindspore {
-namespace kernel {
-MS_REG_GPU_KERNEL_TWO(RestrainEnergy,
-                      KernelAttr()
-                        .AddInputAttr(kNumberTypeInt32)
-                        .AddInputAttr(kNumberTypeFloat32)
-                        .AddInputAttr(kNumberTypeFloat32)
-                        .AddInputAttr(kNumberTypeFloat32)
-                        .AddOutputAttr(kNumberTypeFloat32),
-                      RestrainEnergyGpuKernelMod, float, int)
-} // namespace kernel
-} // namespace mindspore
diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/restrain/restrain_energy_kernel.h b/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/restrain/restrain_energy_kernel.h
deleted file mode 100644
index e81b99be9be..00000000000
--- a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/restrain/restrain_energy_kernel.h
+++ /dev/null
@@ -1,95 +0,0 @@
-/**
- * Copyright 2021-2022 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
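Stepping back to the PMERFFT2D kernel registered above: its Init() only creates the CUFFT_R2C plan and Launch() binds it to the stream before handing off to the deleted pme_rfft_2d_impl.cu, which presumably wraps cufftExecR2C. A minimal self-contained sketch of the same plan-then-exec flow (sizes and the function name are illustrative):

    #include <cuda_runtime.h>
    #include <cufft.h>

    // 2D real-to-complex FFT: nx*ny real inputs -> nx*(ny/2+1) complex outputs,
    // the same Nall -> Nfft shrinkage seen in that kernel's InitSizeLists().
    void Rfft2D(float *d_in, cufftComplex *d_out, int nx, int ny, cudaStream_t stream) {
      cufftHandle plan;
      cufftPlan2d(&plan, nx, ny, CUFFT_R2C);
      cufftSetStream(plan, stream);
      cufftExecR2C(plan, d_in, d_out);  // cufftReal is a typedef of float
      cufftDestroy(plan);
    }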
- */ - -#ifndef MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_GPU_SPONG_RESTRAIN_RESTRAIN_FORCE_KERNEL_H_ -#define MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_GPU_SPONG_RESTRAIN_RESTRAIN_FORCE_KERNEL_H_ - -#include "plugin/device/gpu/kernel/cuda_impl/sponge/restrain/restrain_energy_impl.cuh" - -#include -#include -#include -#include - -#include "plugin/device/gpu/kernel/gpu_kernel.h" -#include "plugin/device/gpu/kernel/gpu_kernel_factory.h" -#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/cuda_common.h" - -namespace mindspore { -namespace kernel { -template -class RestrainEnergyGpuKernelMod : public DeprecatedNativeGpuKernelMod { - public: - RestrainEnergyGpuKernelMod() : ele_crd(1) {} - ~RestrainEnergyGpuKernelMod() override = default; - - bool Init(const CNodePtr &kernel_node) override { - // get bond_numbers - kernel_node_ = kernel_node; - restrain_numbers = static_cast(GetAttr(kernel_node, "restrain_numbers")); - atom_numbers = static_cast(GetAttr(kernel_node, "atom_numbers")); - weight = static_cast(GetAttr(kernel_node, "weight")); - auto shape_restrain_list = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 0); - auto shape_crd = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 1); - auto shape_crd_ref = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 2); - auto shape_scaler = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 3); - - ele_crd *= SizeOf(shape_crd); - ele_scaler *= SizeOf(shape_scaler); - ele_restrain_list *= SizeOf(shape_restrain_list); - ele_crd_ref *= SizeOf(shape_crd_ref); - - InitSizeLists(); - return true; - } - - bool Launch(const std::vector &inputs, const std::vector &, - const std::vector &outputs, void *stream_ptr) override { - auto restrain_list = GetDeviceAddress(inputs, 0); - auto crd_f = GetDeviceAddress(inputs, 1); - auto crd_ref = GetDeviceAddress(inputs, 2); - auto scaler_f = GetDeviceAddress(inputs, 3); - - auto ene = GetDeviceAddress(outputs, 0); - - restrainenergy(restrain_numbers, atom_numbers, weight, restrain_list, crd_f, crd_ref, scaler_f, ene, - reinterpret_cast(stream_ptr)); - return true; - } - - protected: - void InitSizeLists() override { - input_size_list_.push_back(ele_restrain_list * sizeof(T1)); - input_size_list_.push_back(ele_crd * sizeof(T)); - input_size_list_.push_back(ele_crd_ref * sizeof(T)); - input_size_list_.push_back(ele_scaler * sizeof(T)); - - output_size_list_.push_back(atom_numbers * sizeof(T)); - } - - private: - size_t ele_crd = 1; - size_t ele_scaler = 1; - size_t ele_restrain_list = 1; - size_t ele_crd_ref = 1; - - int restrain_numbers; - int atom_numbers; - float weight; -}; -} // namespace kernel -} // namespace mindspore -#endif // MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_GPU_SPONG_RESTRAIN_RESTRAIN_FORCE_KERNEL_H_ diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/restrain/restrain_force_atom_energy_virial_impl_kernel.cc b/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/restrain/restrain_force_atom_energy_virial_impl_kernel.cc deleted file mode 100644 index 00e17f37856..00000000000 --- a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/restrain/restrain_force_atom_energy_virial_impl_kernel.cc +++ /dev/null @@ -1,36 +0,0 @@ -/** - * Copyright 2021-2022 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
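The ele_* bookkeeping above relies on SizeOf(shape) returning the flattened element count of an inferred shape; each Init() multiplies those counts into a running product and InitSizeLists() scales them by sizeof(T) or sizeof(T1). A plausible restatement of that helper (my reading of SizeOf, not quoted from the codebase):

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    // Flattened element count of a tensor shape, as used by the Init()
    // methods in these kernels to size their input buffers.
    std::size_t NumElements(const std::vector<int64_t> &shape) {
      std::size_t n = 1;
      for (int64_t d : shape) {
        n *= static_cast<std::size_t>(d);
      }
      return n;
    }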
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -/** - * Note: - * RestrainForceWithAtomEnergyVirial. This is an experimental interface that is subject to change and/or deletion. - */ - -#include "plugin/device/gpu/kernel/sponge/restrain/restrain_force_atom_energy_virial_kernel.h" - -namespace mindspore { -namespace kernel { -MS_REG_GPU_KERNEL_TWO(RestrainForceWithAtomEnergyVirial, - KernelAttr() - .AddInputAttr(kNumberTypeInt32) - .AddInputAttr(kNumberTypeFloat32) - .AddInputAttr(kNumberTypeFloat32) - .AddInputAttr(kNumberTypeFloat32) - .AddOutputAttr(kNumberTypeFloat32) - .AddOutputAttr(kNumberTypeFloat32) - .AddOutputAttr(kNumberTypeFloat32), - RestrainForceWithAtomenergyAndVirialGpuKernelMod, float, int) -} // namespace kernel -} // namespace mindspore diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/restrain/restrain_force_atom_energy_virial_kernel.h b/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/restrain/restrain_force_atom_energy_virial_kernel.h deleted file mode 100644 index 39b679f3e57..00000000000 --- a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/restrain/restrain_force_atom_energy_virial_kernel.h +++ /dev/null @@ -1,100 +0,0 @@ -/** - * Copyright 2021-2022 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
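RestrainEnergy above emits one energy per atom, and RestrainForceWithAtomEnergyVirial fans the same computation out into per-atom energy, per-atom virial, and a 3N force array. The actual math lives in the deleted restrain_*_impl.cu files; assuming the textbook harmonic positional restraint E = w * |dr|^2 (and ignoring the periodic-box scaler for brevity), the three outputs relate as sketched here:

    struct Vec3 {
      float x, y, z;
    };

    // Hypothetical per-atom terms of a harmonic restraint, dr = crd - crd_ref:
    // E = w*|dr|^2, F = -dE/dr = -2w*dr, and dr.F under one common virial convention.
    void RestrainTerms(Vec3 dr, float weight, float *ene, float *virial, Vec3 *frc) {
      const float r2 = dr.x * dr.x + dr.y * dr.y + dr.z * dr.z;
      *ene = weight * r2;
      *virial = -2.0f * weight * r2;  // dr . F
      frc->x = -2.0f * weight * dr.x;
      frc->y = -2.0f * weight * dr.y;
      frc->z = -2.0f * weight * dr.z;
    }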
- */ - -#ifndef MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_GPU_SPONG_RESTRAIN_RESTRAIN_FORCE_ATOM_ENERGY_VIRIAL_KERNEL_H_ -#define MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_GPU_SPONG_RESTRAIN_RESTRAIN_FORCE_ATOM_ENERGY_VIRIAL_KERNEL_H_ - -#include "plugin/device/gpu/kernel/cuda_impl/sponge/restrain/restrain_force_atom_energy_virial_impl.cuh" - -#include -#include -#include -#include - -#include "plugin/device/gpu/kernel/gpu_kernel.h" -#include "plugin/device/gpu/kernel/gpu_kernel_factory.h" -#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/cuda_common.h" - -namespace mindspore { -namespace kernel { -template -class RestrainForceWithAtomenergyAndVirialGpuKernelMod : public DeprecatedNativeGpuKernelMod { - public: - RestrainForceWithAtomenergyAndVirialGpuKernelMod() : ele_crd(1) {} - ~RestrainForceWithAtomenergyAndVirialGpuKernelMod() override = default; - - bool Init(const CNodePtr &kernel_node) override { - // get bond_numbers - kernel_node_ = kernel_node; - restrain_numbers = static_cast(GetAttr(kernel_node, "restrain_numbers")); - atom_numbers = static_cast(GetAttr(kernel_node, "atom_numbers")); - weight = static_cast(GetAttr(kernel_node, "weight")); - auto shape_restrain_list = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 0); - auto shape_crd = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 1); - auto shape_crd_ref = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 2); - auto shape_scaler = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 3); - - ele_crd *= SizeOf(shape_crd); - ele_scaler *= SizeOf(shape_scaler); - ele_restrain_list *= SizeOf(shape_restrain_list); - ele_crd_ref *= SizeOf(shape_crd_ref); - - InitSizeLists(); - return true; - } - - bool Launch(const std::vector &inputs, const std::vector &, - const std::vector &outputs, void *stream_ptr) override { - auto restrain_list = GetDeviceAddress(inputs, 0); - auto crd_f = GetDeviceAddress(inputs, 1); - auto crd_ref = GetDeviceAddress(inputs, 2); - auto scaler_f = GetDeviceAddress(inputs, 3); - - auto atom_ene = GetDeviceAddress(outputs, 0); - auto atom_virial = GetDeviceAddress(outputs, 1); - auto frc = GetDeviceAddress(outputs, 2); - - restrainforcewithatomenergyandvirial(restrain_numbers, atom_numbers, restrain_list, crd_f, crd_ref, weight, - scaler_f, atom_ene, atom_virial, frc, - reinterpret_cast(stream_ptr)); - return true; - } - - protected: - void InitSizeLists() override { - input_size_list_.push_back(ele_restrain_list * sizeof(T1)); - input_size_list_.push_back(ele_crd * sizeof(T)); - input_size_list_.push_back(ele_crd_ref * sizeof(T)); - input_size_list_.push_back(ele_scaler * sizeof(T)); - - output_size_list_.push_back(atom_numbers * sizeof(T)); - output_size_list_.push_back(atom_numbers * sizeof(T)); - output_size_list_.push_back(3 * atom_numbers * sizeof(T)); - } - - private: - size_t ele_crd = 1; - size_t ele_scaler = 1; - size_t ele_restrain_list = 1; - size_t ele_crd_ref = 1; - - int restrain_numbers; - int atom_numbers; - float weight; -}; -} // namespace kernel -} // namespace mindspore -#endif // MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_GPU_SPONG_RESTRAIN_RESTRAIN_FORCE_KERNEL_H_ diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/restrain/restrain_force_kernel.cc b/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/restrain/restrain_force_kernel.cc deleted file mode 100644 index 843616974dd..00000000000 --- a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/restrain/restrain_force_kernel.cc +++ /dev/null @@ -1,30 +0,0 @@ -/** - * Copyright 2021-2022 Huawei 
Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "plugin/device/gpu/kernel/sponge/restrain/restrain_force_kernel.h" - -namespace mindspore { -namespace kernel { -MS_REG_GPU_KERNEL_TWO(RestrainForce, - KernelAttr() - .AddInputAttr(kNumberTypeInt32) - .AddInputAttr(kNumberTypeUInt32) - .AddInputAttr(kNumberTypeInt32) - .AddInputAttr(kNumberTypeFloat32) - .AddOutputAttr(kNumberTypeFloat32), - RestrainForceGpuKernelMod, float, int) -} // namespace kernel -} // namespace mindspore diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/restrain/restrain_force_kernel.h b/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/restrain/restrain_force_kernel.h deleted file mode 100644 index 471acf5b720..00000000000 --- a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/restrain/restrain_force_kernel.h +++ /dev/null @@ -1,95 +0,0 @@ -/** - * Copyright 2021-2022 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_GPU_SPONG_RESTRAIN_RESTRAIN_FORCE_KERNEL_H_ -#define MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_GPU_SPONG_RESTRAIN_RESTRAIN_FORCE_KERNEL_H_ - -#include "plugin/device/gpu/kernel/cuda_impl/sponge/restrain/restrain_force_impl.cuh" - -#include -#include -#include -#include - -#include "plugin/device/gpu/kernel/gpu_kernel.h" -#include "plugin/device/gpu/kernel/gpu_kernel_factory.h" -#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/cuda_common.h" - -namespace mindspore { -namespace kernel { -template -class RestrainForceGpuKernelMod : public DeprecatedNativeGpuKernelMod { - public: - RestrainForceGpuKernelMod() : ele_uint_crd(1) {} - ~RestrainForceGpuKernelMod() override = default; - - bool Init(const CNodePtr &kernel_node) override { - // get bond_numbers - kernel_node_ = kernel_node; - restrain_numbers = static_cast(GetAttr(kernel_node, "restrain_numbers")); - atom_numbers = static_cast(GetAttr(kernel_node, "atom_numbers")); - factor = static_cast(GetAttr(kernel_node, "factor")); - auto shape_restrain_list = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 0); - auto shape_uint_crd = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 1); - auto shape_uint_crd_ref = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 2); - auto shape_scaler = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 3); - - ele_uint_crd *= SizeOf(shape_uint_crd); - ele_scaler *= SizeOf(shape_scaler); - ele_restrain_list *= SizeOf(shape_restrain_list); - ele_uint_crd_ref *= SizeOf(shape_uint_crd_ref); - - InitSizeLists(); - return true; - } - - bool Launch(const std::vector &inputs, const std::vector &, - const std::vector &outputs, void *stream_ptr) override { - auto restrain_list = GetDeviceAddress(inputs, 0); - auto uint_crd_f = GetDeviceAddress(inputs, 1); - auto uint_crd_ref = GetDeviceAddress(inputs, 2); - auto scaler_f = GetDeviceAddress(inputs, 3); - - auto frc_f = GetDeviceAddress(outputs, 0); - - restrainforce(restrain_numbers, atom_numbers, restrain_list, uint_crd_f, uint_crd_ref, factor, scaler_f, frc_f, - reinterpret_cast(stream_ptr)); - return true; - } - - protected: - void InitSizeLists() override { - input_size_list_.push_back(ele_restrain_list * sizeof(T1)); - input_size_list_.push_back(ele_uint_crd * sizeof(T1)); - input_size_list_.push_back(ele_uint_crd_ref * sizeof(T1)); - input_size_list_.push_back(ele_scaler * sizeof(T)); - - output_size_list_.push_back(atom_numbers * 3 * sizeof(T)); - } - - private: - size_t ele_uint_crd = 1; - size_t ele_scaler = 1; - size_t ele_restrain_list = 1; - size_t ele_uint_crd_ref = 1; - - int restrain_numbers; - int atom_numbers; - float factor; -}; -} // namespace kernel -} // namespace mindspore -#endif // MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_GPU_SPONG_RESTRAIN_RESTRAIN_FORCE_KERNEL_H_ diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/simple_constrain/constrain_force_cycle_kernel.cc b/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/simple_constrain/constrain_force_cycle_kernel.cc deleted file mode 100644 index 745353432e3..00000000000 --- a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/simple_constrain/constrain_force_cycle_kernel.cc +++ /dev/null @@ -1,33 +0,0 @@ -/** - * Copyright 2021-2022 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
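Unlike RestrainEnergy, RestrainForce consumes uint_crd/uint_crd_ref: SPONGE stores wrapped coordinates as unsigned 32-bit fractions of the box, so plain unsigned subtraction wraps modulo 2^32, and reinterpreting the difference as signed, scaled by the float scaler input, yields the minimum-image displacement. The same pattern recurs in every uint_crd kernel below; as a sketch (helper and type names are mine):

    struct UintVec3 {
      unsigned int x, y, z;
    };
    struct Vec3 {
      float x, y, z;
    };

    // Minimum-image displacement from wrapped unsigned coordinates: the unsigned
    // subtraction wraps mod 2^32, and the int cast recenters it to [-2^31, 2^31).
    Vec3 PeriodicDisplacement(UintVec3 a, UintVec3 b, Vec3 scaler) {
      Vec3 dr;
      dr.x = static_cast<int>(a.x - b.x) * scaler.x;
      dr.y = static_cast<int>(a.y - b.y) * scaler.y;
      dr.z = static_cast<int>(a.z - b.z) * scaler.z;
      return dr;
    }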
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "plugin/device/gpu/kernel/sponge/simple_constrain/constrain_force_cycle_kernel.h" - -namespace mindspore { -namespace kernel { -MS_REG_GPU_KERNEL_THREE(ConstrainForceCycle, - KernelAttr() - .AddInputAttr(kNumberTypeUInt32) - .AddInputAttr(kNumberTypeFloat32) - .AddInputAttr(kNumberTypeFloat32) - .AddInputAttr(kNumberTypeInt32) - .AddInputAttr(kNumberTypeInt32) - .AddInputAttr(kNumberTypeFloat32) - .AddInputAttr(kNumberTypeFloat32) - .AddOutputAttr(kNumberTypeFloat32), - ConstrainForceCycleGpuKernelMod, float, int, unsigned int) -} // namespace kernel -} // namespace mindspore diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/simple_constrain/constrain_force_cycle_kernel.h b/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/simple_constrain/constrain_force_cycle_kernel.h deleted file mode 100644 index d2d80bb59df..00000000000 --- a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/simple_constrain/constrain_force_cycle_kernel.h +++ /dev/null @@ -1,119 +0,0 @@ -/** - * Copyright 2021-2022 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
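For orientation: the trailing float, int, unsigned int in the MS_REG_GPU_KERNEL_THREE call above appear to bind, in order, to the T, T1, T2 template parameters of ConstrainForceCycleGpuKernelMod, mirroring the dtype list built by KernelAttr (this is my reading of the registration macro, not something the patch states). Schematically:

    // Toy picture of the registration contract: KernelAttr dtypes line up with
    // the buffers fetched in Launch(), and the trailing types instantiate the mod.
    template <typename T, typename T1, typename T2>
    class ToyConstrainForceCycleMod { /* T = float, T1 = int, T2 = unsigned int */ };

    using Registered = ToyConstrainForceCycleMod<float, int, unsigned int>;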
- */ - -#ifndef MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_GPU_SPONG_SIMPLE_CONSTRAIN_CONSTRAIN_FORCE_CYCLE_KERNEL_H_ -#define MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_GPU_SPONG_SIMPLE_CONSTRAIN_CONSTRAIN_FORCE_CYCLE_KERNEL_H_ - -#include "plugin/device/gpu/kernel/cuda_impl/sponge/simple_constrain/constrain_force_cycle_impl.cuh" - -#include -#include -#include -#include - -#include "plugin/device/gpu/kernel/gpu_kernel.h" -#include "plugin/device/gpu/kernel/gpu_kernel_factory.h" -#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/cuda_common.h" - -namespace mindspore { -namespace kernel { -template -class ConstrainForceCycleGpuKernelMod : public DeprecatedNativeGpuKernelMod { - public: - ConstrainForceCycleGpuKernelMod() : ele_uint_crd(1) {} - ~ConstrainForceCycleGpuKernelMod() override = default; - - bool Init(const CNodePtr &kernel_node) override { - // get bond_numbers - kernel_node_ = kernel_node; - atom_numbers = static_cast(GetAttr(kernel_node, "atom_numbers")); - constrain_pair_numbers = static_cast(GetAttr(kernel_node, "constrain_pair_numbers")); - - auto shape_uint_crd = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 0); - auto shape_scaler = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 1); - auto shape_pair_dr = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 2); - auto shape_atom_i_serials = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 3); - auto shape_atom_j_serials = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 4); - auto shape_constant_rs = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 5); - auto shape_constrain_ks = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 6); - - ele_uint_crd *= SizeOf(shape_uint_crd); - ele_scaler *= SizeOf(shape_scaler); - ele_pair_dr *= SizeOf(shape_pair_dr); - ele_atom_i_serials *= SizeOf(shape_atom_i_serials); - ele_atom_j_serials *= SizeOf(shape_atom_j_serials); - ele_constant_rs *= SizeOf(shape_constant_rs); - ele_constrain_ks *= SizeOf(shape_constrain_ks); - - InitSizeLists(); - return true; - } - - bool Launch(const std::vector &inputs, const std::vector &workspace, - const std::vector &outputs, void *stream_ptr) override { - auto uint_crd = GetDeviceAddress(inputs, 0); - auto scaler = GetDeviceAddress(inputs, 1); - auto pair_dr = GetDeviceAddress(inputs, 2); - auto atom_i_serials = GetDeviceAddress(inputs, 3); - auto atom_j_serials = GetDeviceAddress(inputs, 4); - auto constant_rs = GetDeviceAddress(inputs, 5); - auto constrain_ks = GetDeviceAddress(inputs, 6); - - auto constrain_pair = GetDeviceAddress(workspace, 0); - - auto test_frc_f = GetDeviceAddress(outputs, 0); - - Constrain_Force_Cycle(atom_numbers, constrain_pair_numbers, uint_crd, scaler, constrain_pair, pair_dr, - atom_i_serials, atom_j_serials, constant_rs, constrain_ks, test_frc_f, - reinterpret_cast(stream_ptr)); - return true; - } - - protected: - void InitSizeLists() override { - input_size_list_.push_back(ele_uint_crd * sizeof(T2)); - input_size_list_.push_back(ele_scaler * sizeof(T)); - input_size_list_.push_back(ele_pair_dr * sizeof(T)); - input_size_list_.push_back(ele_atom_i_serials * sizeof(T1)); - input_size_list_.push_back(ele_atom_j_serials * sizeof(T1)); - input_size_list_.push_back(ele_constant_rs * sizeof(T)); - input_size_list_.push_back(ele_constrain_ks * sizeof(T)); - - workspace_size_list_.push_back(constrain_pair_numbers * sizeof(CONSTRAIN_PAIR)); - output_size_list_.push_back(3 * atom_numbers * sizeof(T)); - } - - private: - size_t ele_uint_crd = 1; - size_t ele_scaler = 1; - 
size_t ele_pair_dr = 1; - size_t ele_atom_i_serials = 1; - size_t ele_atom_j_serials = 1; - size_t ele_constant_rs = 1; - size_t ele_constrain_ks = 1; - - int atom_numbers; - int constrain_pair_numbers; - struct CONSTRAIN_PAIR { - int atom_i_serial; - int atom_j_serial; - float constant_r; - float constrain_k; - }; -}; -} // namespace kernel -} // namespace mindspore -#endif diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/simple_constrain/constrain_force_cycle_with_virial_kernel.cc b/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/simple_constrain/constrain_force_cycle_with_virial_kernel.cc deleted file mode 100644 index 02f5e921ce7..00000000000 --- a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/simple_constrain/constrain_force_cycle_with_virial_kernel.cc +++ /dev/null @@ -1,38 +0,0 @@ -/** - * Copyright 2021-2022 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -/** - * Note: - * ConstrainForceCycleWithVirial. This is an experimental interface that is subject to change and/or deletion. - */ - -#include "plugin/device/gpu/kernel/sponge/simple_constrain/constrain_force_cycle_with_virial_kernel.h" - -namespace mindspore { -namespace kernel { -MS_REG_GPU_KERNEL_THREE(ConstrainForceCycleWithVirial, - KernelAttr() - .AddInputAttr(kNumberTypeUInt32) - .AddInputAttr(kNumberTypeFloat32) - .AddInputAttr(kNumberTypeFloat32) - .AddInputAttr(kNumberTypeInt32) - .AddInputAttr(kNumberTypeInt32) - .AddInputAttr(kNumberTypeFloat32) - .AddInputAttr(kNumberTypeFloat32) - .AddOutputAttr(kNumberTypeFloat32) - .AddOutputAttr(kNumberTypeFloat32), - ConstrainForceCycleWithVirialGpuKernelMod, float, int, unsigned int) -} // namespace kernel -} // namespace mindspore diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/simple_constrain/constrain_force_cycle_with_virial_kernel.h b/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/simple_constrain/constrain_force_cycle_with_virial_kernel.h deleted file mode 100644 index 34d6567cbc0..00000000000 --- a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/simple_constrain/constrain_force_cycle_with_virial_kernel.h +++ /dev/null @@ -1,120 +0,0 @@ -/** - * Copyright 2021-2022 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
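Constrain_Force_Cycle above runs one relaxation pass over the CONSTRAIN_PAIR list; the real kernel is in the deleted constrain_force_cycle_impl.cu. A SHAKE-style reading of a single pair's update, in which the correction vanishes once |dr| reaches the pair's constant_r, would look roughly like this (assumed form, not the verbatim kernel):

    #include <cmath>

    struct Vec3 {
      float x, y, z;
    };

    // Hypothetical single-pair correction: push atoms i and j along their current
    // displacement dr until its length relaxes to the constraint length r0.
    void ConstrainPairCycle(Vec3 dr, float r0, float k, Vec3 *frc_i, Vec3 *frc_j) {
      const float r = std::sqrt(dr.x * dr.x + dr.y * dr.y + dr.z * dr.z);
      const float s = (1.0f - r0 / r) * k;  // zero when the constraint is satisfied
      frc_j->x += s * dr.x;
      frc_j->y += s * dr.y;
      frc_j->z += s * dr.z;
      frc_i->x -= s * dr.x;
      frc_i->y -= s * dr.y;
      frc_i->z -= s * dr.z;
    }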
- */ - -#ifndef MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_GPU_SPONG_SIMPLE_CONSTRAIN_CONSTRAIN_FORCE_CYCLE_WITH_VIRIAL_KERNEL_H_ -#define MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_GPU_SPONG_SIMPLE_CONSTRAIN_CONSTRAIN_FORCE_CYCLE_WITH_VIRIAL_KERNEL_H_ - -#include "plugin/device/gpu/kernel/cuda_impl/sponge/simple_constrain/constrain_force_cycle_with_virial_impl.cuh" - -#include -#include -#include -#include - -#include "plugin/device/gpu/kernel/gpu_kernel.h" -#include "plugin/device/gpu/kernel/gpu_kernel_factory.h" -#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/cuda_common.h" - -namespace mindspore { -namespace kernel { -template -class ConstrainForceCycleWithVirialGpuKernelMod : public DeprecatedNativeGpuKernelMod { - public: - ConstrainForceCycleWithVirialGpuKernelMod() : ele_uint_crd(1) {} - ~ConstrainForceCycleWithVirialGpuKernelMod() override = default; - - bool Init(const CNodePtr &kernel_node) override { - // get bond_numbers - kernel_node_ = kernel_node; - atom_numbers = static_cast(GetAttr(kernel_node, "atom_numbers")); - constrain_pair_numbers = static_cast(GetAttr(kernel_node, "constrain_pair_numbers")); - - auto shape_uint_crd = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 0); - auto shape_scaler = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 1); - auto shape_pair_dr = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 2); - auto shape_atom_i_serials = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 3); - auto shape_atom_j_serials = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 4); - auto shape_constant_rs = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 5); - auto shape_constrain_ks = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 6); - - ele_uint_crd *= SizeOf(shape_uint_crd); - ele_scaler *= SizeOf(shape_scaler); - ele_pair_dr *= SizeOf(shape_pair_dr); - ele_atom_i_serials *= SizeOf(shape_atom_i_serials); - ele_atom_j_serials *= SizeOf(shape_atom_j_serials); - ele_constant_rs *= SizeOf(shape_constant_rs); - ele_constrain_ks *= SizeOf(shape_constrain_ks); - - InitSizeLists(); - return true; - } - - bool Launch(const std::vector &inputs, const std::vector &workspace, - const std::vector &outputs, void *stream_ptr) override { - auto uint_crd = GetDeviceAddress(inputs, 0); - auto scaler = GetDeviceAddress(inputs, 1); - auto pair_dr = GetDeviceAddress(inputs, 2); - auto atom_i_serials = GetDeviceAddress(inputs, 3); - auto atom_j_serials = GetDeviceAddress(inputs, 4); - auto constant_rs = GetDeviceAddress(inputs, 5); - auto constrain_ks = GetDeviceAddress(inputs, 6); - auto constrain_pair = GetDeviceAddress(workspace, 0); - - auto test_frc_f = GetDeviceAddress(outputs, 0); - auto d_atom_virial = GetDeviceAddress(outputs, 1); - - Constrain_Force_Cycle_With_Virial(atom_numbers, constrain_pair_numbers, uint_crd, scaler, constrain_pair, pair_dr, - atom_i_serials, atom_j_serials, constant_rs, constrain_ks, test_frc_f, - d_atom_virial, reinterpret_cast(stream_ptr)); - return true; - } - - protected: - void InitSizeLists() override { - input_size_list_.push_back(ele_uint_crd * sizeof(T2)); - input_size_list_.push_back(ele_scaler * sizeof(T)); - input_size_list_.push_back(ele_pair_dr * sizeof(T)); - input_size_list_.push_back(ele_atom_i_serials * sizeof(T1)); - input_size_list_.push_back(ele_atom_j_serials * sizeof(T1)); - input_size_list_.push_back(ele_constant_rs * sizeof(T)); - input_size_list_.push_back(ele_constrain_ks * sizeof(T)); - - workspace_size_list_.push_back(constrain_pair_numbers * 
sizeof(CONSTRAIN_PAIR)); - output_size_list_.push_back(3 * atom_numbers * sizeof(T)); - output_size_list_.push_back(atom_numbers * sizeof(T)); - } - - private: - size_t ele_uint_crd = 1; - size_t ele_scaler = 1; - size_t ele_pair_dr = 1; - size_t ele_atom_i_serials = 1; - size_t ele_atom_j_serials = 1; - size_t ele_constant_rs = 1; - size_t ele_constrain_ks = 1; - - int atom_numbers; - int constrain_pair_numbers; - struct CONSTRAIN_PAIR { - int atom_i_serial; - int atom_j_serial; - float constant_r; - float constrain_k; - }; -}; -} // namespace kernel -} // namespace mindspore -#endif diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/simple_constrain/constrain_force_kernel.cc b/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/simple_constrain/constrain_force_kernel.cc deleted file mode 100644 index 0c4a0b13d17..00000000000 --- a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/simple_constrain/constrain_force_kernel.cc +++ /dev/null @@ -1,40 +0,0 @@ -/** - * Copyright 2021-2022 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -/** - * Note: - * ConstrainForce. This is an experimental interface that is subject to change and/or deletion. - */ -#include "plugin/device/gpu/kernel/sponge/simple_constrain/constrain_force_kernel.h" - -namespace mindspore { -namespace kernel { -MS_REG_GPU_KERNEL_THREE(ConstrainForce, - KernelAttr() - .AddInputAttr(kNumberTypeFloat32) - .AddInputAttr(kNumberTypeFloat32) - .AddInputAttr(kNumberTypeFloat32) - .AddInputAttr(kNumberTypeFloat32) - .AddInputAttr(kNumberTypeFloat32) - .AddInputAttr(kNumberTypeInt32) - .AddInputAttr(kNumberTypeInt32) - .AddInputAttr(kNumberTypeFloat32) - .AddInputAttr(kNumberTypeFloat32) - .AddOutputAttr(kNumberTypeUInt32) - .AddOutputAttr(kNumberTypeFloat32) - .AddOutputAttr(kNumberTypeFloat32), - ConstrainForceGpuKernelMod, float, int, unsigned int) -} // namespace kernel -} // namespace mindspore diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/simple_constrain/constrain_force_kernel.h b/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/simple_constrain/constrain_force_kernel.h deleted file mode 100644 index f8c0c2a5b6b..00000000000 --- a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/simple_constrain/constrain_force_kernel.h +++ /dev/null @@ -1,154 +0,0 @@ -/** - * Copyright 2021-2022 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -/** - * Note: - * ConstrainForce. 
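Both cycle kernels stage their pair data in a workspace of constrain_pair_numbers * sizeof(CONSTRAIN_PAIR) bytes, with the struct laid out as declared above. Because all four members are 4-byte scalars there is no padding, which is worth pinning down whenever a workspace is sized by sizeof:

    struct CONSTRAIN_PAIR {
      int atom_i_serial;
      int atom_j_serial;
      float constant_r;
      float constrain_k;
    };
    // Four 4-byte fields, no padding: 16 bytes per pair on the targets CUDA supports.
    static_assert(sizeof(CONSTRAIN_PAIR) == 16, "CONSTRAIN_PAIR must stay tightly packed");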
This is an experimental interface that is subject to change and/or deletion. - */ - -#ifndef MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_GPU_SPONG_SIMPLE_CONSTRAIN_CONSTRAIN_FORCE_KERNEL_H_ -#define MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_GPU_SPONG_SIMPLE_CONSTRAIN_CONSTRAIN_FORCE_KERNEL_H_ - -#include "plugin/device/gpu/kernel/cuda_impl/sponge/simple_constrain/constrain_force_virial_impl.cuh" - -#include -#include -#include -#include - -#include "plugin/device/gpu/kernel/gpu_kernel.h" -#include "plugin/device/gpu/kernel/gpu_kernel_factory.h" -#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/cuda_common.h" - -namespace mindspore { -namespace kernel { -template -class ConstrainForceGpuKernelMod : public DeprecatedNativeGpuKernelMod { - public: - ConstrainForceGpuKernelMod() : ele_crd(1) {} - ~ConstrainForceGpuKernelMod() override = default; - - bool Init(const CNodePtr &kernel_node) override { - kernel_node_ = kernel_node; - atom_numbers = static_cast(GetAttr(kernel_node, "atom_numbers")); - constrain_pair_numbers = static_cast(GetAttr(kernel_node, "constrain_pair_numbers")); - iteration_numbers = static_cast(GetAttr(kernel_node, "iteration_numbers")); - half_exp_gamma_plus_half = static_cast(GetAttr(kernel_node, "half_exp_gamma_plus_half")); - - auto shape_crd = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 0); - auto shape_quarter_cof = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 1); - auto shape_mass_inverse = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 2); - - auto shape_scaler = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 3); - auto shape_pair_dr = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 4); - auto shape_atom_i_serials = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 5); - auto shape_atom_j_serials = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 6); - auto shape_constant_rs = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 7); - auto shape_constrain_ks = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 8); - - ele_scaler *= SizeOf(shape_scaler); - ele_pair_dr *= SizeOf(shape_pair_dr); - ele_atom_i_serials *= SizeOf(shape_atom_i_serials); - ele_atom_j_serials *= SizeOf(shape_atom_j_serials); - ele_constant_rs *= SizeOf(shape_constant_rs); - ele_constrain_ks *= SizeOf(shape_constrain_ks); - ele_crd *= SizeOf(shape_crd); - ele_quarter_cof *= SizeOf(shape_quarter_cof); - ele_mass_inverse *= SizeOf(shape_mass_inverse); - - InitSizeLists(); - return true; - } - - bool Launch(const std::vector &inputs, const std::vector &workspace, - const std::vector &outputs, void *stream_ptr) override { - auto crd = GetDeviceAddress(inputs, 0); - auto quarter_cof = GetDeviceAddress(inputs, 1); - auto mass_inverse = GetDeviceAddress(inputs, 2); - - auto scaler = GetDeviceAddress(inputs, 3); - auto pair_dr = GetDeviceAddress(inputs, 4); - auto atom_i_serials = GetDeviceAddress(inputs, 5); - auto atom_j_serials = GetDeviceAddress(inputs, 6); - auto constant_rs = GetDeviceAddress(inputs, 7); - auto constrain_ks = GetDeviceAddress(inputs, 8); - - auto constrain_pair = GetDeviceAddress(workspace, 0); - - auto uint_crd = GetDeviceAddress(outputs, 0); - - auto test_frc_f = GetDeviceAddress(outputs, 1); - auto d_atom_virial = GetDeviceAddress(outputs, 2); - - set_zero_force_with_virial(atom_numbers, constrain_pair_numbers, test_frc_f, d_atom_virial, - reinterpret_cast(stream_ptr)); - - for (int i = 0; i < iteration_numbers; i++) { - refresh_uint_crd_update(atom_numbers, half_exp_gamma_plus_half, crd, 
quarter_cof, test_frc_f, mass_inverse, - uint_crd, reinterpret_cast(stream_ptr)); - - constrain_force_cycle_update(atom_numbers, constrain_pair_numbers, uint_crd, scaler, constrain_pair, pair_dr, - atom_i_serials, atom_j_serials, constant_rs, constrain_ks, test_frc_f, - reinterpret_cast(stream_ptr)); - } - return true; - } - - protected: - void InitSizeLists() override { - input_size_list_.push_back(ele_crd * sizeof(T)); - input_size_list_.push_back(ele_quarter_cof * sizeof(T)); - input_size_list_.push_back(ele_mass_inverse * sizeof(T)); - - input_size_list_.push_back(ele_scaler * sizeof(T)); - input_size_list_.push_back(ele_pair_dr * sizeof(T)); - input_size_list_.push_back(ele_atom_i_serials * sizeof(T1)); - input_size_list_.push_back(ele_atom_j_serials * sizeof(T1)); - input_size_list_.push_back(ele_constant_rs * sizeof(T)); - input_size_list_.push_back(ele_constrain_ks * sizeof(T)); - - workspace_size_list_.push_back(constrain_pair_numbers * sizeof(CONSTRAIN_PAIR)); - - output_size_list_.push_back(3 * atom_numbers * sizeof(T2)); - output_size_list_.push_back(3 * atom_numbers * sizeof(T)); - output_size_list_.push_back(constrain_pair_numbers * sizeof(T)); - } - - private: - size_t ele_scaler = 1; - size_t ele_pair_dr = 1; - size_t ele_atom_i_serials = 1; - size_t ele_atom_j_serials = 1; - size_t ele_constant_rs = 1; - size_t ele_constrain_ks = 1; - size_t ele_crd = 1; - size_t ele_quarter_cof = 1; - size_t ele_mass_inverse = 1; - - int atom_numbers; - int constrain_pair_numbers; - int iteration_numbers; - int need_pressure; - float half_exp_gamma_plus_half; - struct CONSTRAIN_PAIR { - int atom_i_serial; - int atom_j_serial; - float constant_r; - float constrain_k; - }; -}; -} // namespace kernel -} // namespace mindspore -#endif diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/simple_constrain/constrain_force_virial_kernel.cc b/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/simple_constrain/constrain_force_virial_kernel.cc deleted file mode 100644 index 9b01f9000d7..00000000000 --- a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/simple_constrain/constrain_force_virial_kernel.cc +++ /dev/null @@ -1,41 +0,0 @@ -/** - * Copyright 2021-2022 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -/** - * Note: - * ConstrainForceVirial. This is an experimental interface that is subject to change and/or deletion. 
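Note that the Launch() above never tests convergence: it always runs iteration_numbers passes of refresh_uint_crd_update followed by constrain_force_cycle_update, relying on the per-pair correction to contract geometrically. A toy host-side version of that fixed-iteration scheme, reusing the single-pair update sketched earlier (the gain k and the coordinates are made up):

    #include <cmath>
    #include <cstdio>

    struct Vec3 {
      float x, y, z;
    };

    int main() {
      Vec3 a{0.0f, 0.0f, 0.0f};
      Vec3 b{1.5f, 0.0f, 0.0f};
      const float r0 = 1.0f;  // constraint length (constant_r analogue)
      const float k = 0.4f;   // made-up gain (constrain_k analogue)
      for (int it = 0; it < 25; ++it) {  // fixed pass count, like iteration_numbers
        const float dx = b.x - a.x, dy = b.y - a.y, dz = b.z - a.z;
        const float r = std::sqrt(dx * dx + dy * dy + dz * dz);
        const float s = (1.0f - r0 / r) * k;
        a.x += s * dx; a.y += s * dy; a.z += s * dz;
        b.x -= s * dx; b.y -= s * dy; b.z -= s * dz;
      }
      const float dx = b.x - a.x, dy = b.y - a.y, dz = b.z - a.z;
      std::printf("pair length after 25 passes: %f\n", std::sqrt(dx * dx + dy * dy + dz * dz));
      return 0;  // prints ~1.0: the error shrinks by a factor of (1 - 2k) per pass
    }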
- */ - -#include "plugin/device/gpu/kernel/sponge/simple_constrain/constrain_force_virial_kernel.h" - -namespace mindspore { -namespace kernel { -MS_REG_GPU_KERNEL_THREE(ConstrainForceVirial, - KernelAttr() - .AddInputAttr(kNumberTypeFloat32) - .AddInputAttr(kNumberTypeFloat32) - .AddInputAttr(kNumberTypeFloat32) - .AddInputAttr(kNumberTypeFloat32) - .AddInputAttr(kNumberTypeFloat32) - .AddInputAttr(kNumberTypeInt32) - .AddInputAttr(kNumberTypeInt32) - .AddInputAttr(kNumberTypeFloat32) - .AddInputAttr(kNumberTypeFloat32) - .AddOutputAttr(kNumberTypeUInt32) - .AddOutputAttr(kNumberTypeFloat32) - .AddOutputAttr(kNumberTypeFloat32), - ConstrainForceVirialGpuKernelMod, float, int, unsigned int) -} // namespace kernel -} // namespace mindspore diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/simple_constrain/constrain_force_virial_kernel.h b/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/simple_constrain/constrain_force_virial_kernel.h deleted file mode 100644 index 08621b810d4..00000000000 --- a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/simple_constrain/constrain_force_virial_kernel.h +++ /dev/null @@ -1,151 +0,0 @@ -/** - * Copyright 2021-2022 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -/** - * Note: - * ConstrainForceVirial. This is an experimental interface that is subject to change and/or deletion. 
- */ -#ifndef MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_GPU_SPONG_SIMPLE_CONSTRAIN_CONSTRAIN_FORCE_VIRIAL_KERNEL_H_ -#define MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_GPU_SPONG_SIMPLE_CONSTRAIN_CONSTRAIN_FORCE_VIRIAL_KERNEL_H_ - -#include "plugin/device/gpu/kernel/cuda_impl/sponge/simple_constrain/constrain_force_virial_impl.cuh" - -#include -#include -#include -#include - -#include "plugin/device/gpu/kernel/gpu_kernel.h" -#include "plugin/device/gpu/kernel/gpu_kernel_factory.h" -#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/cuda_common.h" - -namespace mindspore { -namespace kernel { -template -class ConstrainForceVirialGpuKernelMod : public DeprecatedNativeGpuKernelMod { - public: - ConstrainForceVirialGpuKernelMod() : ele_crd(1) {} - ~ConstrainForceVirialGpuKernelMod() override = default; - - bool Init(const CNodePtr &kernel_node) override { - kernel_node_ = kernel_node; - atom_numbers = static_cast(GetAttr(kernel_node, "atom_numbers")); - constrain_pair_numbers = static_cast(GetAttr(kernel_node, "constrain_pair_numbers")); - iteration_numbers = static_cast(GetAttr(kernel_node, "iteration_numbers")); - half_exp_gamma_plus_half = static_cast(GetAttr(kernel_node, "half_exp_gamma_plus_half")); - - auto shape_crd = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 0); - auto shape_quarter_cof = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 1); - auto shape_mass_inverse = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 2); - - auto shape_scaler = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 3); - auto shape_pair_dr = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 4); - auto shape_atom_i_serials = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 5); - auto shape_atom_j_serials = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 6); - auto shape_constant_rs = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 7); - auto shape_constrain_ks = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 8); - - ele_scaler *= SizeOf(shape_scaler); - ele_pair_dr *= SizeOf(shape_pair_dr); - ele_atom_i_serials *= SizeOf(shape_atom_i_serials); - ele_atom_j_serials *= SizeOf(shape_atom_j_serials); - ele_constant_rs *= SizeOf(shape_constant_rs); - ele_constrain_ks *= SizeOf(shape_constrain_ks); - ele_crd *= SizeOf(shape_crd); - ele_quarter_cof *= SizeOf(shape_quarter_cof); - ele_mass_inverse *= SizeOf(shape_mass_inverse); - - InitSizeLists(); - return true; - } - - bool Launch(const std::vector &inputs, const std::vector &workspace, - const std::vector &outputs, void *stream_ptr) override { - auto crd = GetDeviceAddress(inputs, 0); - auto quarter_cof = GetDeviceAddress(inputs, 1); - auto mass_inverse = GetDeviceAddress(inputs, 2); - auto scaler = GetDeviceAddress(inputs, 3); - auto pair_dr = GetDeviceAddress(inputs, 4); - auto atom_i_serials = GetDeviceAddress(inputs, 5); - auto atom_j_serials = GetDeviceAddress(inputs, 6); - auto constant_rs = GetDeviceAddress(inputs, 7); - auto constrain_ks = GetDeviceAddress(inputs, 8); - auto constrain_pair = GetDeviceAddress(workspace, 0); - auto uint_crd = GetDeviceAddress(outputs, 0); - auto test_frc_f = GetDeviceAddress(outputs, 1); - auto d_atom_virial = GetDeviceAddress(outputs, 2); - - set_zero_force_with_virial(atom_numbers, constrain_pair_numbers, test_frc_f, d_atom_virial, - reinterpret_cast(stream_ptr)); - - for (int i = 0; i < iteration_numbers; i++) { - refresh_uint_crd_update(atom_numbers, half_exp_gamma_plus_half, crd, quarter_cof, test_frc_f, mass_inverse, - uint_crd, 
reinterpret_cast(stream_ptr)); - - constrain_force_cycle_with_virial_update(atom_numbers, constrain_pair_numbers, uint_crd, scaler, constrain_pair, - pair_dr, atom_i_serials, atom_j_serials, constant_rs, constrain_ks, - test_frc_f, d_atom_virial, reinterpret_cast(stream_ptr)); - } - - return true; - } - - protected: - void InitSizeLists() override { - input_size_list_.push_back(ele_crd * sizeof(T)); - input_size_list_.push_back(ele_quarter_cof * sizeof(T)); - input_size_list_.push_back(ele_mass_inverse * sizeof(T)); - - input_size_list_.push_back(ele_scaler * sizeof(T)); - input_size_list_.push_back(ele_pair_dr * sizeof(T)); - input_size_list_.push_back(ele_atom_i_serials * sizeof(T1)); - input_size_list_.push_back(ele_atom_j_serials * sizeof(T1)); - input_size_list_.push_back(ele_constant_rs * sizeof(T)); - input_size_list_.push_back(ele_constrain_ks * sizeof(T)); - - workspace_size_list_.push_back(constrain_pair_numbers * sizeof(CONSTRAIN_PAIR)); - - output_size_list_.push_back(3 * atom_numbers * sizeof(T2)); - - output_size_list_.push_back(3 * atom_numbers * sizeof(T)); - output_size_list_.push_back(constrain_pair_numbers * sizeof(T)); - } - - private: - size_t ele_scaler = 1; - size_t ele_pair_dr = 1; - size_t ele_atom_i_serials = 1; - size_t ele_atom_j_serials = 1; - size_t ele_constant_rs = 1; - size_t ele_constrain_ks = 1; - size_t ele_crd = 1; - size_t ele_quarter_cof = 1; - size_t ele_mass_inverse = 1; - - int atom_numbers; - int constrain_pair_numbers; - int iteration_numbers; - int need_pressure; - float half_exp_gamma_plus_half; - struct CONSTRAIN_PAIR { - int atom_i_serial; - int atom_j_serial; - float constant_r; - float constrain_k; - }; -}; -} // namespace kernel -} // namespace mindspore -#endif diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/simple_constrain/constrain_kernel.cc b/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/simple_constrain/constrain_kernel.cc deleted file mode 100644 index e2f0bcd862c..00000000000 --- a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/simple_constrain/constrain_kernel.cc +++ /dev/null @@ -1,41 +0,0 @@ -/** - * Copyright 2021-2022 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -/** - * Note: - * Constrain. This is an experimental interface that is subject to change and/or deletion. 
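set_zero_force_with_virial above clears the two accumulators before the cycle kernels start adding into them; its implementation was in the deleted constrain_force_virial_impl.cu. Assuming it is a plain clear, the CUDA-runtime equivalent, sized exactly like the outputs in InitSizeLists(), is:

    #include <cuda_runtime.h>

    // Zero the 3N force floats and the per-pair virial floats on the given stream,
    // assuming set_zero_force_with_virial does nothing more than clear both buffers.
    void ZeroForceAndVirial(float *frc, int atom_numbers, float *atom_virial,
                            int constrain_pair_numbers, cudaStream_t stream) {
      cudaMemsetAsync(frc, 0, 3u * atom_numbers * sizeof(float), stream);
      cudaMemsetAsync(atom_virial, 0, constrain_pair_numbers * sizeof(float), stream);
    }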
- */ -#include "plugin/device/gpu/kernel/sponge/simple_constrain/constrain_kernel.h" - -namespace mindspore { -namespace kernel { -MS_REG_GPU_KERNEL_THREE(Constrain, - KernelAttr() - .AddInputAttr(kNumberTypeFloat32) - .AddInputAttr(kNumberTypeFloat32) - .AddInputAttr(kNumberTypeFloat32) - .AddInputAttr(kNumberTypeFloat32) - .AddInputAttr(kNumberTypeFloat32) - .AddInputAttr(kNumberTypeInt32) - .AddInputAttr(kNumberTypeInt32) - .AddInputAttr(kNumberTypeFloat32) - .AddInputAttr(kNumberTypeFloat32) - .AddInputAttr(kNumberTypeInt32) - .AddOutputAttr(kNumberTypeUInt32) - .AddOutputAttr(kNumberTypeFloat32) - .AddOutputAttr(kNumberTypeFloat32), - ConstrainGpuKernelMod, float, int, unsigned int) -} // namespace kernel -} // namespace mindspore diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/simple_constrain/constrain_kernel.h b/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/simple_constrain/constrain_kernel.h deleted file mode 100644 index 0adc081764a..00000000000 --- a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/simple_constrain/constrain_kernel.h +++ /dev/null @@ -1,168 +0,0 @@ -/** - * Copyright 2021-2022 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -/** - * Note: - * Constrain. This is an experimental interface that is subject to change and/or deletion. 
- */ -#ifndef MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_GPU_SPONG_SIMPLE_CONSTRAIN_CONSTRAIN_KERNEL_H_ -#define MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_GPU_SPONG_SIMPLE_CONSTRAIN_CONSTRAIN_KERNEL_H_ - -#include "plugin/device/gpu/kernel/cuda_impl/sponge/simple_constrain/constrain_force_virial_impl.cuh" - -#include -#include -#include -#include - -#include "plugin/device/gpu/kernel/gpu_kernel.h" -#include "plugin/device/gpu/kernel/gpu_kernel_factory.h" -#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/cuda_common.h" - -namespace mindspore { -namespace kernel { -template -class ConstrainGpuKernelMod : public DeprecatedNativeGpuKernelMod { - public: - ConstrainGpuKernelMod() : ele_crd(1), step(1) {} - ~ConstrainGpuKernelMod() override = default; - - bool Init(const CNodePtr &kernel_node) override { - // get bond_numbers - kernel_node_ = kernel_node; - atom_numbers = static_cast(GetAttr(kernel_node, "atom_numbers")); - constrain_pair_numbers = static_cast(GetAttr(kernel_node, "constrain_pair_numbers")); - iteration_numbers = static_cast(GetAttr(kernel_node, "iteration_numbers")); - half_exp_gamma_plus_half = static_cast(GetAttr(kernel_node, "half_exp_gamma_plus_half")); - - auto shape_crd = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 0); - auto shape_quarter_cof = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 1); - auto shape_mass_inverse = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 2); - - auto shape_scaler = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 3); - auto shape_pair_dr = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 4); - auto shape_atom_i_serials = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 5); - auto shape_atom_j_serials = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 6); - auto shape_constant_rs = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 7); - auto shape_constrain_ks = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 8); - - ele_scaler *= SizeOf(shape_scaler); - ele_pair_dr *= SizeOf(shape_pair_dr); - ele_atom_i_serials *= SizeOf(shape_atom_i_serials); - ele_atom_j_serials *= SizeOf(shape_atom_j_serials); - ele_constant_rs *= SizeOf(shape_constant_rs); - ele_constrain_ks *= SizeOf(shape_constrain_ks); - ele_crd *= SizeOf(shape_crd); - ele_quarter_cof *= SizeOf(shape_quarter_cof); - ele_mass_inverse *= SizeOf(shape_mass_inverse); - - InitSizeLists(); - return true; - } - - bool Launch(const std::vector &inputs, const std::vector &workspace, - const std::vector &outputs, void *stream_ptr) override { - auto crd = GetDeviceAddress(inputs, 0); - auto quarter_cof = GetDeviceAddress(inputs, 1); - auto mass_inverse = GetDeviceAddress(inputs, 2); - auto scaler = GetDeviceAddress(inputs, 3); - auto pair_dr = GetDeviceAddress(inputs, 4); - auto atom_i_serials = GetDeviceAddress(inputs, 5); - auto atom_j_serials = GetDeviceAddress(inputs, 6); - auto constant_rs = GetDeviceAddress(inputs, 7); - auto constrain_ks = GetDeviceAddress(inputs, 8); - auto d_need_pressure = GetDeviceAddress(inputs, 9); - - auto constrain_pair = GetDeviceAddress(workspace, 0); - - auto uint_crd = GetDeviceAddress(outputs, 0); - - auto test_frc_f = GetDeviceAddress(outputs, 1); - auto d_atom_virial = GetDeviceAddress(outputs, 2); - - cudaMemcpyAsync(&need_pressure, d_need_pressure, sizeof(int), cudaMemcpyDeviceToHost, - reinterpret_cast(stream_ptr)); - cudaStreamSynchronize(reinterpret_cast(stream_ptr)); - - set_zero_force_with_virial(atom_numbers, constrain_pair_numbers, test_frc_f, d_atom_virial, 
- reinterpret_cast(stream_ptr)); - - for (int i = 0; i < iteration_numbers; i++) { - refresh_uint_crd_update(atom_numbers, half_exp_gamma_plus_half, crd, quarter_cof, test_frc_f, mass_inverse, - uint_crd, reinterpret_cast(stream_ptr)); - - if (need_pressure) { - constrain_force_cycle_with_virial_update(atom_numbers, constrain_pair_numbers, uint_crd, scaler, constrain_pair, - pair_dr, atom_i_serials, atom_j_serials, constant_rs, constrain_ks, - test_frc_f, d_atom_virial, reinterpret_cast(stream_ptr)); - } else { - constrain_force_cycle_update(atom_numbers, constrain_pair_numbers, uint_crd, scaler, constrain_pair, pair_dr, - atom_i_serials, atom_j_serials, constant_rs, constrain_ks, test_frc_f, - reinterpret_cast(stream_ptr)); - } - } - step++; - - return true; - } - - protected: - void InitSizeLists() override { - input_size_list_.push_back(ele_crd * sizeof(T)); - input_size_list_.push_back(ele_quarter_cof * sizeof(T)); - input_size_list_.push_back(ele_mass_inverse * sizeof(T)); - - input_size_list_.push_back(ele_scaler * sizeof(T)); - input_size_list_.push_back(ele_pair_dr * sizeof(T)); - input_size_list_.push_back(ele_atom_i_serials * sizeof(T1)); - input_size_list_.push_back(ele_atom_j_serials * sizeof(T1)); - input_size_list_.push_back(ele_constant_rs * sizeof(T)); - input_size_list_.push_back(ele_constrain_ks * sizeof(T)); - - workspace_size_list_.push_back(constrain_pair_numbers * sizeof(CONSTRAIN_PAIR)); - - output_size_list_.push_back(3 * atom_numbers * sizeof(T2)); - - output_size_list_.push_back(3 * atom_numbers * sizeof(T)); - output_size_list_.push_back(constrain_pair_numbers * sizeof(T)); - } - - private: - size_t ele_scaler = 1; - size_t ele_pair_dr = 1; - size_t ele_atom_i_serials = 1; - size_t ele_atom_j_serials = 1; - size_t ele_constant_rs = 1; - size_t ele_constrain_ks = 1; - size_t ele_crd = 1; - size_t ele_quarter_cof = 1; - size_t ele_mass_inverse = 1; - - int atom_numbers; - int constrain_pair_numbers; - int iteration_numbers; - int need_pressure; - float half_exp_gamma_plus_half; - int step; - struct CONSTRAIN_PAIR { - int atom_i_serial; - int atom_j_serial; - float constant_r; - float constrain_k; - }; -}; -} // namespace kernel -} // namespace mindspore -#endif diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/simple_constrain/last_crd_to_dr_kernel.cc b/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/simple_constrain/last_crd_to_dr_kernel.cc deleted file mode 100644 index 5f9325b4c03..00000000000 --- a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/simple_constrain/last_crd_to_dr_kernel.cc +++ /dev/null @@ -1,36 +0,0 @@ -/** - * Copyright 2021-2022 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -/** - * Note: - * LastCrdToDr. This is an experimental interface that is subject to change and/or deletion. 
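The only difference from ConstrainForceVirial is that Constrain chooses the with-virial or plain cycle each step from a device-resident need_pressure flag, and reading that flag is what the cudaMemcpyAsync plus cudaStreamSynchronize pair above pays for: the host cannot branch on device data without draining the stream first. The pattern in isolation:

    #include <cuda_runtime.h>

    // Host-side read of a device scalar, as in the Launch() above: the async copy
    // is only valid after the stream sync, which stalls the whole pipeline.
    int ReadDeviceFlag(const int *d_flag, cudaStream_t stream) {
      int flag = 0;
      cudaMemcpyAsync(&flag, d_flag, sizeof(int), cudaMemcpyDeviceToHost, stream);
      cudaStreamSynchronize(stream);
      return flag;
    }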
- */ -#include "plugin/device/gpu/kernel/sponge/simple_constrain/last_crd_to_dr_kernel.h" - -namespace mindspore { -namespace kernel { -MS_REG_GPU_KERNEL_TWO(LastCrdToDr, - KernelAttr() - .AddInputAttr(kNumberTypeFloat32) - .AddInputAttr(kNumberTypeFloat32) - .AddInputAttr(kNumberTypeFloat32) - .AddInputAttr(kNumberTypeInt32) - .AddInputAttr(kNumberTypeInt32) - .AddInputAttr(kNumberTypeFloat32) - .AddInputAttr(kNumberTypeFloat32) - .AddOutputAttr(kNumberTypeFloat32), - LastCrdToDrGpuKernelMod, float, int) -} // namespace kernel -} // namespace mindspore diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/simple_constrain/last_crd_to_dr_kernel.h b/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/simple_constrain/last_crd_to_dr_kernel.h deleted file mode 100644 index 9d2d787017d..00000000000 --- a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/simple_constrain/last_crd_to_dr_kernel.h +++ /dev/null @@ -1,117 +0,0 @@ -/** - * Copyright 2021-2022 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_GPU_SPONG_SIMPLE_CONSTRAIN_LAST_CRD_TO_DR_KERNEL_H_ -#define MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_GPU_SPONG_SIMPLE_CONSTRAIN_LAST_CRD_TO_DR_KERNEL_H_ - -#include "plugin/device/gpu/kernel/cuda_impl/sponge/simple_constrain/last_crd_to_dr_impl.cuh" - -#include -#include -#include -#include - -#include "plugin/device/gpu/kernel/gpu_kernel.h" -#include "plugin/device/gpu/kernel/gpu_kernel_factory.h" -#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/cuda_common.h" - -namespace mindspore { -namespace kernel { -template -class LastCrdToDrGpuKernelMod : public DeprecatedNativeGpuKernelMod { - public: - LastCrdToDrGpuKernelMod() : ele_atom_crd(1) {} - ~LastCrdToDrGpuKernelMod() override = default; - - bool Init(const CNodePtr &kernel_node) override { - // get bond_numbers - kernel_node_ = kernel_node; - constrain_pair_numbers = static_cast(GetAttr(kernel_node, "constrain_pair_numbers")); - - auto shape_atom_crd = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 0); - auto shape_quater_cof = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 1); - auto shape_uint_dr_to_dr = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 2); - auto shape_atom_i_serials = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 3); - auto shape_atom_j_serials = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 4); - auto shape_constant_rs = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 5); - auto shape_constrain_ks = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 6); - - ele_atom_crd *= SizeOf(shape_atom_crd); - ele_quater_cof *= SizeOf(shape_quater_cof); - ele_uint_dr_to_dr *= SizeOf(shape_uint_dr_to_dr); - ele_atom_i_serials *= SizeOf(shape_atom_i_serials); - ele_atom_j_serials *= SizeOf(shape_atom_j_serials); - ele_constant_rs *= SizeOf(shape_constant_rs); - ele_constrain_ks *= SizeOf(shape_constrain_ks); - - InitSizeLists(); - return true; - } - - bool 
Launch(const std::vector &inputs, const std::vector &workspace, - const std::vector &outputs, void *stream_ptr) override { - auto atom_crd = GetDeviceAddress(inputs, 0); - auto quater_cof = GetDeviceAddress(inputs, 1); - auto uint_dr_to_dr = GetDeviceAddress(inputs, 2); - auto atom_i_serials = GetDeviceAddress(inputs, 3); - auto atom_j_serials = GetDeviceAddress(inputs, 4); - auto constant_rs = GetDeviceAddress(inputs, 5); - auto constrain_ks = GetDeviceAddress(inputs, 6); - - auto constrain_pair = GetDeviceAddress(workspace, 0); - - auto pair_dr = GetDeviceAddress(outputs, 0); - - lastcrdtodr(constrain_pair_numbers, atom_crd, quater_cof, uint_dr_to_dr, constrain_pair, atom_i_serials, - atom_j_serials, constant_rs, constrain_ks, pair_dr, reinterpret_cast(stream_ptr)); - return true; - } - - protected: - void InitSizeLists() override { - input_size_list_.push_back(ele_atom_crd * sizeof(T)); - input_size_list_.push_back(ele_quater_cof * sizeof(T)); - input_size_list_.push_back(ele_uint_dr_to_dr * sizeof(T)); - input_size_list_.push_back(ele_atom_i_serials * sizeof(T1)); - input_size_list_.push_back(ele_atom_j_serials * sizeof(T1)); - input_size_list_.push_back(ele_constant_rs * sizeof(T)); - input_size_list_.push_back(ele_constrain_ks * sizeof(T)); - - workspace_size_list_.push_back(constrain_pair_numbers * sizeof(CONSTRAIN_PAIR)); - - output_size_list_.push_back(3 * constrain_pair_numbers * sizeof(T)); - } - - private: - size_t ele_atom_crd = 1; - size_t ele_quater_cof = 1; - size_t ele_uint_dr_to_dr = 1; - size_t ele_atom_i_serials = 1; - size_t ele_atom_j_serials = 1; - size_t ele_constant_rs = 1; - size_t ele_constrain_ks = 1; - - int constrain_pair_numbers; - struct CONSTRAIN_PAIR { - int atom_i_serial; - int atom_j_serial; - float constant_r; - float constrain_k; - }; -}; -} // namespace kernel -} // namespace mindspore -#endif diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/simple_constrain/refresh_crd_vel_kernel.cc b/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/simple_constrain/refresh_crd_vel_kernel.cc deleted file mode 100644 index 52859060f14..00000000000 --- a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/simple_constrain/refresh_crd_vel_kernel.cc +++ /dev/null @@ -1,34 +0,0 @@ -/** - * Copyright 2021-2022 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -/** - * Note: - * RefreshCrdVel. This is an experimental interface that is subject to change and/or deletion. 
- */ - -#include "plugin/device/gpu/kernel/sponge/simple_constrain/refresh_crd_vel_kernel.h" - -namespace mindspore { -namespace kernel { -MS_REG_GPU_KERNEL_TWO(RefreshCrdVel, - KernelAttr() - .AddInputAttr(kNumberTypeFloat32) - .AddInputAttr(kNumberTypeFloat32) - .AddInputAttr(kNumberTypeFloat32) - .AddInputAttr(kNumberTypeFloat32) - .AddOutputAttr(kNumberTypeFloat32), - RefreshCrdVelGpuKernelMod, float, unsigned int) -} // namespace kernel -} // namespace mindspore diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/simple_constrain/refresh_crd_vel_kernel.h b/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/simple_constrain/refresh_crd_vel_kernel.h deleted file mode 100644 index 170cd7e9543..00000000000 --- a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/simple_constrain/refresh_crd_vel_kernel.h +++ /dev/null @@ -1,97 +0,0 @@ -/** - * Copyright 2021-2022 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_GPU_SPONG_SIMPLE_CONSTRAIN_REFRESH_CRD_VEL_KERNEL_H_ -#define MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_GPU_SPONG_SIMPLE_CONSTRAIN_REFRESH_CRD_VEL_KERNEL_H_ - -#include "plugin/device/gpu/kernel/cuda_impl/sponge/simple_constrain/refresh_crd_vel_impl.cuh" - -#include -#include -#include -#include - -#include "plugin/device/gpu/kernel/gpu_kernel.h" -#include "plugin/device/gpu/kernel/gpu_kernel_factory.h" -#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/cuda_common.h" - -namespace mindspore { -namespace kernel { -template -class RefreshCrdVelGpuKernelMod : public DeprecatedNativeGpuKernelMod { - public: - RefreshCrdVelGpuKernelMod() : ele_crd(1) {} - ~RefreshCrdVelGpuKernelMod() override = default; - - bool Init(const CNodePtr &kernel_node) override { - // get bond_numbers - kernel_node_ = kernel_node; - atom_numbers = static_cast(GetAttr(kernel_node, "atom_numbers")); - dt_inverse = static_cast(GetAttr(kernel_node, "dt_inverse")); - dt = static_cast(GetAttr(kernel_node, "dt")); - exp_gamma = static_cast(GetAttr(kernel_node, "exp_gamma")); - half_exp_gamma_plus_half = static_cast(GetAttr(kernel_node, "half_exp_gamma_plus_half")); - auto shape_crd = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 0); - auto shape_vel = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 1); - auto shape_test_frc = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 2); - auto shape_mass_inverse = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 3); - - ele_crd *= SizeOf(shape_crd); - ele_vel *= SizeOf(shape_vel); - ele_test_frc *= SizeOf(shape_test_frc); - ele_mass_inverse *= SizeOf(shape_mass_inverse); - - InitSizeLists(); - return true; - } - - bool Launch(const std::vector &inputs, const std::vector &, - const std::vector &outputs, void *stream_ptr) override { - auto crd = GetDeviceAddress(inputs, 0); - auto vel = GetDeviceAddress(inputs, 1); - auto test_frc = GetDeviceAddress(inputs, 2); - auto mass_inverse = GetDeviceAddress(inputs, 3); - - refreshcrdvel(atom_numbers, 
dt_inverse, dt, exp_gamma, half_exp_gamma_plus_half, test_frc, mass_inverse, crd, vel, - reinterpret_cast(stream_ptr)); - return true; - } - - protected: - void InitSizeLists() override { - input_size_list_.push_back(ele_crd * sizeof(T)); - input_size_list_.push_back(ele_vel * sizeof(T)); - input_size_list_.push_back(ele_test_frc * sizeof(T)); - input_size_list_.push_back(ele_mass_inverse * sizeof(T)); - - output_size_list_.push_back(sizeof(T)); - } - - private: - size_t ele_crd = 1; - size_t ele_vel = 1; - size_t ele_test_frc = 1; - size_t ele_mass_inverse = 1; - - int atom_numbers; - float dt_inverse; - float dt; - float exp_gamma; - float half_exp_gamma_plus_half; -}; -} // namespace kernel -} // namespace mindspore -#endif // MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_GPU_SPONG_SIMPLE_CONSTRAIN_REFRESH_CRD_VEL_KERNEL_H_ diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/simple_constrain/refresh_uint_crd_kernel.cc b/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/simple_constrain/refresh_uint_crd_kernel.cc deleted file mode 100644 index a4ca9a0e907..00000000000 --- a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/simple_constrain/refresh_uint_crd_kernel.cc +++ /dev/null @@ -1,33 +0,0 @@ -/** - * Copyright 2021-2022 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -/** - * Note: - * RefreshUintCrd. This is an experimental interface that is subject to change and/or deletion. - */ -#include "plugin/device/gpu/kernel/sponge/simple_constrain/refresh_uint_crd_kernel.h" - -namespace mindspore { -namespace kernel { -MS_REG_GPU_KERNEL_TWO(RefreshUintCrd, - KernelAttr() - .AddInputAttr(kNumberTypeFloat32) - .AddInputAttr(kNumberTypeFloat32) - .AddInputAttr(kNumberTypeFloat32) - .AddInputAttr(kNumberTypeFloat32) - .AddOutputAttr(kNumberTypeUInt32), - RefreshUintCrdGpuKernelMod, float, unsigned int) -} // namespace kernel -} // namespace mindspore diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/simple_constrain/refresh_uint_crd_kernel.h b/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/simple_constrain/refresh_uint_crd_kernel.h deleted file mode 100644 index 2b8bba9cb53..00000000000 --- a/mindspore/ccsrc/plugin/device/gpu/kernel/sponge/simple_constrain/refresh_uint_crd_kernel.h +++ /dev/null @@ -1,93 +0,0 @@ -/** - * Copyright 2021-2022 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_GPU_SPONG_SIMPLE_CONSTRAIN_REFRESH_UINT_CRD_KERNEL_H_ -#define MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_GPU_SPONG_SIMPLE_CONSTRAIN_REFRESH_UINT_CRD_KERNEL_H_ - -#include "plugin/device/gpu/kernel/cuda_impl/sponge/simple_constrain/refresh_uint_crd_impl.cuh" - -#include -#include -#include -#include - -#include "plugin/device/gpu/kernel/gpu_kernel.h" -#include "plugin/device/gpu/kernel/gpu_kernel_factory.h" -#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/cuda_common.h" - -namespace mindspore { -namespace kernel { -template <typename T, typename T1> -class RefreshUintCrdGpuKernelMod : public DeprecatedNativeGpuKernelMod { - public: - RefreshUintCrdGpuKernelMod() : ele_crd(1) {} - ~RefreshUintCrdGpuKernelMod() override = default; - - bool Init(const CNodePtr &kernel_node) override { - // get atom_numbers - kernel_node_ = kernel_node; - atom_numbers = static_cast<int>(GetAttr<int64_t>(kernel_node, "atom_numbers")); - half_exp_gamma_plus_half = static_cast<float>(GetAttr<float>(kernel_node, "half_exp_gamma_plus_half")); - auto shape_crd = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 0); - auto shape_quarter_cof = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 1); - auto shape_test_frc = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 2); - auto shape_mass_inverse = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 3); - - ele_crd *= SizeOf(shape_crd); - ele_quarter_cof *= SizeOf(shape_quarter_cof); - ele_test_frc *= SizeOf(shape_test_frc); - ele_mass_inverse *= SizeOf(shape_mass_inverse); - - InitSizeLists(); - return true; - } - - bool Launch(const std::vector<AddressPtr> &inputs, const std::vector<AddressPtr> &, - const std::vector<AddressPtr> &outputs, void *stream_ptr) override { - auto crd = GetDeviceAddress<T>(inputs, 0); - auto quarter_cof = GetDeviceAddress<T>(inputs, 1); - auto test_frc = GetDeviceAddress<T>(inputs, 2); - auto mass_inverse = GetDeviceAddress<T>(inputs, 3); - - auto uint_crd = GetDeviceAddress<T1>(outputs, 0); - - refreshuintcrd(atom_numbers, half_exp_gamma_plus_half, crd, quarter_cof, test_frc, mass_inverse, uint_crd, - reinterpret_cast<cudaStream_t>(stream_ptr)); - return true; - } - - protected: - void InitSizeLists() override { - input_size_list_.push_back(ele_crd * sizeof(T)); - input_size_list_.push_back(ele_quarter_cof * sizeof(T)); - input_size_list_.push_back(ele_test_frc * sizeof(T)); - input_size_list_.push_back(ele_mass_inverse * sizeof(T)); - - output_size_list_.push_back(3 * atom_numbers * sizeof(T1)); - } - - private: - size_t ele_crd = 1; - size_t ele_quarter_cof = 1; - size_t ele_test_frc = 1; - size_t ele_mass_inverse = 1; - - int atom_numbers; - float half_exp_gamma_plus_half; -}; -} // namespace kernel -} // namespace mindspore -#endif // MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_GPU_SPONG_SIMPLE_CONSTRAIN_REFRESH_UINT_CRD_KERNEL_H_ diff --git a/mindspore/python/mindspore/ops/operations/__init__.py b/mindspore/python/mindspore/ops/operations/__init__.py index da46c2b8261..f77efe6abcc 100644 --- a/mindspore/python/mindspore/ops/operations/__init__.py +++ b/mindspore/python/mindspore/ops/operations/__init__.py @@ -104,23 +104,6 @@ from .random_ops import (RandomChoiceWithMask, StandardNormal, Gamma, RandomGamm LogUniformCandidateSampler, TruncatedNormal) from .rl_ops import (BufferAppend, BufferGetItem, BufferSample) from .sparse_ops import (SparseToDense, SparseTensorDenseMatmul, SparseTensorDenseAdd) -from .sponge_ops import (BondForce, BondEnergy, BondAtomEnergy, BondForceWithAtomEnergy, BondForceWithAtomVirial, - DihedralForce, DihedralEnergy, DihedralAtomEnergy,
DihedralForceWithAtomEnergy, AngleForce, - AngleEnergy, AngleAtomEnergy, AngleForceWithAtomEnergy, PMEReciprocalForce, - LJForce, LJEnergy, LJForceWithPMEDirectForce, PMEExcludedForce, PMEEnergy, Dihedral14LJForce, - Dihedral14LJForceWithDirectCF, Dihedral14LJEnergy, Dihedral14LJCFForceWithAtomEnergy, - Dihedral14LJAtomEnergy, Dihedral14CFEnergy, Dihedral14CFAtomEnergy, - MDTemperature, MDIterationLeapFrogLiujian, - CrdToUintCrd, MDIterationSetupRandState, TransferCrd, FFT3D, IFFT3D, NeighborListUpdate) -from .sponge_update_ops import (ConstrainForceCycleWithVirial, RefreshUintCrd, LastCrdToDr, RefreshCrdVel, - CalculateNowrapCrd, RefreshBoxmapTimes, Totalc6get, CrdToUintCrdQuarter, - MDIterationLeapFrogLiujianWithMaxVel, GetCenterOfMass, MapCenterOfMass, - NeighborListRefresh, MDIterationLeapFrog, MDIterationLeapFrogWithMaxVel, - MDIterationGradientDescent, BondForceWithAtomEnergyAndVirial, ConstrainForceCycle, - LJForceWithVirialEnergy, LJForceWithPMEDirectForceUpdate, PMEReciprocalForceUpdate, - PMEExcludedForceUpdate, LJForceWithVirialEnergyUpdate, - Dihedral14ForceWithAtomEnergyVirial, PMEEnergyUpdate, - ConstrainForceVirial, ConstrainForce, Constrain) __all__ = [ 'HSVToRGB', @@ -499,8 +482,6 @@ __all__ = [ "TensorScatterMul", "TensorScatterDiv", "SoftShrink", - "FFT3D", - "IFFT3D", "HShrink", "PyFunc", "BufferAppend", @@ -522,77 +503,11 @@ __all__ = [ "Cummax", ] -__sponge__ = [ - "Cross", - "BondForce", - "BondEnergy", - "BondAtomEnergy", - "BondForceWithAtomEnergy", - "BondForceWithAtomVirial", - "DihedralForce", - "DihedralEnergy", - "DihedralAtomEnergy", - "DihedralForceWithAtomEnergy", - "AngleForce", - "AngleEnergy", - "AngleAtomEnergy", - "AngleForceWithAtomEnergy", - 'PMEReciprocalForce', - 'LJForce', - 'LJForceWithPMEDirectForce', - 'LJEnergy', - 'PMEExcludedForce', - 'PMEEnergy', - "Dihedral14LJForce", - "Dihedral14LJEnergy", - "Dihedral14LJForceWithDirectCF", - "Dihedral14LJCFForceWithAtomEnergy", - "Dihedral14LJAtomEnergy", - "Dihedral14CFEnergy", - "MDIterationLeapFrog", - "Dihedral14CFAtomEnergy", - "MDTemperature", - "NeighborListUpdate", - "MDIterationLeapFrogLiujian", - "CrdToUintCrd", - "MDIterationSetupRandState", - "TransferCrd", - # Update - "ConstrainForceCycleWithVirial", - "RefreshUintCrd", - "LastCrdToDr", - "RefreshCrdVel", - "CalculateNowrapCrd", - "RefreshBoxmapTimes", - "Totalc6get", - "CrdToUintCrdQuarter", - "MDIterationLeapFrogLiujianWithMaxVel", - "GetCenterOfMass", - "MapCenterOfMass", - "NeighborListRefresh", - "MDIterationLeapFrog", - "MDIterationLeapFrogWithMaxVel", - "MDIterationGradientDescent", - "BondForceWithAtomEnergyAndVirial", - "ConstrainForceCycle", - "LJForceWithVirialEnergy", - "LJForceWithPMEDirectForceUpdate", - "PMEReciprocalForceUpdate", - "PMEExcludedForceUpdate", - "LJForceWithVirialEnergyUpdate", - "Dihedral14ForceWithAtomEnergyVirial", - "PMEEnergyUpdate", - "ConstrainForceVirial", - "ConstrainForce", - "Constrain", -] - __custom__ = [ "ms_kernel", "kernel", ] -__all__.extend(__sponge__) __all__.extend(__custom__) __all__.sort() diff --git a/mindspore/python/mindspore/ops/operations/sponge_ops.py b/mindspore/python/mindspore/ops/operations/sponge_ops.py deleted file mode 100644 index f2f53799694..00000000000 --- a/mindspore/python/mindspore/ops/operations/sponge_ops.py +++ /dev/null @@ -1,3498 +0,0 @@ -# Copyright 2021 Huawei Technologies Co., Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================ - -""" -Note: - SPONGE operators. This is an experimental interface that is subject to change and/or deletion. -""" - -import math - -from ..primitive import Primitive, PrimitiveWithInfer, prim_attr_register -from ..._checkparam import Rel -from ..._checkparam import Validator as validator -from ...common import dtype as mstype - - -class BondForce(PrimitiveWithInfer): - """ - Calculate the force exerted by the simple harmonic bond on the corresponding atoms. - Assume the number of harmonic bonds is m and the number of atoms is n. - - Because there is a large number of inputs and each of them is related to the others, - there is no way to construct `Examples` using random methods. For details, refer to the webpage `SPONGE in MindSpore - `_. - - .. math:: - - dr = (x_1-x_2, y_1-y_2, z_1-z_2) - - .. math:: - - F = (F_x, F_y, F_z) = 2*k*(1 - r_0/|dr|)*dr - - Args: - atom_numbers(int32): the number of atoms n. - bond_numbers(int32): the number of harmonic bonds m. - - Inputs: - - **uint_crd_f** (Tensor) - The unsigned int coordinate value of each atom. - The data type is uint32 and the shape is :math:`(n, 3)`. - - **scaler_f** (Tensor) - The 3-D scale factor (x, y, z), - between the real space float coordinates and the unsigned int coordinates. - The data type is float32 and the shape is :math:`(3,)`. - - **atom_a** (Tensor) - The first atom index of each bond. - The data type is int32 and the shape is :math:`(m,)`. - - **atom_b** (Tensor) - The second atom index of each bond. - The data type is int32 and the shape is :math:`(m,)`. - - **bond_k** (Tensor) - The force constant of each bond. - The data type is float32 and the shape is :math:`(m,)`. - - **bond_r0** (Tensor) - The equilibrium length of each bond. - The data type is float32 and the shape is :math:`(m,)`. - - Outputs: - - **frc_f** (Tensor) - The force felt by each atom. - The data type is float32 and the shape is :math:`(n, 3)`.
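    The two formulas above (and the bond energy of the BondEnergy operator below) transcribe directly into dense NumPy, which can serve as a reference check for the deleted kernel. A minimal sketch, assuming real-space float coordinates rather than the unsigned int representation the operator consumes; the function name is illustrative and the choice of which atom receives +F follows the formula as written (the kernel's sign convention may differ):

        import numpy as np

        def bond_force_energy(crd, atom_a, atom_b, bond_k, bond_r0):
            # crd: (n, 3) float coordinates; atom_a/atom_b: (m,) int bond indices
            dr = crd[atom_a] - crd[atom_b]       # dr = (x_1-x_2, y_1-y_2, z_1-z_2), shape (m, 3)
            r = np.linalg.norm(dr, axis=-1)      # |dr|, assumed nonzero for a valid bond
            ene = bond_k * (r - bond_r0) ** 2    # E = k*(|dr| - r_0)^2, per bond
            f = (2.0 * bond_k * (1.0 - bond_r0 / r))[:, None] * dr  # F = 2*k*(1 - r_0/|dr|)*dr
            frc = np.zeros_like(crd)
            np.add.at(frc, atom_a, f)            # accumulate F onto atom_a (handles repeated indices) ...
            np.add.at(frc, atom_b, -f)           # ... and -F onto atom_b (Newton's third law)
            return frc, ene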
- - Supported Platforms: - ``GPU`` - """ - - @prim_attr_register - def __init__(self, bond_numbers, atom_numbers): - """Initialize BondForce.""" - validator.check_value_type('bond_numbers', bond_numbers, int, self.name) - validator.check_value_type('atom_numbers', atom_numbers, int, self.name) - self.bond_numbers = bond_numbers - self.atom_numbers = atom_numbers - self.add_prim_attr('bond_numbers', self.bond_numbers) - self.add_prim_attr('atom_numbers', self.atom_numbers) - self.init_prim_io_names(inputs=['uint_crd_f', 'scaler_f', 'atom_a', 'atom_b', 'bond_k', 'bond_r0'], - outputs=['frc_f']) - - def infer_shape(self, uint_crd_f_shape, scaler_f_shape, atom_a_shape, atom_b_shape, bond_k_shape, bond_r0_shape): - cls_name = self.name - n = self.atom_numbers - m = self.bond_numbers - validator.check_int(len(uint_crd_f_shape), 2, Rel.EQ, "uint_crd_f_dim", cls_name) - validator.check_int(len(scaler_f_shape), 1, Rel.EQ, "scaler_f_dim", cls_name) - validator.check_int(len(atom_a_shape), 1, Rel.EQ, "atom_a_dim", cls_name) - validator.check_int(len(atom_b_shape), 1, Rel.EQ, "atom_b_dim", cls_name) - validator.check_int(len(bond_k_shape), 1, Rel.EQ, "bond_k_dim", cls_name) - validator.check_int(len(bond_r0_shape), 1, Rel.EQ, "bond_r0_dim", cls_name) - validator.check_int(uint_crd_f_shape[0], n, Rel.EQ, "uint_crd_f_shape[0]", cls_name) - validator.check_int(uint_crd_f_shape[1], 3, Rel.EQ, "uint_crd_f_shape[1]", cls_name) - validator.check_int(scaler_f_shape[0], 3, Rel.EQ, "scaler_f_shape", cls_name) - validator.check_int(atom_a_shape[0], m, Rel.EQ, "uint_crd_f_shape", cls_name) - validator.check_int(atom_b_shape[0], m, Rel.EQ, "atom_b_shape", cls_name) - validator.check_int(bond_k_shape[0], m, Rel.EQ, "bond_k_shape", cls_name) - validator.check_int(bond_r0_shape[0], m, Rel.EQ, "bond_r0_shape", cls_name) - return uint_crd_f_shape - - def infer_dtype(self, uint_crd_f_dtype, scaler_f_type, atom_a_type, atom_b_type, bond_k_type, bond_r0_type): - validator.check_tensor_dtype_valid('uint_crd_f', uint_crd_f_dtype, [mstype.uint32], self.name) - validator.check_tensor_dtype_valid('scaler_f', scaler_f_type, [mstype.float32], self.name) - validator.check_tensor_dtype_valid('atom_a', atom_a_type, [mstype.int32], self.name) - validator.check_tensor_dtype_valid('atom_b', atom_b_type, [mstype.int32], self.name) - validator.check_tensor_dtype_valid('bond_k', bond_k_type, [mstype.float32], self.name) - validator.check_tensor_dtype_valid('bond_r0', bond_r0_type, [mstype.float32], self.name) - return bond_r0_type - - -class BondEnergy(PrimitiveWithInfer): - """ - Calculate the harmonic potential energy between each bonded atom pair. - Assume our system has n atoms and m harmonic bonds. - - Because there is a large amount of inputs and each of them are related, - there is no way to construct `Examples` using random methods. For details, refer the webpage `SPONGE in MindSpore - `_. - - .. math:: - - dr = (x_1-x_2, y_1-y_2, z_1-z_2) - - .. math:: - - E = k*(|dr| - r_0)^2 - - Args: - atom_numbers(int32): the number of atoms n. - bond_numbers(int32): the number of harmonic bonds m. - - Inputs: - - **uint_crd_f** (Tensor) - The unsigned int coordinate value of each atom. - The data type is uint32 and the shape is :math:`(n, 3)`. - - **scaler_f** (Tensor) - The 3-D scale factor (x, y, z), - between the real space float coordinates and the unsigned int coordinates. - The data type is float32 and the shape is :math:`(3,)`. - - **atom_a** (Tensor) - The first atom index of each bond. 
- The data type is int32 and the shape is :math:`(m,)`. - - **atom_b** (Tensor) - The second atom index of each bond. - The data type is int32 and the shape is :math:`(m,)`. - - **bond_k** (Tensor) - The force constant of each bond. - The data type is float32 and the shape is :math:`(m,)`. - - **bond_r0** (Tensor) - The equlibrium length of each bond. - The data type is float32 and the shape is :math:`(m,)`. - - Outputs: - - **bond_ene** (Tensor) - The harmonic potential energy for each bond. - The data type is float32 and the shape is :math:`(m,)`. - - Supported Platforms: - ``GPU`` - """ - - @prim_attr_register - def __init__(self, bond_numbers, atom_numbers): - """Initialize BondEnergy.""" - validator.check_value_type('bond_numbers', bond_numbers, int, self.name) - validator.check_value_type('atom_numbers', atom_numbers, int, self.name) - self.bond_numbers = bond_numbers - self.atom_numbers = atom_numbers - self.add_prim_attr('bond_numbers', self.bond_numbers) - self.add_prim_attr('atom_numbers', self.atom_numbers) - self.init_prim_io_names(inputs=['uint_crd_f', 'scaler_f', 'atom_a', 'atom_b', 'bond_k', 'bond_r0'], - outputs=['bond_ene']) - - def infer_shape(self, uint_crd_f_shape, scaler_f_shape, atom_a_shape, atom_b_shape, bond_k_shape, bond_r0_shape): - cls_name = self.name - n = self.atom_numbers - m = self.bond_numbers - validator.check_int(len(uint_crd_f_shape), 2, Rel.EQ, "uint_crd_f_dim", cls_name) - validator.check_int(len(scaler_f_shape), 1, Rel.EQ, "scaler_f_dim", cls_name) - validator.check_int(len(atom_a_shape), 1, Rel.EQ, "atom_a_dim", cls_name) - validator.check_int(len(atom_b_shape), 1, Rel.EQ, "atom_b_dim", cls_name) - validator.check_int(len(bond_k_shape), 1, Rel.EQ, "bond_k_dim", cls_name) - validator.check_int(len(bond_r0_shape), 1, Rel.EQ, "bond_r0_dim", cls_name) - validator.check_int(uint_crd_f_shape[0], n, Rel.EQ, "uint_crd_f_shape[0]", cls_name) - validator.check_int(uint_crd_f_shape[1], 3, Rel.EQ, "uint_crd_f_shape[1]", cls_name) - validator.check_int(scaler_f_shape[0], 3, Rel.EQ, "scaler_f_shape", cls_name) - validator.check_int(atom_a_shape[0], m, Rel.EQ, "uint_crd_f_shape", cls_name) - validator.check_int(atom_b_shape[0], m, Rel.EQ, "atom_b_shape", cls_name) - validator.check_int(bond_k_shape[0], m, Rel.EQ, "bond_k_shape", cls_name) - validator.check_int(bond_r0_shape[0], m, Rel.EQ, "bond_r0_shape", cls_name) - - return bond_k_shape - - def infer_dtype(self, uint_crd_f_dtype, scaler_f_type, atom_a_type, atom_b_type, bond_k_type, bond_r0_type): - validator.check_tensor_dtype_valid('uint_crd_f', uint_crd_f_dtype, [mstype.uint32], self.name) - validator.check_tensor_dtype_valid('scaler_f', scaler_f_type, [mstype.float32], self.name) - validator.check_tensor_dtype_valid('atom_a', atom_a_type, [mstype.int32], self.name) - validator.check_tensor_dtype_valid('atom_b', atom_b_type, [mstype.int32], self.name) - validator.check_tensor_dtype_valid('bond_k', bond_k_type, [mstype.float32], self.name) - validator.check_tensor_dtype_valid('bond_r0', bond_r0_type, [mstype.float32], self.name) - return bond_r0_type - - -class BondAtomEnergy(PrimitiveWithInfer): - """ - Add the potential energy caused by simple harmonic bonds to the total - potential energy of each atom. - - The calculation formula is the same as operator BondEnergy(). - - Because there is a large amount of inputs and each of them are related, - there is no way to construct `Examples` using random methods. For details, refer the webpage `SPONGE in MindSpore - `_. 
- - Args: - atom_numbers(int32): the number of atoms n. - bond_numbers(int32): the number of harmonic bonds m. - - Inputs: - - **uint_crd_f** (Tensor) - The unsigned int coordinate value of each atom. - The data type is uint32 and the shape is :math:`(n, 3)`. - - **scaler_f** (Tensor) - The 3-D scale factor (x, y, z), - between the real space float coordinates and the unsigned int coordinates. - The data type is float32 and the shape is :math:`(3,)`. - - **atom_a** (Tensor) - The first atom index of each bond. - The data type is int32 and the shape is :math:`(m,)`. - - **atom_b** (Tensor) - The second atom index of each bond. - The data type is int32 and the shape is :math:`(m,)`. - - **bond_k** (Tensor) - The force constant of each bond. - The data type is float32 and the shape is :math:`(m,)`. - - **bond_r0** (Tensor) - The equlibrium length of each bond. - The data type is float32 and the shape is :math:`(m,)`. - - Outputs: - - **atom_ene** (Tensor) - The accumulated potential energy for each atom. - The data type is float32 and the shape is :math:`(n,)`. - - Supported Platforms: - ``GPU`` - """ - - @prim_attr_register - def __init__(self, bond_numbers, atom_numbers): - """Initialize BondAtomEnergy.""" - validator.check_value_type('bond_numbers', bond_numbers, int, self.name) - validator.check_value_type('atom_numbers', atom_numbers, int, self.name) - self.bond_numbers = bond_numbers - self.atom_numbers = atom_numbers - self.add_prim_attr('bond_numbers', self.bond_numbers) - self.add_prim_attr('atom_numbers', self.atom_numbers) - self.init_prim_io_names(inputs=['uint_crd_f', 'scaler_f', 'atom_a', 'atom_b', 'bond_k', 'bond_r0'], - outputs=['atom_ene']) - - def infer_shape(self, uint_crd_f_shape, scaler_f_shape, atom_a_shape, atom_b_shape, bond_k_shape, bond_r0_shape): - cls_name = self.name - n = self.atom_numbers - m = self.bond_numbers - validator.check_int(len(uint_crd_f_shape), 2, Rel.EQ, "uint_crd_f_dim", cls_name) - validator.check_int(len(scaler_f_shape), 1, Rel.EQ, "scaler_f_dim", cls_name) - validator.check_int(len(atom_a_shape), 1, Rel.EQ, "atom_a_dim", cls_name) - validator.check_int(len(atom_b_shape), 1, Rel.EQ, "atom_b_dim", cls_name) - validator.check_int(len(bond_k_shape), 1, Rel.EQ, "bond_k_dim", cls_name) - validator.check_int(len(bond_r0_shape), 1, Rel.EQ, "bond_r0_dim", cls_name) - validator.check_int(uint_crd_f_shape[0], n, Rel.EQ, "uint_crd_f_shape[0]", cls_name) - validator.check_int(uint_crd_f_shape[1], 3, Rel.EQ, "uint_crd_f_shape[1]", cls_name) - validator.check_int(scaler_f_shape[0], 3, Rel.EQ, "scaler_f_shape", cls_name) - validator.check_int(atom_a_shape[0], m, Rel.EQ, "uint_crd_f_shape", cls_name) - validator.check_int(atom_b_shape[0], m, Rel.EQ, "atom_b_shape", cls_name) - validator.check_int(bond_k_shape[0], m, Rel.EQ, "bond_k_shape", cls_name) - validator.check_int(bond_r0_shape[0], m, Rel.EQ, "bond_r0_shape", cls_name) - return [n,] - - def infer_dtype(self, uint_crd_f_dtype, scaler_f_type, atom_a_type, atom_b_type, bond_k_type, bond_r0_type): - validator.check_tensor_dtype_valid('uint_crd_f', uint_crd_f_dtype, [mstype.uint32], self.name) - validator.check_tensor_dtype_valid('scaler_f', scaler_f_type, [mstype.float32], self.name) - validator.check_tensor_dtype_valid('atom_a', atom_a_type, [mstype.int32], self.name) - validator.check_tensor_dtype_valid('atom_b', atom_b_type, [mstype.int32], self.name) - validator.check_tensor_dtype_valid('bond_k', bond_k_type, [mstype.float32], self.name) - validator.check_tensor_dtype_valid('bond_r0', bond_r0_type, 
[mstype.float32], self.name) - return bond_r0_type - - -class BondForceWithAtomEnergy(PrimitiveWithInfer): - """ - Calculate bond force and harmonic potential energy together. - - The calculation formula is the same as operator BondForce() and BondEnergy(). - - Because there is a large amount of inputs and each of them are related, - there is no way to construct `Examples` using random methods. For details, refer the webpage `SPONGE in MindSpore - `_. - - Args: - atom_numbers(int32): the number of atoms n. - bond_numbers(int32): the number of harmonic bonds m. - - Inputs: - - **uint_crd_f** (Tensor) - The unsigned int coordinate value of each atom. - The data type is uint32 and the shape is :math:`(n, 3)`. - - **scaler_f** (Tensor) - The 3-D scale factor (x, y, z), - between the real space float coordinates and the unsigned int coordinates. - The data type is float32 and the shape is :math:`(3,)`. - - **atom_a** (Tensor) - The first atom index of each bond. - The data type is int32 and the shape is :math:`(m,)`. - - **atom_b** (Tensor) - The second atom index of each bond. - The data type is int32 and the shape is :math:`(m,)`. - - **bond_k** (Tensor) - The force constant of each bond. - The data type is float32 and the shape is :math:`(m,)`. - - **bond_r0** (Tensor) - The equlibrium length of each bond. - The data type is float32 and the shape is :math:`(m,)`. - - Outputs: - - **frc_f** (Tensor) - The force felt by each atom. - The data type is float32 and the shape is :math:`(n, 3)`. - - **atom_e** (Tensor) - The accumulated potential energy for each atom. - The data type is float32 and the shape is :math:`(n,)`. - - Supported Platforms: - ``GPU`` - """ - - @prim_attr_register - def __init__(self, bond_numbers, atom_numbers): - """Initialize BondForceWithAtomEnergy.""" - validator.check_value_type('bond_numbers', bond_numbers, int, self.name) - validator.check_value_type('atom_numbers', atom_numbers, int, self.name) - self.bond_numbers = bond_numbers - self.atom_numbers = atom_numbers - self.add_prim_attr('bond_numbers', self.bond_numbers) - self.add_prim_attr('atom_numbers', self.atom_numbers) - self.init_prim_io_names(inputs=['uint_crd_f', 'scaler_f', 'atom_a', 'atom_b', 'bond_k', 'bond_r0'], - outputs=['frc_f', 'atom_e']) - - def infer_shape(self, uint_crd_f_shape, scaler_f_shape, atom_a_shape, atom_b_shape, bond_k_shape, bond_r0_shape): - cls_name = self.name - n = self.atom_numbers - m = self.bond_numbers - validator.check_int(len(uint_crd_f_shape), 2, Rel.EQ, "uint_crd_f_dim", cls_name) - validator.check_int(len(scaler_f_shape), 1, Rel.EQ, "scaler_f_dim", cls_name) - validator.check_int(len(atom_a_shape), 1, Rel.EQ, "atom_a_dim", cls_name) - validator.check_int(len(atom_b_shape), 1, Rel.EQ, "atom_b_dim", cls_name) - validator.check_int(len(bond_k_shape), 1, Rel.EQ, "bond_k_dim", cls_name) - validator.check_int(len(bond_r0_shape), 1, Rel.EQ, "bond_r0_dim", cls_name) - validator.check_int(uint_crd_f_shape[0], n, Rel.EQ, "uint_crd_f_shape[0]", cls_name) - validator.check_int(uint_crd_f_shape[1], 3, Rel.EQ, "uint_crd_f_shape[1]", cls_name) - validator.check_int(scaler_f_shape[0], 3, Rel.EQ, "scaler_f_shape", cls_name) - validator.check_int(atom_a_shape[0], m, Rel.EQ, "uint_crd_f_shape", cls_name) - validator.check_int(atom_b_shape[0], m, Rel.EQ, "atom_b_shape", cls_name) - validator.check_int(bond_k_shape[0], m, Rel.EQ, "bond_k_shape", cls_name) - validator.check_int(bond_r0_shape[0], m, Rel.EQ, "bond_r0_shape", cls_name) - - return uint_crd_f_shape, [n,] - - def infer_dtype(self, 
uint_crd_f_dtype, scaler_f_type, atom_a_type, atom_b_type, bond_k_type, bond_r0_type): - validator.check_tensor_dtype_valid('uint_crd_f', uint_crd_f_dtype, [mstype.uint32], self.name) - validator.check_tensor_dtype_valid('scaler_f', scaler_f_type, [mstype.float32], self.name) - - validator.check_tensor_dtype_valid('atom_a', atom_a_type, [mstype.int32], self.name) - validator.check_tensor_dtype_valid('atom_b', atom_b_type, [mstype.int32], self.name) - - validator.check_tensor_dtype_valid('bond_k', bond_k_type, [mstype.float32], self.name) - validator.check_tensor_dtype_valid('bond_r0', bond_r0_type, [mstype.float32], self.name) - return bond_r0_type, bond_r0_type - - -class BondForceWithAtomVirial(PrimitiveWithInfer): - """ - Calculate bond force and the virial coefficient caused by simple harmonic - bond for each atom together. - - The calculation formula of the force part is the same as operator BondForce(). - - Because there is a large number of inputs and each of them is related to the others, - there is no way to construct `Examples` using random methods. For details, refer to the webpage `SPONGE in MindSpore - `_. - - The virial part is as follows: - - .. math:: - - dr = (x_1-x_2, y_1-y_2, z_1-z_2) - - .. math:: - - virial = |dr|*(|dr| - r_0)*k - - Args: - atom_numbers(int32): the number of atoms n. - bond_numbers(int32): the number of harmonic bonds m. - - Inputs: - - **uint_crd_f** (Tensor) - The unsigned int coordinate value of each atom. - The data type is uint32 and the shape is :math:`(n, 3)`. - - **scaler_f** (Tensor) - The 3-D scale factor (x, y, z), - between the real space float coordinates and the unsigned int coordinates. - The data type is float32 and the shape is :math:`(3,)`. - - **atom_a** (Tensor) - The first atom index of each bond. - The data type is int32 and the shape is :math:`(m,)`. - - **atom_b** (Tensor) - The second atom index of each bond. - The data type is int32 and the shape is :math:`(m,)`. - - **bond_k** (Tensor) - The force constant of each bond. - The data type is float32 and the shape is :math:`(m,)`. - - **bond_r0** (Tensor) - The equilibrium length of each bond. - The data type is float32 and the shape is :math:`(m,)`. - - Outputs: - - **frc_f** (Tensor) - Same as operator BondForce(). - The data type is float32 and the shape is :math:`(n, 3)`. - - **atom_v** (Tensor) - The accumulated virial coefficient for each atom. - The data type is float32 and the shape is :math:`(n,)`.
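    The per-bond virial term above admits the same kind of dense reference sketch. Illustrative only: the docstring specifies the per-bond value, so accumulating the whole term onto atom_a is an assumption here; the kernel may instead split it between the two bonded atoms:

        def bond_atom_virial(atom_numbers, crd, atom_a, atom_b, bond_k, bond_r0):
            dr = crd[atom_a] - crd[atom_b]            # per-bond displacement, shape (m, 3)
            r = np.linalg.norm(dr, axis=-1)
            per_bond = bond_k * r * (r - bond_r0)     # virial = |dr|*(|dr| - r_0)*k
            atom_v = np.zeros(atom_numbers, dtype=crd.dtype)
            np.add.at(atom_v, atom_a, per_bond)       # assumed accumulation target (see note above)
            return atom_v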
- - Supported Platforms: - ``GPU`` - """ - - @prim_attr_register - def __init__(self, bond_numbers, atom_numbers): - """Initialize BondForceWithAtomVirial.""" - validator.check_value_type('bond_numbers', bond_numbers, int, self.name) - validator.check_value_type('atom_numbers', atom_numbers, int, self.name) - self.bond_numbers = bond_numbers - self.atom_numbers = atom_numbers - self.add_prim_attr('bond_numbers', self.bond_numbers) - self.add_prim_attr('atom_numbers', self.atom_numbers) - self.init_prim_io_names(inputs=['uint_crd_f', 'scaler_f', 'atom_a', 'atom_b', 'bond_k', 'bond_r0'], - outputs=['frc_f', 'atom_v']) - - def infer_shape(self, uint_crd_f_shape, scaler_f_shape, atom_a_shape, atom_b_shape, bond_k_shape, bond_r0_shape): - cls_name = self.name - n = self.atom_numbers - m = self.bond_numbers - validator.check_int(len(uint_crd_f_shape), 2, Rel.EQ, "uint_crd_f_dim", cls_name) - validator.check_int(len(scaler_f_shape), 1, Rel.EQ, "scaler_f_dim", cls_name) - validator.check_int(len(atom_a_shape), 1, Rel.EQ, "atom_a_dim", cls_name) - validator.check_int(len(atom_b_shape), 1, Rel.EQ, "atom_b_dim", cls_name) - validator.check_int(len(bond_k_shape), 1, Rel.EQ, "bond_k_dim", cls_name) - validator.check_int(len(bond_r0_shape), 1, Rel.EQ, "bond_r0_dim", cls_name) - validator.check_int(uint_crd_f_shape[0], n, Rel.EQ, "uint_crd_f_shape[0]", cls_name) - validator.check_int(uint_crd_f_shape[1], 3, Rel.EQ, "uint_crd_f_shape[1]", cls_name) - validator.check_int(scaler_f_shape[0], 3, Rel.EQ, "scaler_f_shape", cls_name) - validator.check_int(atom_a_shape[0], m, Rel.EQ, "atom_a_shape", cls_name) - validator.check_int(atom_b_shape[0], m, Rel.EQ, "atom_b_shape", cls_name) - validator.check_int(bond_k_shape[0], m, Rel.EQ, "bond_k_shape", cls_name) - validator.check_int(bond_r0_shape[0], m, Rel.EQ, "bond_r0_shape", cls_name) - - return uint_crd_f_shape, [n,] - - def infer_dtype(self, uint_crd_f_dtype, scaler_f_type, atom_a_type, atom_b_type, bond_k_type, bond_r0_type): - validator.check_tensor_dtype_valid('uint_crd_f', uint_crd_f_dtype, [mstype.uint32], self.name) - validator.check_tensor_dtype_valid('scaler_f', scaler_f_type, [mstype.float32], self.name) - - validator.check_tensor_dtype_valid('atom_a', atom_a_type, [mstype.int32], self.name) - validator.check_tensor_dtype_valid('atom_b', atom_b_type, [mstype.int32], self.name) - - validator.check_tensor_dtype_valid('bond_k', bond_k_type, [mstype.float32], self.name) - validator.check_tensor_dtype_valid('bond_r0', bond_r0_type, [mstype.float32], self.name) - return bond_r0_type, bond_r0_type - - -class DihedralForce(PrimitiveWithInfer): - """ - Calculate the force exerted by the dihedral term which is made of 4 atoms - on the corresponding atoms. Assume the number of dihedral terms is m and - the number of atoms is n. - - Because there is a large number of inputs and each of them is related to the others, - there is no way to construct `Examples` using random methods. For details, refer to the webpage `SPONGE in MindSpore - `_. - - Args: - dihedral_numbers(int32): the number of dihedral terms m. - - Inputs: - - **uint_crd_f** (Tensor) - The unsigned int coordinates - value of each atom. The data type is uint32 and the shape is :math:`(n, 3)`. - - **scaler_f** (Tensor) - The 3-D scale factor between - the real space float coordinates and the unsigned int coordinates. - The data type is float32 and the shape is :math:`(3,)`. - - **atom_a** (Tensor) - The 1st atom index of each dihedral. - The data type is int32 and the shape is :math:`(m,)`.
- - **atom_b** (Tensor) - The 2nd atom index of each dihedral. - The data type is int32 and the shape is :math:`(m,)`. - - **atom_c** (Tensor) - The 3rd atom index of each dihedral. - The data type is int32 and the shape is :math:`(m,)`. - - **atom_d** (Tensor) - The 4th atom index of each dihedral. - 4 atoms are connected in the form a-b-c-d. - The data type is int32 and the shape is :math:`(m,)`. - - **ipn** (Tensor) - The period of dihedral angle of each dihedral. - The data type is int32 and the shape is :math:`(m,)`. - - **pk** (Tensor) - The force constant of each dihedral. - The data type is float32 and the shape is :math:`(m,)`. - - **gamc** (Tensor) - k*cos(phi_0) of each dihedral. - The data type is float32 and the shape is :math:`(m,)`. - - **gams** (Tensor) - k*sin(phi_0) of each dihedral. - The data type is float32 and the shape is :math:`(m,)`. - - **pn** (Tensor) - The floating point form of ipn. - The data type is float32 and the shape is :math:`(m,)`. - - Outputs: - - **frc_f** (Tensor) - The force felt by each atom. - The data type is float32 and the shape is :math:`(n, 3)`. - - Supported Platforms: - ``GPU`` - """ - - @prim_attr_register - def __init__(self, dihedral_numbers): - """Initialize DihedralForce.""" - validator.check_value_type('dihedral_numbers', dihedral_numbers, int, self.name) - self.dihedral_numbers = dihedral_numbers - self.init_prim_io_names(inputs=['uint_crd_f', 'scaler_f', 'atom_a', 'atom_b', 'atom_c', 'atom_d', 'ipn', 'pk', - 'gamc', 'gams', 'pn'], - outputs=['frc_f']) - self.add_prim_attr('dihedral_numbers', self.dihedral_numbers) - - def infer_shape(self, uint_crd_f_shape, scaler_f_shape, atom_a_shape, atom_b_shape, atom_c_shape, atom_d_shape, - ipn_shape, pk_shape, gamc_shape, gams_shape, pn_shape): - cls_name = self.name - m = self.dihedral_numbers - validator.check_int(len(uint_crd_f_shape), 2, Rel.EQ, "uint_crd_f_dim", cls_name) - validator.check_int(len(scaler_f_shape), 1, Rel.EQ, "scaler_f_dim", cls_name) - validator.check_int(len(atom_a_shape), 1, Rel.EQ, "atom_a_dim", cls_name) - validator.check_int(len(atom_b_shape), 1, Rel.EQ, "atom_b_dim", cls_name) - validator.check_int(len(atom_c_shape), 1, Rel.EQ, "atom_c_dim", cls_name) - validator.check_int(len(atom_d_shape), 1, Rel.EQ, "atom_d_dim", cls_name) - validator.check_int(len(ipn_shape), 1, Rel.EQ, "ipn_dim", cls_name) - validator.check_int(len(pk_shape), 1, Rel.EQ, "pk_dim", cls_name) - validator.check_int(len(gamc_shape), 1, Rel.EQ, "gamc_dim", cls_name) - validator.check_int(len(gams_shape), 1, Rel.EQ, "gams_dim", cls_name) - validator.check_int(len(pn_shape), 1, Rel.EQ, "pn_dim", cls_name) - - validator.check_int(uint_crd_f_shape[1], 3, Rel.EQ, "uint_crd_f_shape[1]", cls_name) - validator.check_int(scaler_f_shape[0], 3, Rel.EQ, "scaler_f_shape", cls_name) - validator.check_int(atom_a_shape[0], m, Rel.EQ, "atom_a_shape", cls_name) - validator.check_int(atom_b_shape[0], m, Rel.EQ, "atom_b_shape", cls_name) - validator.check_int(atom_c_shape[0], m, Rel.EQ, "atom_c_shape", cls_name) - validator.check_int(atom_d_shape[0], m, Rel.EQ, "atom_d_shape", cls_name) - validator.check_int(ipn_shape[0], m, Rel.EQ, "ipn_shape", cls_name) - validator.check_int(pk_shape[0], m, Rel.EQ, "pk_shape", cls_name) - validator.check_int(gamc_shape[0], m, Rel.EQ, "gamc_shape", cls_name) - validator.check_int(gams_shape[0], m, Rel.EQ, "gams_shape", cls_name) - validator.check_int(pn_shape[0], m, Rel.EQ, "pn_shape", cls_name) - return uint_crd_f_shape - - def infer_dtype(self, uint_crd_f_dtype, scaler_f_type, 
atom_a_type, atom_b_type, atom_c_type, atom_d_type, - ipn_type, pk_type, gamc_type, gams_type, pn_type): - validator.check_tensor_dtype_valid('uint_crd_f', uint_crd_f_dtype, [mstype.uint32], self.name) - validator.check_tensor_dtype_valid('scaler_f', scaler_f_type, [mstype.float32], self.name) - validator.check_tensor_dtype_valid('atom_a', atom_a_type, [mstype.int32], self.name) - validator.check_tensor_dtype_valid('atom_b', atom_b_type, [mstype.int32], self.name) - validator.check_tensor_dtype_valid('atom_c', atom_c_type, [mstype.int32], self.name) - validator.check_tensor_dtype_valid('atom_d', atom_d_type, [mstype.int32], self.name) - validator.check_tensor_dtype_valid('ipn', ipn_type, [mstype.int32], self.name) - validator.check_tensor_dtype_valid('pk', pk_type, [mstype.float32], self.name) - validator.check_tensor_dtype_valid('gamc', gamc_type, [mstype.float32], self.name) - validator.check_tensor_dtype_valid('gams', gams_type, [mstype.float32], self.name) - validator.check_tensor_dtype_valid('pn', pn_type, [mstype.float32], self.name) - - return pn_type - - -class DihedralEnergy(PrimitiveWithInfer): - """ - Calculate the potential energy caused by dihedral terms for each 4-atom pair. - Assume our system has n atoms and m dihedral terms. - - Because there is a large number of inputs and each of them is related to the others, - there is no way to construct `Examples` using random methods. For details, refer to the webpage `SPONGE in MindSpore - `_. - - Args: - dihedral_numbers(int32): the number of dihedral terms m. - - Inputs: - - **uint_crd_f** (Tensor) - The unsigned int coordinates - value of each atom. - The data type is uint32 and the shape is :math:`(n, 3)`. - - **scaler_f** (Tensor) - The 3-D scale factor between - the real space float coordinates and the unsigned int coordinates. - The data type is float32 and the shape is :math:`(3,)`. - - **atom_a** (Tensor) - The 1st atom index of each dihedral. - The data type is int32 and the shape is :math:`(m,)`. - - **atom_b** (Tensor) - The 2nd atom index of each dihedral. - The data type is int32 and the shape is :math:`(m,)`. - - **atom_c** (Tensor) - The 3rd atom index of each dihedral. - The data type is int32 and the shape is :math:`(m,)`. - - **atom_d** (Tensor) - The 4th atom index of each dihedral. - 4 atoms are connected in the form a-b-c-d. - The data type is int32 and the shape is :math:`(m,)`. - - **ipn** (Tensor) - The period of dihedral angle of each dihedral. - The data type is int32 and the shape is :math:`(m,)`. - - **pk** (Tensor) - The force constant of each dihedral. - The data type is float32 and the shape is :math:`(m,)`. - - **gamc** (Tensor) - k*cos(phi_0) of each dihedral. - The data type is float32 and the shape is :math:`(m,)`. - - **gams** (Tensor) - k*sin(phi_0) of each dihedral. - The data type is float32 and the shape is :math:`(m,)`. - - **pn** (Tensor) - The floating point form of ipn. - The data type is float32 and the shape is :math:`(m,)`. - - Outputs: - - **ene** (Tensor) - The potential energy for each - dihedral term. The data type is float32 and the shape is :math:`(m,)`.
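    The gamc/gams inputs are exactly the two coefficients needed to evaluate an AMBER-style periodic torsion term without recomputing trigonometric functions of phi_0. Assuming that standard form (the docstring does not state the energy expression itself, so this is a worked identity rather than the operator's documented formula), expanding the cosine gives

    .. math::

        E = pk*(1 + cos(pn*phi - phi_0)) = pk + gamc*cos(pn*phi) + gams*sin(pn*phi)

    where phi is the a-b-c-d torsion angle, gamc = pk*cos(phi_0) and gams = pk*sin(phi_0), so only pk, gamc and gams are needed at run time.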
- - Supported Platforms: - ``GPU`` - """ - - @prim_attr_register - def __init__(self, dihedral_numbers): - """Initialize DihedralEnergy.""" - validator.check_value_type('dihedral_numbers', dihedral_numbers, int, self.name) - self.dihedral_numbers = dihedral_numbers - self.init_prim_io_names(inputs=['uint_crd_f', 'scaler_f', 'atom_a', 'atom_b', 'atom_c', 'atom_d', 'ipn', 'pk', - 'gamc', 'gams', 'pn'], - outputs=['ene']) - self.add_prim_attr('dihedral_numbers', self.dihedral_numbers) - - def infer_shape(self, uint_crd_f_shape, scaler_f_shape, atom_a_shape, atom_b_shape, atom_c_shape, atom_d_shape, - ipn_shape, pk_shape, gamc_shape, gams_shape, pn_shape): - cls_name = self.name - m = self.dihedral_numbers - validator.check_int(len(uint_crd_f_shape), 2, Rel.EQ, "uint_crd_f_dim", cls_name) - validator.check_int(len(scaler_f_shape), 1, Rel.EQ, "scaler_f_dim", cls_name) - validator.check_int(len(atom_a_shape), 1, Rel.EQ, "atom_a_dim", cls_name) - validator.check_int(len(atom_b_shape), 1, Rel.EQ, "atom_b_dim", cls_name) - validator.check_int(len(atom_c_shape), 1, Rel.EQ, "atom_c_dim", cls_name) - validator.check_int(len(atom_d_shape), 1, Rel.EQ, "atom_d_dim", cls_name) - validator.check_int(len(ipn_shape), 1, Rel.EQ, "ipn_dim", cls_name) - validator.check_int(len(pk_shape), 1, Rel.EQ, "pk_dim", cls_name) - validator.check_int(len(gamc_shape), 1, Rel.EQ, "gamc_dim", cls_name) - validator.check_int(len(gams_shape), 1, Rel.EQ, "gams_dim", cls_name) - validator.check_int(len(pn_shape), 1, Rel.EQ, "pn_dim", cls_name) - - validator.check_int(uint_crd_f_shape[1], 3, Rel.EQ, "uint_crd_f_shape[1]", cls_name) - validator.check_int(scaler_f_shape[0], 3, Rel.EQ, "scaler_f_shape", cls_name) - validator.check_int(atom_a_shape[0], m, Rel.EQ, "atom_a_shape", cls_name) - validator.check_int(atom_b_shape[0], m, Rel.EQ, "atom_b_shape", cls_name) - validator.check_int(atom_c_shape[0], m, Rel.EQ, "atom_c_shape", cls_name) - validator.check_int(atom_d_shape[0], m, Rel.EQ, "atom_d_shape", cls_name) - validator.check_int(ipn_shape[0], m, Rel.EQ, "ipn_shape", cls_name) - validator.check_int(pk_shape[0], m, Rel.EQ, "pk_shape", cls_name) - validator.check_int(gamc_shape[0], m, Rel.EQ, "gamc_shape", cls_name) - validator.check_int(gams_shape[0], m, Rel.EQ, "gams_shape", cls_name) - validator.check_int(pn_shape[0], m, Rel.EQ, "pn_shape", cls_name) - return [m,] - - def infer_dtype(self, uint_crd_f_dtype, scaler_f_type, atom_a_type, atom_b_type, atom_c_type, atom_d_type, - ipn_type, pk_type, gamc_type, gams_type, pn_type): - validator.check_tensor_dtype_valid('uint_crd_f', uint_crd_f_dtype, [mstype.uint32], self.name) - validator.check_tensor_dtype_valid('scaler_f', scaler_f_type, [mstype.float32], self.name) - validator.check_tensor_dtype_valid('atom_a', atom_a_type, [mstype.int32], self.name) - validator.check_tensor_dtype_valid('atom_b', atom_b_type, [mstype.int32], self.name) - validator.check_tensor_dtype_valid('atom_c', atom_c_type, [mstype.int32], self.name) - validator.check_tensor_dtype_valid('atom_d', atom_d_type, [mstype.int32], self.name) - validator.check_tensor_dtype_valid('ipn', ipn_type, [mstype.int32], self.name) - validator.check_tensor_dtype_valid('pk', pk_type, [mstype.float32], self.name) - validator.check_tensor_dtype_valid('gamc', gamc_type, [mstype.float32], self.name) - validator.check_tensor_dtype_valid('gams', gams_type, [mstype.float32], self.name) - validator.check_tensor_dtype_valid('pn', pn_type, [mstype.float32], self.name) - - return pn_type - - -class DihedralAtomEnergy(PrimitiveWithInfer): - 
""" - Add the potential energy caused by dihedral terms to the total potential - energy of each atom. - - Because there is a large amount of inputs and each of them are related, - there is no way to construct `Examples` using random methods. For details, refer the webpage `SPONGE in MindSpore - `_. - - The calculation formula is the same as operator DihedralEnergy(). - - Args: - dihedral_numbers(int32): the number of dihedral terms m. - - Inputs: - - **uint_crd_f** (Tensor) - The unsigned int coordinates - value of each atom. The data type is uint32 and the shape is :math:`(n, 3)`. - - **scaler_f** (Tensor) - The 3-D scale factor between - the real space float coordinates and the unsigned int coordinates. - The data type is float32 and the shape is :math:`(3,)`. - - **atom_a** (Tensor) - The 1st atom index of each dihedral. - The data type is int32 and the shape is :math:`(m,)`. - - **atom_b** (Tensor) - The 2nd atom index of each dihedral. - The data type is int32 and the shape is :math:`(m,)`. - - **atom_c** (Tensor) - The 3rd atom index of each dihedral. - The data type is int32 and the shape is :math:`(m,)`. - - **atom_d** (Tensor) - The 4th atom index of each dihedral. - 4 atoms are connected in the form a-b-c-d. The data type is int32 and the shape is :math:`(m,)`. - - **ipn** (Tensor) - The period of dihedral angle of each dihedral. - The data type is int32 and the shape is :math:`(m,)`. - - **pk** (Tensor) - The force constant of each dihedral. - The data type is float32 and the shape is :math:`(m,)`. - - **gamc** (Tensor) - k*cos(phi_0) of each dihedral. - The data type is float32 and the shape is :math:`(m,)`. - - **gams** (Tensor) - k*sin(phi_0) of each dihedral. - The data type is float32 and the shape is :math:`(m,)`. - - **pn** (Tensor) - The floating point form of ipn. - The data type is float32 and the shape is :math:`(m,)`. - - Outputs: - - **ene** (Tensor) - The accumulated potential - energy for each atom. The data type is float32 and the shape is :math:`(n,)`. 
- - Supported Platforms: - ``GPU`` - """ - - @prim_attr_register - def __init__(self, dihedral_numbers): - """Initialize DihedralAtomEnergy.""" - validator.check_value_type('dihedral_numbers', dihedral_numbers, int, self.name) - self.dihedral_numbers = dihedral_numbers - self.init_prim_io_names(inputs=['uint_crd_f', 'scaler_f', 'atom_a', 'atom_b', 'atom_c', 'atom_d', 'ipn', 'pk', - 'gamc', 'gams', 'pn'], - outputs=['ene']) - self.add_prim_attr('dihedral_numbers', self.dihedral_numbers) - - def infer_shape(self, uint_crd_f_shape, scaler_f_shape, atom_a_shape, atom_b_shape, atom_c_shape, atom_d_shape, - ipn_shape, pk_shape, gamc_shape, gams_shape, pn_shape): - cls_name = self.name - n = uint_crd_f_shape[0] - m = self.dihedral_numbers - validator.check_int(len(uint_crd_f_shape), 2, Rel.EQ, "uint_crd_f_dim", cls_name) - validator.check_int(len(scaler_f_shape), 1, Rel.EQ, "scaler_f_dim", cls_name) - validator.check_int(len(atom_a_shape), 1, Rel.EQ, "atom_a_dim", cls_name) - validator.check_int(len(atom_b_shape), 1, Rel.EQ, "atom_b_dim", cls_name) - validator.check_int(len(atom_c_shape), 1, Rel.EQ, "atom_c_dim", cls_name) - validator.check_int(len(atom_d_shape), 1, Rel.EQ, "atom_d_dim", cls_name) - validator.check_int(len(ipn_shape), 1, Rel.EQ, "ipn_dim", cls_name) - validator.check_int(len(pk_shape), 1, Rel.EQ, "pk_dim", cls_name) - validator.check_int(len(gamc_shape), 1, Rel.EQ, "gamc_dim", cls_name) - validator.check_int(len(gams_shape), 1, Rel.EQ, "gams_dim", cls_name) - validator.check_int(len(pn_shape), 1, Rel.EQ, "pn_dim", cls_name) - - validator.check_int(uint_crd_f_shape[1], 3, Rel.EQ, "uint_crd_f_shape[1]", cls_name) - validator.check_int(scaler_f_shape[0], 3, Rel.EQ, "scaler_f_shape", cls_name) - validator.check_int(atom_a_shape[0], m, Rel.EQ, "atom_a_shape", cls_name) - validator.check_int(atom_b_shape[0], m, Rel.EQ, "atom_b_shape", cls_name) - validator.check_int(atom_c_shape[0], m, Rel.EQ, "atom_c_shape", cls_name) - validator.check_int(atom_d_shape[0], m, Rel.EQ, "atom_d_shape", cls_name) - validator.check_int(ipn_shape[0], m, Rel.EQ, "ipn_shape", cls_name) - validator.check_int(pk_shape[0], m, Rel.EQ, "pk_shape", cls_name) - validator.check_int(gamc_shape[0], m, Rel.EQ, "gamc_shape", cls_name) - validator.check_int(gams_shape[0], m, Rel.EQ, "gams_shape", cls_name) - validator.check_int(pn_shape[0], m, Rel.EQ, "pn_shape", cls_name) - return [n,] - - def infer_dtype(self, uint_crd_f_dtype, scaler_f_type, atom_a_type, atom_b_type, atom_c_type, atom_d_type, - ipn_type, pk_type, gamc_type, gams_type, pn_type): - validator.check_tensor_dtype_valid('uint_crd_f', uint_crd_f_dtype, [mstype.uint32], self.name) - validator.check_tensor_dtype_valid('scaler_f', scaler_f_type, [mstype.float32], self.name) - validator.check_tensor_dtype_valid('atom_a', atom_a_type, [mstype.int32], self.name) - validator.check_tensor_dtype_valid('atom_b', atom_b_type, [mstype.int32], self.name) - validator.check_tensor_dtype_valid('atom_c', atom_c_type, [mstype.int32], self.name) - validator.check_tensor_dtype_valid('atom_d', atom_d_type, [mstype.int32], self.name) - validator.check_tensor_dtype_valid('ipn', ipn_type, [mstype.int32], self.name) - validator.check_tensor_dtype_valid('pk', pk_type, [mstype.float32], self.name) - validator.check_tensor_dtype_valid('gamc', gamc_type, [mstype.float32], self.name) - validator.check_tensor_dtype_valid('gams', gams_type, [mstype.float32], self.name) - validator.check_tensor_dtype_valid('pn', pn_type, [mstype.float32], self.name) - - return pn_type - - -class 
DihedralForceWithAtomEnergy(PrimitiveWithInfer): - """ - Calculate dihedral force and potential energy together. - - The calculation formulas are the same as those of operators DihedralForce() and DihedralEnergy(). - - Because there are a large number of inputs and all of them are related, - there is no way to construct `Examples` using random methods. For details, refer to the webpage `SPONGE in MindSpore - `_. - - Args: - dihedral_numbers(int32): the number of dihedral terms m. - - Inputs: - - **uint_crd_f** (Tensor) - The unsigned int coordinate - value of each atom. The data type is uint32 and the shape is :math:`(n, 3)`. - - **scaler_f** (Tensor) - The 3-D scale factor between - the real space float coordinates and the unsigned int coordinates. - The data type is float32 and the shape is :math:`(3,)`. - - **atom_a** (Tensor) - The 1st atom index of each dihedral. - The data type is int32 and the shape is :math:`(m,)`. - - **atom_b** (Tensor) - The 2nd atom index of each dihedral. - The data type is int32 and the shape is :math:`(m,)`. - - **atom_c** (Tensor) - The 3rd atom index of each dihedral. - The data type is int32 and the shape is :math:`(m,)`. - - **atom_d** (Tensor) - The 4th atom index of each dihedral. - 4 atoms are connected in the form a-b-c-d. The data type is int32 and the shape is :math:`(m,)`. - - **ipn** (Tensor) - The period of dihedral angle of each dihedral. - The data type is int32 and the shape is :math:`(m,)`. - - **pk** (Tensor) - The force constant of each dihedral. - The data type is float32 and the shape is :math:`(m,)`. - - **gamc** (Tensor) - k*cos(phi_0) of each dihedral. - The data type is float32 and the shape is :math:`(m,)`. - - **gams** (Tensor) - k*sin(phi_0) of each dihedral. - The data type is float32 and the shape is :math:`(m,)`. - - **pn** (Tensor) - The floating point form of ipn. - The data type is float32 and the shape is :math:`(m,)`. - - Outputs: - - **frc_f** (Tensor) - Same as operator DihedralForce(). - The data type is float32 and the shape is :math:`(n, 3)`. - - **ene** (Tensor) - Same as operator DihedralAtomEnergy(). - The data type is float32 and the shape is :math:`(n,)`.
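Both outputs hinge on the torsion angle phi of each a-b-c-d quadruple. As a reference point, here is the textbook computation of phi on real-space coordinates (the kernel itself works on the scaled unsigned-int coordinates); the atan2 form is the numerically robust one.

    import numpy as np

    def dihedral_angle(r_a, r_b, r_c, r_d):
        # Torsion angle for four atoms connected a-b-c-d, each r_* of shape (3,).
        b1, b2, b3 = r_b - r_a, r_c - r_b, r_d - r_c
        n1, n2 = np.cross(b1, b2), np.cross(b2, b3)   # normals of the two planes
        x = np.dot(n1, n2)
        y = np.dot(np.cross(n1, n2), b2) / np.linalg.norm(b2)
        return np.arctan2(y, x)                       # phi in (-pi, pi]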
- - Supported Platforms: - ``GPU`` - """ - - @prim_attr_register - def __init__(self, dihedral_numbers): - """Initialize DihedralForceWithAtomEnergy.""" - validator.check_value_type('dihedral_numbers', dihedral_numbers, int, self.name) - self.dihedral_numbers = dihedral_numbers - self.init_prim_io_names(inputs=['uint_crd_f', 'scaler_f', 'atom_a', 'atom_b', 'atom_c', 'atom_d', 'ipn', 'pk', - 'gamc', 'gams', 'pn'], - outputs=['frc_f', 'ene']) - self.add_prim_attr('dihedral_numbers', self.dihedral_numbers) - - def infer_shape(self, uint_crd_f_shape, scaler_f_shape, atom_a_shape, atom_b_shape, atom_c_shape, atom_d_shape, - ipn_shape, pk_shape, gamc_shape, gams_shape, pn_shape): - cls_name = self.name - n = uint_crd_f_shape[0] - m = self.dihedral_numbers - validator.check_int(len(uint_crd_f_shape), 2, Rel.EQ, "uint_crd_f_dim", cls_name) - validator.check_int(len(scaler_f_shape), 1, Rel.EQ, "scaler_f_dim", cls_name) - validator.check_int(len(atom_a_shape), 1, Rel.EQ, "atom_a_dim", cls_name) - validator.check_int(len(atom_b_shape), 1, Rel.EQ, "atom_b_dim", cls_name) - validator.check_int(len(atom_c_shape), 1, Rel.EQ, "atom_c_dim", cls_name) - validator.check_int(len(atom_d_shape), 1, Rel.EQ, "atom_d_dim", cls_name) - validator.check_int(len(ipn_shape), 1, Rel.EQ, "ipn_dim", cls_name) - validator.check_int(len(pk_shape), 1, Rel.EQ, "pk_dim", cls_name) - validator.check_int(len(gamc_shape), 1, Rel.EQ, "gamc_dim", cls_name) - validator.check_int(len(gams_shape), 1, Rel.EQ, "gams_dim", cls_name) - validator.check_int(len(pn_shape), 1, Rel.EQ, "pn_dim", cls_name) - - validator.check_int(uint_crd_f_shape[1], 3, Rel.EQ, "uint_crd_f_shape[1]", cls_name) - validator.check_int(scaler_f_shape[0], 3, Rel.EQ, "scaler_f_shape", cls_name) - validator.check_int(atom_a_shape[0], m, Rel.EQ, "atom_a_shape", cls_name) - validator.check_int(atom_b_shape[0], m, Rel.EQ, "atom_b_shape", cls_name) - validator.check_int(atom_c_shape[0], m, Rel.EQ, "atom_c_shape", cls_name) - validator.check_int(atom_d_shape[0], m, Rel.EQ, "atom_d_shape", cls_name) - validator.check_int(ipn_shape[0], m, Rel.EQ, "ipn_shape", cls_name) - validator.check_int(pk_shape[0], m, Rel.EQ, "pk_shape", cls_name) - validator.check_int(gamc_shape[0], m, Rel.EQ, "gamc_shape", cls_name) - validator.check_int(gams_shape[0], m, Rel.EQ, "gams_shape", cls_name) - validator.check_int(pn_shape[0], m, Rel.EQ, "pn_shape", cls_name) - return uint_crd_f_shape, [n,] - - def infer_dtype(self, uint_crd_f_dtype, scaler_f_type, atom_a_type, atom_b_type, atom_c_type, atom_d_type, - ipn_type, pk_type, gamc_type, gams_type, pn_type): - validator.check_tensor_dtype_valid('uint_crd_f', uint_crd_f_dtype, [mstype.uint32], self.name) - validator.check_tensor_dtype_valid('scaler_f', scaler_f_type, [mstype.float32], self.name) - validator.check_tensor_dtype_valid('atom_a', atom_a_type, [mstype.int32], self.name) - validator.check_tensor_dtype_valid('atom_b', atom_b_type, [mstype.int32], self.name) - validator.check_tensor_dtype_valid('atom_c', atom_c_type, [mstype.int32], self.name) - validator.check_tensor_dtype_valid('atom_d', atom_d_type, [mstype.int32], self.name) - validator.check_tensor_dtype_valid('ipn', ipn_type, [mstype.int32], self.name) - validator.check_tensor_dtype_valid('pk', pk_type, [mstype.float32], self.name) - validator.check_tensor_dtype_valid('gamc', gamc_type, [mstype.float32], self.name) - validator.check_tensor_dtype_valid('gams', gams_type, [mstype.float32], self.name) - validator.check_tensor_dtype_valid('pn', pn_type, [mstype.float32], self.name) - - 
return pn_type, pn_type - - -class AngleForce(PrimitiveWithInfer): - """ - Calculate the force exerted by angles made of 3 atoms on the - corresponding atoms. Assume the number of angles is m and the - number of atoms is n. - - Because there are a large number of inputs and all of them are related, - there is no way to construct `Examples` using random methods. For details, refer to the webpage `SPONGE in MindSpore - `_. - - .. math:: - dr_{ab} = (x_b-x_a, y_b-y_a, z_b-z_a) - .. math:: - dr_{cb} = (x_b-x_c, y_b-y_c, z_b-z_c) - .. math:: - theta = arccos(inner_product(dr_{ab}, dr_{cb})/|dr_{ab}|/|dr_{cb}|) - .. math:: - F_a = -2*k*(theta-theta_0)/sin(theta)*[cos(theta)/|dr_{ab}|^2*dr_{ab} - - 1/|dr_{ab}|/|dr_{cb}|*dr_{cb}] - .. math:: - F_c = -2*k*(theta-theta_0)/sin(theta)*[cos(theta)/|dr_{cb}|^2*dr_{cb} - - 1/|dr_{cb}|/|dr_{ab}|*dr_{ab}] - .. math:: - F_b = -F_a - F_c - - Args: - angle_numbers(int32): the number of angles m. - - Inputs: - - **uint_crd_f** (Tensor) - The unsigned int coordinate value of each atom. - The data type is uint32 and the shape is :math:`(n, 3)`. - - **scaler_f** (Tensor) - The 3-D scale factor between - the real space float coordinates and the unsigned int coordinates. - The data type is float32 and the shape is :math:`(3,)`. - - **atom_a** (Tensor) - The 1st atom index of each angle. - The data type is int32 and the shape is :math:`(m,)`. - - **atom_b** (Tensor) - The 2nd and the central atom index of each angle. - The data type is int32 and the shape is :math:`(m,)`. - - **atom_c** (Tensor) - The 3rd atom index of each angle. - The data type is int32 and the shape is :math:`(m,)`. - - **angle_k** (Tensor) - The force constant for each angle. - The data type is float32 and the shape is :math:`(m,)`. - - **angle_theta0** (Tensor) - The equilibrium position value for each angle. - The data type is float32 and the shape is :math:`(m,)`. - - Outputs: - - **frc_f** (Tensor) - The force felt by each atom. - The data type is float32 and the shape is :math:`(n, 3)`.
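The force formulas above translate almost line for line into NumPy. Below is a sketch for a single angle on real-space positions (the operator itself consumes scaled unsigned-int coordinates); the energy line anticipates the AngleEnergy() formula documented further below.

    import numpy as np

    def angle_force(r_a, r_b, r_c, k, theta0):
        # Direct transcription of the F_a / F_c / F_b formulas above.
        dr_ab, dr_cb = r_b - r_a, r_b - r_c
        len_ab, len_cb = np.linalg.norm(dr_ab), np.linalg.norm(dr_cb)
        cos_theta = np.dot(dr_ab, dr_cb) / (len_ab * len_cb)
        theta = np.arccos(cos_theta)
        pref = -2.0 * k * (theta - theta0) / np.sin(theta)
        f_a = pref * (cos_theta / len_ab**2 * dr_ab - dr_cb / (len_ab * len_cb))
        f_c = pref * (cos_theta / len_cb**2 * dr_cb - dr_ab / (len_cb * len_ab))
        f_b = -f_a - f_c
        energy = k * (theta - theta0)**2   # AngleEnergy() formula, for reference
        return f_a, f_b, f_c, energy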
- - Supported Platforms: - ``GPU`` - """ - - @prim_attr_register - def __init__(self, angle_numbers): - """Initialize AngleForce.""" - validator.check_value_type('angle_numbers', angle_numbers, int, self.name) - self.angle_numbers = angle_numbers - self.init_prim_io_names(inputs=['uint_crd_f', 'scaler_f', 'atom_a', 'atom_b', 'atom_c', 'angle_k', - 'angle_theta0'], - outputs=['frc_f']) - self.add_prim_attr('angle_numbers', self.angle_numbers) - - def infer_shape(self, uint_crd_f_shape, scaler_f_shape, atom_a_shape, atom_b_shape, atom_c_shape, angle_k_shape, - angle_theta0_shape): - cls_name = self.name - m = self.angle_numbers - validator.check_int(len(uint_crd_f_shape), 2, Rel.EQ, "uint_crd_f_dim", cls_name) - validator.check_int(len(scaler_f_shape), 1, Rel.EQ, "scaler_f_dim", cls_name) - validator.check_int(len(atom_a_shape), 1, Rel.EQ, "atom_a_dim", cls_name) - validator.check_int(len(atom_b_shape), 1, Rel.EQ, "atom_b_dim", cls_name) - validator.check_int(len(atom_c_shape), 1, Rel.EQ, "atom_c_dim", cls_name) - validator.check_int(len(angle_k_shape), 1, Rel.EQ, "angle_k_dim", cls_name) - validator.check_int(len(angle_theta0_shape), 1, Rel.EQ, "angle_theta0_dim", cls_name) - - validator.check_int(uint_crd_f_shape[1], 3, Rel.EQ, "uint_crd_f_shape[1]", cls_name) - validator.check_int(scaler_f_shape[0], 3, Rel.EQ, "scaler_f_shape", cls_name) - validator.check_int(atom_a_shape[0], m, Rel.EQ, "atom_a_shape", cls_name) - validator.check_int(atom_b_shape[0], m, Rel.EQ, "atom_b_shape", cls_name) - validator.check_int(atom_c_shape[0], m, Rel.EQ, "atom_c_shape", cls_name) - validator.check_int(angle_k_shape[0], m, Rel.EQ, "angle_k_shape", cls_name) - validator.check_int(angle_theta0_shape[0], m, Rel.EQ, "angle_theta0_shape", cls_name) - return uint_crd_f_shape - - def infer_dtype(self, uint_crd_f_dtype, scaler_f_type, atom_a_type, atom_b_type, atom_c_type, angle_k_type, - angle_theta0_type): - validator.check_tensor_dtype_valid('uint_crd_f', uint_crd_f_dtype, [mstype.uint32], self.name) - validator.check_tensor_dtype_valid('scaler_f', scaler_f_type, [mstype.float32], self.name) - validator.check_tensor_dtype_valid('atom_a', atom_a_type, [mstype.int32], self.name) - validator.check_tensor_dtype_valid('atom_b', atom_b_type, [mstype.int32], self.name) - validator.check_tensor_dtype_valid('atom_c', atom_c_type, [mstype.int32], self.name) - validator.check_tensor_dtype_valid('angle_k', angle_k_type, [mstype.float32], self.name) - validator.check_tensor_dtype_valid('angle_theta0', angle_theta0_type, [mstype.float32], self.name) - return angle_k_type - - -class AngleEnergy(PrimitiveWithInfer): - """ - Calculate the energy caused by 3-atom angle terms. Assume the number of angles is m and the - number of atoms is n. - - Because there are a large number of inputs and all of them are related, - there is no way to construct `Examples` using random methods. For details, refer to the webpage `SPONGE in MindSpore - `_. - - .. math:: - dr_{ab} = (x_b-x_a, y_b-y_a, z_b-z_a) - .. math:: - dr_{cb} = (x_b-x_c, y_b-y_c, z_b-z_c) - .. math:: - theta = arccos(inner_product(dr_{ab}, dr_{cb})/|dr_{ab}|/|dr_{cb}|) - .. math:: - E = k*(theta - theta_0)^2 - - Args: - angle_numbers(int32): the number of angles m. - - Inputs: - - **uint_crd_f** (Tensor) - The unsigned int coordinate value of each atom. - The data type is uint32 and the shape is :math:`(n, 3)`. - - **scaler_f** (Tensor) - The 3-D scale factor between - the real space float coordinates and the unsigned int coordinates. 
- The data type is float32 and the shape is :math:`(3,)`. - - **atom_a** (Tensor) - The 1st atom index of each angle. - The data type is int32 and the shape is :math:`(m,)`. - - **atom_b** (Tensor) - The 2nd and the central atom index of each angle. - The data type is int32 and the shape is :math:`(m,)`. - - **atom_c** (Tensor) - The 3rd atom index of each angle. - The data type is int32 and the shape is :math:`(m,)`. - - **angle_k** (Tensor) - The force constant for each angle. - The data type is float32 and the shape is :math:`(m,)`. - - **angle_theta0** (Tensor) - The equilibrium position value for each angle. - The data type is float32 and the shape is :math:`(m,)`. - - Outputs: - - **ene** (Tensor) - The potential energy for each angle term. - The data type is float32 and the shape is :math:`(m,)`. - - Supported Platforms: - ``GPU`` - """ - - @prim_attr_register - def __init__(self, angle_numbers): - """Initialize AngleEnergy.""" - validator.check_value_type('angle_numbers', angle_numbers, int, self.name) - self.angle_numbers = angle_numbers - self.init_prim_io_names(inputs=['uint_crd_f', 'scaler_f', 'atom_a', 'atom_b', 'atom_c', 'angle_k', - 'angle_theta0'], - outputs=['ene']) - self.add_prim_attr('angle_numbers', self.angle_numbers) - - def infer_shape(self, uint_crd_f_shape, scaler_f_shape, atom_a_shape, atom_b_shape, atom_c_shape, angle_k_shape, - angle_theta0_shape): - cls_name = self.name - m = self.angle_numbers - validator.check_int(len(uint_crd_f_shape), 2, Rel.EQ, "uint_crd_f_dim", cls_name) - validator.check_int(len(scaler_f_shape), 1, Rel.EQ, "scaler_f_dim", cls_name) - validator.check_int(len(atom_a_shape), 1, Rel.EQ, "atom_a_dim", cls_name) - validator.check_int(len(atom_b_shape), 1, Rel.EQ, "atom_b_dim", cls_name) - validator.check_int(len(atom_c_shape), 1, Rel.EQ, "atom_c_dim", cls_name) - validator.check_int(len(angle_k_shape), 1, Rel.EQ, "angle_k_dim", cls_name) - validator.check_int(len(angle_theta0_shape), 1, Rel.EQ, "angle_theta0_dim", cls_name) - - validator.check_int(uint_crd_f_shape[1], 3, Rel.EQ, "uint_crd_f_shape[1]", cls_name) - validator.check_int(scaler_f_shape[0], 3, Rel.EQ, "scaler_f_shape", cls_name) - validator.check_int(atom_a_shape[0], m, Rel.EQ, "atom_a_shape", cls_name) - validator.check_int(atom_b_shape[0], m, Rel.EQ, "atom_b_shape", cls_name) - validator.check_int(atom_c_shape[0], m, Rel.EQ, "atom_c_shape", cls_name) - validator.check_int(angle_k_shape[0], m, Rel.EQ, "angle_k_shape", cls_name) - validator.check_int(angle_theta0_shape[0], m, Rel.EQ, "angle_theta0_shape", cls_name) - return [m,] - - def infer_dtype(self, uint_crd_f_dtype, scaler_f_type, atom_a_type, atom_b_type, atom_c_type, angle_k_type, - angle_theta0_type): - validator.check_tensor_dtype_valid('uint_crd_f', uint_crd_f_dtype, [mstype.uint32], self.name) - validator.check_tensor_dtype_valid('scaler_f', scaler_f_type, [mstype.float32], self.name) - validator.check_tensor_dtype_valid('atom_a', atom_a_type, [mstype.int32], self.name) - validator.check_tensor_dtype_valid('atom_b', atom_b_type, [mstype.int32], self.name) - validator.check_tensor_dtype_valid('atom_c', atom_c_type, [mstype.int32], self.name) - validator.check_tensor_dtype_valid('angle_k', angle_k_type, [mstype.float32], self.name) - validator.check_tensor_dtype_valid('angle_theta0', angle_theta0_type, [mstype.float32], self.name) - return angle_k_type - - -class AngleAtomEnergy(Primitive): - """ - Add the potential energy caused by angle terms to the total potential - energy of each atom. 
Assume the number of angles is m and the - number of atoms is n. - - The calculation formula is the same as operator AngleEnergy(). - - Because there are a large number of inputs and all of them are related, - there is no way to construct `Examples` using random methods. For details, refer to the webpage `SPONGE in MindSpore - `_. - - Args: - angle_numbers(int32): the number of angles m. - - Inputs: - - **uint_crd_f** (Tensor) - The unsigned int coordinate value of each atom. - The data type is uint32 and the shape is :math:`(n, 3)`. - - **scaler_f** (Tensor) - The 3-D scale factor between - the real space float coordinates and the unsigned int coordinates. - The data type is float32 and the shape is :math:`(3,)`. - - **atom_a** (Tensor) - The 1st atom index of each angle. - The data type is int32 and the shape is :math:`(m,)`. - - **atom_b** (Tensor) - The 2nd and the central atom index of each angle. - The data type is int32 and the shape is :math:`(m,)`. - - **atom_c** (Tensor) - The 3rd atom index of each angle. - The data type is int32 and the shape is :math:`(m,)`. - - **angle_k** (Tensor) - The force constant for each angle. - The data type is float32 and the shape is :math:`(m,)`. - - **angle_theta0** (Tensor) - The equilibrium position value for each angle. - The data type is float32 and the shape is :math:`(m,)`. - - Outputs: - - **ene** (Tensor) - The accumulated potential energy for each atom. - The data type is float32 and the shape is :math:`(n,)`. - - Supported Platforms: - ``GPU`` - """ - - @prim_attr_register - def __init__(self, angle_numbers): - """Initialize AngleAtomEnergy.""" - validator.check_value_type('angle_numbers', angle_numbers, int, self.name) - self.angle_numbers = angle_numbers - self.init_prim_io_names(inputs=['uint_crd_f', 'scaler_f', 'atom_a', 'atom_b', 'atom_c', 'angle_k', - 'angle_theta0'], - outputs=['ene']) - self.add_prim_attr('angle_numbers', self.angle_numbers) - - -class AngleForceWithAtomEnergy(PrimitiveWithInfer): - """ - Calculate angle force and potential energy together. Assume the number of angles is m and the - number of atoms is n. - - The calculation formulas are the same as those of operators AngleForce() and AngleEnergy(). - - Because there are a large number of inputs and all of them are related, - there is no way to construct `Examples` using random methods. For details, refer to the webpage `SPONGE in MindSpore - `_. - - Args: - angle_numbers(int32): the number of angles m. - - Inputs: - - **uint_crd_f** (Tensor) - The unsigned int coordinate value of each atom. - The data type is uint32 and the shape is :math:`(n, 3)`. - - **scaler_f** (Tensor) - The 3-D scale factor between - the real space float coordinates and the unsigned int coordinates. - The data type is float32 and the shape is :math:`(3,)`. - - **atom_a** (Tensor) - The 1st atom index of each angle. - The data type is int32 and the shape is :math:`(m,)`. - - **atom_b** (Tensor) - The 2nd and the central atom index of each angle. - The data type is int32 and the shape is :math:`(m,)`. - - **atom_c** (Tensor) - The 3rd atom index of each angle. - The data type is int32 and the shape is :math:`(m,)`. - - **angle_k** (Tensor) - The force constant for each angle. - The data type is float32 and the shape is :math:`(m,)`. - - **angle_theta0** (Tensor) - The equilibrium position value for each angle. - The data type is float32 and the shape is :math:`(m,)`. - - Outputs: - - **frc_f** (Tensor) - Same as operator AngleForce(). - The data type is float32 and the shape is :math:`(n, 3)`.
- - **ene** (Tensor) - Same as operator AngleAtomEnergy(). - The data type is float32 and the shape is :math:`(n,)`. - - Supported Platforms: - ``GPU`` - """ - - @prim_attr_register - def __init__(self, angle_numbers): - """Initialize AngleForceWithAtomEnergy.""" - validator.check_value_type('angle_numbers', angle_numbers, int, self.name) - self.angle_numbers = angle_numbers - self.init_prim_io_names(inputs=['uint_crd_f', 'scaler_f', 'atom_a', 'atom_b', 'atom_c', 'angle_k', - 'angle_theta0'], - outputs=['frc_f', 'ene']) - self.add_prim_attr('angle_numbers', self.angle_numbers) - - def infer_shape(self, uint_crd_f_shape, scaler_f_shape, atom_a_shape, atom_b_shape, atom_c_shape, angle_k_shape, - angle_theta0_shape): - cls_name = self.name - n = uint_crd_f_shape[0] - m = self.angle_numbers - validator.check_int(len(uint_crd_f_shape), 2, Rel.EQ, "uint_crd_f_dim", cls_name) - validator.check_int(len(scaler_f_shape), 1, Rel.EQ, "scaler_f_dim", cls_name) - validator.check_int(len(atom_a_shape), 1, Rel.EQ, "atom_a_dim", cls_name) - validator.check_int(len(atom_b_shape), 1, Rel.EQ, "atom_b_dim", cls_name) - validator.check_int(len(atom_c_shape), 1, Rel.EQ, "atom_c_dim", cls_name) - validator.check_int(len(angle_k_shape), 1, Rel.EQ, "angle_k_dim", cls_name) - validator.check_int(len(angle_theta0_shape), 1, Rel.EQ, "angle_theta0_dim", cls_name) - - validator.check_int(uint_crd_f_shape[1], 3, Rel.EQ, "uint_crd_f_shape[1]", cls_name) - validator.check_int(scaler_f_shape[0], 3, Rel.EQ, "scaler_f_shape", cls_name) - validator.check_int(atom_a_shape[0], m, Rel.EQ, "atom_a_shape", cls_name) - validator.check_int(atom_b_shape[0], m, Rel.EQ, "atom_b_shape", cls_name) - validator.check_int(atom_c_shape[0], m, Rel.EQ, "atom_c_shape", cls_name) - validator.check_int(angle_k_shape[0], m, Rel.EQ, "angle_k_shape", cls_name) - validator.check_int(angle_theta0_shape[0], m, Rel.EQ, "angle_theta0_shape", cls_name) - return uint_crd_f_shape, [n,] - - def infer_dtype(self, uint_crd_f_dtype, scaler_f_type, atom_a_type, atom_b_type, atom_c_type, angle_k_type, - angle_theta0_type): - validator.check_tensor_dtype_valid('uint_crd_f', uint_crd_f_dtype, [mstype.uint32], self.name) - validator.check_tensor_dtype_valid('scaler_f', scaler_f_type, [mstype.float32], self.name) - validator.check_tensor_dtype_valid('atom_a', atom_a_type, [mstype.int32], self.name) - validator.check_tensor_dtype_valid('atom_b', atom_b_type, [mstype.int32], self.name) - validator.check_tensor_dtype_valid('atom_c', atom_c_type, [mstype.int32], self.name) - validator.check_tensor_dtype_valid('angle_k', angle_k_type, [mstype.float32], self.name) - validator.check_tensor_dtype_valid('angle_theta0', angle_theta0_type, [mstype.float32], self.name) - return angle_k_type, angle_k_type - - -class Dihedral14LJForce(PrimitiveWithInfer): - """ - Calculate the Lennard-Jones part of 1,4 dihedral force correction - for each necessary dihedral term on the corresponding atoms. - - Assume the number of necessary dihedral 1,4 terms is m, the number of atoms is n, - and the number of Lennard-Jones types for all atoms is P, which means - there will be q = P*(P+1)/2 types of possible Lennard-Jones interactions - for all kinds of atom pairs. - - Because there are a large number of inputs and all of them are related, - there is no way to construct `Examples` using random methods. For details, refer to the webpage `SPONGE in MindSpore - `_. - - .. math:: - dr = (x_a-x_b, y_a-y_b, z_a-z_b) - .. 
math:: - F = k*(-12*A/|dr|^{14} + 6*B/|dr|^{8})*dr - - Args: - nb14_numbers (int32): the number of necessary dihedral 1,4 terms m. - atom_numbers (int32): the number of atoms n. - - Inputs: - - **uint_crd_f** (Tensor) - The unsigned int coordinate value of each atom. - The data type is uint32 and the shape is :math:`(n, 3)`. - - **LJ_type** (Tensor) - The Lennard-Jones type of each atom. - The data type is int32 and the shape is :math:`(n,)`. - - **charge** (Tensor) - The charge of each atom. - The data type is float32 and the shape is :math:`(n,)`. - - **boxlength_f** (Tensor) - The length of molecular simulation box in 3 dimensions. - The data type is float32 and the shape is :math:`(3,)`. - - **a_14** (Tensor) - The first atom index of each dihedral 1,4 term. - The data type is int32 and the shape is :math:`(m,)`. - - **b_14** (Tensor) - The second atom index of each dihedral 1,4 term. - The data type is int32 and the shape is :math:`(m,)`. - - **lj_scale_factor** (Tensor) - The scale factor for the - Lennard-Jones part of force correction of each dihedral 1,4 term. - The data type is float32 and the shape is :math:`(m,)`. - - **LJ_type_A** (Tensor) - The A parameter in Lennard-Jones scheme of each atom pair type. - q is the number of atom pair types. The data type is float32 and the shape is :math:`(q,)`. - - **LJ_type_B** (Tensor) - The B parameter in Lennard-Jones scheme of each atom pair type. - q is the number of atom pair types. The data type is float32 and the shape is :math:`(q,)`. - - Outputs: - - **frc_f** (Tensor) - The force felt by each atom. - The data type is float32 and the shape is :math:`(n, 3)`. - - Supported Platforms: - ``GPU`` - """ - - @prim_attr_register - def __init__(self, nb14_numbers, atom_numbers): - """Initialize Dihedral14LJForce.""" - validator.check_value_type('nb14_numbers', nb14_numbers, int, self.name) - validator.check_value_type('atom_numbers', atom_numbers, int, self.name) - self.dihedral_14_numbers = nb14_numbers - self.atom_numbers = atom_numbers - self.init_prim_io_names( - inputs=['uint_crd_f', 'LJtype', 'charge', 'boxlength_f', 'a_14', 'b_14', 'lj_scale_factor', - 'LJ_type_A', 'LJ_type_B'], - outputs=['frc_f']) - self.add_prim_attr('dihedral_14_numbers', self.dihedral_14_numbers) - self.add_prim_attr('atom_numbers', self.atom_numbers) - - def infer_shape(self, uint_crd_f_shape, ljtype_shape, charge_shape, boxlength_f_shape, a_14_shape, b_14_shape, - lj_scale_factor_shape, lj_type_a_shape, lj_type_b_shape): - cls_name = self.name - n = self.atom_numbers - m = self.dihedral_14_numbers - q = lj_type_a_shape[0] - validator.check_int(len(uint_crd_f_shape), 2, Rel.EQ, "uint_crd_f_dim", cls_name) - validator.check_int(len(ljtype_shape), 1, Rel.EQ, "LJtype_dim", cls_name) - validator.check_int(len(charge_shape), 1, Rel.EQ, "charge_dim", cls_name) - validator.check_int(len(boxlength_f_shape), 1, Rel.EQ, "boxlength_f_dim", cls_name) - validator.check_int(len(a_14_shape), 1, Rel.EQ, "a_14_dim", cls_name) - validator.check_int(len(b_14_shape), 1, Rel.EQ, "b_14_dim", cls_name) - validator.check_int(len(lj_scale_factor_shape), 1, Rel.EQ, "lj_scale_factor_dim", cls_name) - validator.check_int(len(lj_type_b_shape), 1, Rel.EQ, "LJ_type_B_dim", cls_name) - - validator.check_int(uint_crd_f_shape[0], n, Rel.EQ, "uint_crd_f[0]", cls_name) - validator.check_int(uint_crd_f_shape[1], 3, Rel.EQ, "uint_crd_f[1]", cls_name) - validator.check_int(ljtype_shape[0], n, Rel.EQ, "LJtype", cls_name) - validator.check_int(charge_shape[0], n, Rel.EQ, "charge", cls_name) - 
validator.check_int(boxlength_f_shape[0], 3, Rel.EQ, "boxlength_f", cls_name) - validator.check_int(lj_type_b_shape[0], q, Rel.EQ, "LJ_type_B", cls_name) - validator.check_int(a_14_shape[0], m, Rel.EQ, "a_14_shape", cls_name) - validator.check_int(b_14_shape[0], m, Rel.EQ, "b_14_shape", cls_name) - validator.check_int(lj_scale_factor_shape[0], m, Rel.EQ, "lj_scale_factor_shape", cls_name) - return uint_crd_f_shape - - def infer_dtype(self, uint_crd_f_dtype, ljtype_dtype, charge_dtype, boxlength_f_type, a_14_type, b_14_type, - lj_scale_factor_type, lj_type_a_type, lj_type_b_type): - validator.check_tensor_dtype_valid('uint_crd_f', uint_crd_f_dtype, [mstype.uint32], self.name) - validator.check_tensor_dtype_valid('LJtype', ljtype_dtype, [mstype.int32], self.name) - validator.check_tensor_dtype_valid('charge', charge_dtype, [mstype.float32], self.name) - validator.check_tensor_dtype_valid('boxlength_f', boxlength_f_type, [mstype.float32], self.name) - - validator.check_tensor_dtype_valid('a_14', a_14_type, [mstype.int32], self.name) - validator.check_tensor_dtype_valid('b_14', b_14_type, [mstype.int32], self.name) - - validator.check_tensor_dtype_valid('lj_scale_factor', lj_scale_factor_type, [mstype.float32], self.name) - validator.check_tensor_dtype_valid('LJ_type_A', lj_type_a_type, [mstype.float32], self.name) - validator.check_tensor_dtype_valid('LJ_type_B', lj_type_b_type, [mstype.float32], self.name) - return lj_type_b_type - - -class Dihedral14LJEnergy(PrimitiveWithInfer): - """ - Calculate the Lennard-Jones part of 1,4 dihedral energy correction for - each necessary dihedral term on the corresponding atoms. - - Because there are a large number of inputs and all of them are related, - there is no way to construct `Examples` using random methods. For details, refer to the webpage `SPONGE in MindSpore - `_. - - .. math:: - dr = (x_a-x_b, y_a-y_b, z_a-z_b) - .. math:: - E = k*(A/|dr|^{12} - B/|dr|^{6}) - - Args: - nb14_numbers (int32): the number of necessary dihedral 1,4 terms m. - atom_numbers (int32): the number of atoms n. - - Inputs: - - **uint_crd_f** (Tensor) - The unsigned int coordinate value of each atom. - The data type is uint32 and the shape is :math:`(n, 3)`. - - **LJ_type** (Tensor) - The Lennard-Jones type of each atom. - The data type is int32 and the shape is :math:`(n,)`. - - **charge** (Tensor) - The charge of each atom. - The data type is float32 and the shape is :math:`(n,)`. - - **boxlength_f** (Tensor) - The length of molecular simulation box in 3 dimensions. - The data type is float32 and the shape is :math:`(3,)`. - - **a_14** (Tensor) - The first atom index of each dihedral 1,4 term. - The data type is int32 and the shape is :math:`(m,)`. - - **b_14** (Tensor) - The second atom index of each dihedral 1,4 term. - The data type is int32 and the shape is :math:`(m,)`. - - **lj_scale_factor** (Tensor) - The scale factor for the - Lennard-Jones part of force correction of each dihedral 1,4 term. - The data type is float32 and the shape is :math:`(m,)`. - - **LJ_type_A** (Tensor) - The A parameter in Lennard-Jones scheme of each atom pair type. - q is the number of atom pair types. The data type is float32 and the shape is :math:`(q,)`. - - **LJ_type_B** (Tensor) - The B parameter in Lennard-Jones scheme of each atom pair type. - q is the number of atom pair types. The data type is float32 and the shape is :math:`(q,)`. - - Outputs: - - **ene** (Tensor) - The Lennard-Jones potential energy correction. - The data type is float32 and the shape is :math:`(m,)`.
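For a single 1,4 pair the energy above is a one-liner. A NumPy sketch, where `dr` is the real-space displacement already recovered from the unsigned-int coordinates and box length, `k` is that pair's lj_scale_factor entry, and `a`, `b` are the A/B parameters looked up for its Lennard-Jones type pair:

    import numpy as np

    def nb14_lj_energy(dr, k, a, b):
        # E = k * (A/|dr|^12 - B/|dr|^6), per the docstring formula above.
        r2 = np.dot(dr, dr)
        inv_r6 = 1.0 / r2**3
        return k * (a * inv_r6 * inv_r6 - b * inv_r6)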
- - Supported Platforms: - ``GPU`` - """ - - @prim_attr_register - def __init__(self, nb14_numbers, atom_numbers): - """Initialize Dihedral14LJEnergy.""" - validator.check_value_type('nb14_numbers', nb14_numbers, int, self.name) - validator.check_value_type('atom_numbers', atom_numbers, int, self.name) - self.dihedral_14_numbers = nb14_numbers - self.atom_numbers = atom_numbers - - self.init_prim_io_names( - inputs=['uint_crd_f', 'LJtype', 'charge', 'boxlength_f', 'a_14', 'b_14', 'lj_scale_factor', - 'LJ_type_A', 'LJ_type_B'], - outputs=['ene']) - self.add_prim_attr('dihedral_14_numbers', self.dihedral_14_numbers) - self.add_prim_attr('atom_numbers', self.atom_numbers) - - def infer_shape(self, uint_crd_f_shape, ljtype_shape, charge_shape, boxlength_f_shape, a_14_shape, b_14_shape, - lj_scale_factor_shape, lj_type_a_shape, lj_type_b_shape): - cls_name = self.name - n = self.atom_numbers - m = self.dihedral_14_numbers - q = lj_type_a_shape[0] - validator.check_int(len(uint_crd_f_shape), 2, Rel.EQ, "uint_crd_f_dim", cls_name) - validator.check_int(len(ljtype_shape), 1, Rel.EQ, "LJtype_dim", cls_name) - validator.check_int(len(charge_shape), 1, Rel.EQ, "charge_dim", cls_name) - validator.check_int(len(boxlength_f_shape), 1, Rel.EQ, "boxlength_f_dim", cls_name) - validator.check_int(len(a_14_shape), 1, Rel.EQ, "a_14_dim", cls_name) - validator.check_int(len(b_14_shape), 1, Rel.EQ, "b_14_dim", cls_name) - validator.check_int(len(lj_scale_factor_shape), 1, Rel.EQ, "lj_scale_factor_dim", cls_name) - validator.check_int(len(lj_type_b_shape), 1, Rel.EQ, "LJ_type_B_dim", cls_name) - - validator.check_int(uint_crd_f_shape[0], n, Rel.EQ, "uint_crd_f[0]", cls_name) - validator.check_int(uint_crd_f_shape[1], 3, Rel.EQ, "uint_crd_f[1]", cls_name) - validator.check_int(ljtype_shape[0], n, Rel.EQ, "LJtype", cls_name) - validator.check_int(charge_shape[0], n, Rel.EQ, "charge", cls_name) - validator.check_int(boxlength_f_shape[0], 3, Rel.EQ, "boxlength_f", cls_name) - validator.check_int(lj_type_b_shape[0], q, Rel.EQ, "LJ_type_B", cls_name) - validator.check_int(a_14_shape[0], m, Rel.EQ, "a_14_shape", cls_name) - validator.check_int(b_14_shape[0], m, Rel.EQ, "b_14_shape", cls_name) - validator.check_int(lj_scale_factor_shape[0], m, Rel.EQ, "lj_scale_factor_shape", cls_name) - return [self.dihedral_14_numbers,] - - def infer_dtype(self, uint_crd_f_dtype, ljtype_dtype, charge_dtype, boxlength_f_type, a_14_type, b_14_type, - lj_scale_factor_type, lj_type_a_type, lj_type_b_type): - validator.check_tensor_dtype_valid('uint_crd_f', uint_crd_f_dtype, [mstype.uint32], self.name) - validator.check_tensor_dtype_valid('LJtype', ljtype_dtype, [mstype.int32], self.name) - validator.check_tensor_dtype_valid('charge', charge_dtype, [mstype.float32], self.name) - validator.check_tensor_dtype_valid('boxlength_f', boxlength_f_type, [mstype.float32], self.name) - validator.check_tensor_dtype_valid('a_14', a_14_type, [mstype.int32], self.name) - validator.check_tensor_dtype_valid('b_14', b_14_type, [mstype.int32], self.name) - validator.check_tensor_dtype_valid('lj_scale_factor', lj_scale_factor_type, [mstype.float32], self.name) - validator.check_tensor_dtype_valid('LJ_type_A', lj_type_a_type, [mstype.float32], self.name) - validator.check_tensor_dtype_valid('LJ_type_B', lj_type_b_type, [mstype.float32], self.name) - - return lj_type_a_type - - -class Dihedral14LJForceWithDirectCF(PrimitiveWithInfer): - """ - Calculate the Lennard-Jones part and the Coulomb part of force correction - for each necessary dihedral 1,4 term. 
- - Because there are a large number of inputs and all of them are related, - there is no way to construct `Examples` using random methods. For details, refer to the webpage `SPONGE in MindSpore - `_. - - The calculation formula of the Lennard-Jones part is the same as operator - Dihedral14LJForce(), and the Coulomb part is as follows: - - .. math:: - dr = (x_a-x_b, y_a-y_b, z_a-z_b) - .. math:: - F = -k*q_a*q_b/|dr|^3*dr - - Args: - nb14_numbers (int32): the number of necessary dihedral 1,4 terms m. - atom_numbers (int32): the number of atoms n. - - Inputs: - - **uint_crd_f** (Tensor) - The unsigned int coordinate value of each atom. - The data type is uint32 and the shape is :math:`(n, 3)`. - - **LJ_type** (Tensor) - The Lennard-Jones type of each atom. - The data type is int32 and the shape is :math:`(n,)`. - - **charge** (Tensor) - The charge of each atom. - The data type is float32 and the shape is :math:`(n,)`. - - **boxlength_f** (Tensor) - The length of molecular simulation box in 3 dimensions. - The data type is float32 and the shape is :math:`(3,)`. - - **a_14** (Tensor) - The first atom index of each dihedral 1,4 term. - The data type is int32 and the shape is :math:`(m,)`. - - **b_14** (Tensor) - The second atom index of each dihedral 1,4 term. - The data type is int32 and the shape is :math:`(m,)`. - - **lj_scale_factor** (Tensor) - The scale factor for the - Lennard-Jones part of force correction of each dihedral 1,4 term. - The data type is float32 and the shape is :math:`(m,)`. - - **cf_scale_factor** (Tensor) - The scale factor for the - Coulomb part of force correction for each dihedral 1,4 term. - The data type is float32 and the shape is :math:`(m,)`. - - **LJ_type_A** (Tensor) - The A parameter in Lennard-Jones scheme of each atom pair type. - q is the number of atom pair types. The data type is float32 and the shape is :math:`(q,)`. - - **LJ_type_B** (Tensor) - The B parameter in Lennard-Jones scheme of each atom pair type. - q is the number of atom pair types. The data type is float32 and the shape is :math:`(q,)`. - - Outputs: - - **frc_f** (Tensor) - The force felt by each atom. 
- The data type is float32 and the shape is :math:`(n, 3)` - - Supported Platforms: - ``GPU`` - """ - - @prim_attr_register - def __init__(self, nb14_numbers, atom_numbers): - """Initialize Dihedral14LJForceWithDirectCF.""" - validator.check_value_type('nb14_numbers', nb14_numbers, int, self.name) - validator.check_value_type('atom_numbers', atom_numbers, int, self.name) - self.dihedral_14_numbers = nb14_numbers - self.atom_numbers = atom_numbers - - self.init_prim_io_names( - inputs=['uint_crd_f', 'LJtype', 'charge', 'boxlength_f', 'a_14', 'b_14', 'lj_scale_factor', - 'cf_scale_factor', - 'LJ_type_A', 'LJ_type_B'], - outputs=['frc_f']) - self.add_prim_attr('dihedral_14_numbers', self.dihedral_14_numbers) - self.add_prim_attr('atom_numbers', self.atom_numbers) - - def infer_shape(self, uint_crd_f_shape, ljtype_shape, charge_shape, boxlength_f_shape, a_14_shape, b_14_shape, - lj_scale_factor_shape, cf_scale_factor_shape, lj_type_a_shape, lj_type_b_shape): - cls_name = self.name - n = self.atom_numbers - m = self.dihedral_14_numbers - q = lj_type_a_shape[0] - validator.check_int(len(uint_crd_f_shape), 2, Rel.EQ, "uint_crd_f_dim", cls_name) - validator.check_int(len(ljtype_shape), 1, Rel.EQ, "LJtype_dim", cls_name) - validator.check_int(len(charge_shape), 1, Rel.EQ, "charge_dim", cls_name) - validator.check_int(len(boxlength_f_shape), 1, Rel.EQ, "boxlength_f_dim", cls_name) - validator.check_int(len(a_14_shape), 1, Rel.EQ, "a_14_dim", cls_name) - validator.check_int(len(b_14_shape), 1, Rel.EQ, "b_14_dim", cls_name) - validator.check_int(len(lj_scale_factor_shape), 1, Rel.EQ, "lj_scale_factor_dim", cls_name) - validator.check_int(len(cf_scale_factor_shape), 1, Rel.EQ, "cf_scale_factor_dim", cls_name) - validator.check_int(len(lj_type_b_shape), 1, Rel.EQ, "LJ_type_B_dim", cls_name) - - validator.check_int(uint_crd_f_shape[0], n, Rel.EQ, "uint_crd_f_shape[0]", cls_name) - validator.check_int(uint_crd_f_shape[1], 3, Rel.EQ, "uint_crd_f_shape[1]", cls_name) - validator.check_int(ljtype_shape[0], n, Rel.EQ, "LJtype_shape", cls_name) - validator.check_int(charge_shape[0], n, Rel.EQ, "charge_shape", cls_name) - validator.check_int(boxlength_f_shape[0], 3, Rel.EQ, "boxlength_f_shape", cls_name) - validator.check_int(lj_type_b_shape[0], q, Rel.EQ, "LJ_type_B_shape", cls_name) - validator.check_int(a_14_shape[0], m, Rel.EQ, "a_14_shape", cls_name) - validator.check_int(b_14_shape[0], m, Rel.EQ, "b_14_shape", cls_name) - validator.check_int(lj_scale_factor_shape[0], m, Rel.EQ, "lj_scale_factor_shape", cls_name) - validator.check_int(cf_scale_factor_shape[0], m, Rel.EQ, "cf_scale_factor_shape", cls_name) - return [self.atom_numbers, 3] - - def infer_dtype(self, uint_crd_f_dtype, ljtype_dtype, charge_dtype, boxlength_f_type, a_14_type, b_14_type, - lj_scale_factor_type, cf_scale_factor_type, lj_type_a_type, lj_type_b_type): - validator.check_tensor_dtype_valid('uint_crd_f', uint_crd_f_dtype, [mstype.uint32], self.name) - validator.check_tensor_dtype_valid('LJtype', ljtype_dtype, [mstype.int32], self.name) - validator.check_tensor_dtype_valid('charge', charge_dtype, [mstype.float32], self.name) - validator.check_tensor_dtype_valid('boxlength_f', boxlength_f_type, [mstype.float32], self.name) - validator.check_tensor_dtype_valid('a_14', a_14_type, [mstype.int32], self.name) - validator.check_tensor_dtype_valid('b_14', b_14_type, [mstype.int32], self.name) - validator.check_tensor_dtype_valid('lj_scale_factor', lj_scale_factor_type, [mstype.float32], self.name) - 
validator.check_tensor_dtype_valid('cf_scale_factor', cf_scale_factor_type, [mstype.float32], self.name) - validator.check_tensor_dtype_valid('LJ_type_A', lj_type_a_type, [mstype.float32], self.name) - validator.check_tensor_dtype_valid('LJ_type_B', lj_type_b_type, [mstype.float32], self.name) - - return lj_type_a_type - - -class Dihedral14LJCFForceWithAtomEnergy(PrimitiveWithInfer): - """ - Calculate the Lennard-Jones and Coulomb energy correction and force correction - for each necessary dihedral 1,4 term together and add them to the total force - and potential energy for each atom. - - The calculation formula of force correction is the same as operator - :class:`Dihedral14LJForceWithDirectCF`, and the energy correction part is the same - as operators :class:`Dihedral14LJEnergy` and :class:`Dihedral14CFEnergy`. - - Because there are a large number of inputs and all of them are related, - there is no way to construct `Examples` using random methods. For details, refer to the webpage `SPONGE in MindSpore - `_. - - Args: - nb14_numbers (int32): the number of necessary dihedral 1,4 terms m. - atom_numbers (int32): the number of atoms n. - - Inputs: - - **uint_crd_f** (Tensor) - The unsigned int coordinate value of each atom. - The data type is uint32 and the shape is :math:`(n, 3)`. - - **LJ_type** (Tensor) - The Lennard-Jones type of each atom. - The data type is int32 and the shape is :math:`(n,)`. - - **charge** (Tensor) - The charge of each atom. - The data type is float32 and the shape is :math:`(n,)`. - - **boxlength_f** (Tensor) - The length of molecular simulation box in 3 dimensions. - The data type is float32 and the shape is :math:`(3,)`. - - **a_14** (Tensor) - The first atom index of each dihedral 1,4 term. - The data type is int32 and the shape is :math:`(m,)`. - - **b_14** (Tensor) - The second atom index of each dihedral 1,4 term. - The data type is int32 and the shape is :math:`(m,)`. - - **lj_scale_factor** (Tensor) - The scale factor for the - Lennard-Jones part of force correction of each dihedral 1,4 term. - The data type is float32 and the shape is :math:`(m,)`. - - **cf_scale_factor** (Tensor) - The scale factor for the - Coulomb part of force correction for each dihedral 1,4 term. - The data type is float32 and the shape is :math:`(m,)`. - - **LJ_type_A** (Tensor) - The A parameter in Lennard-Jones scheme of each atom pair type. - q is the number of atom pair types. The data type is float32 and the shape is :math:`(q,)`. - - **LJ_type_B** (Tensor) - The B parameter in Lennard-Jones scheme of each atom pair type. - q is the number of atom pair types. The data type is float32 and the shape is :math:`(q,)`. - - Outputs: - - **frc_f** (Tensor) - The force felt by each atom. - The data type is float32 and the shape is :math:`(n, 3)`. - - **atom_energy** (Tensor) - The accumulated potential energy for each atom. - The data type is float32 and the shape is :math:`(n,)`.
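Putting the two corrections together for one 1,4 pair: the Lennard-Jones force follows the Dihedral14LJForce() formula, the Coulomb force follows the formula under Dihedral14LJForceWithDirectCF(), and the energies follow Dihedral14LJEnergy() plus the Dihedral14CFEnergy() formula that appears further below. A sketch on a real-space `dr`, assuming any physical constants are folded into the k factors (lj_scale_factor and cf_scale_factor):

    import numpy as np

    def nb14_lj_cf_correction(dr, q_a, q_b, k_lj, k_cf, a, b):
        r2 = np.dot(dr, dr)
        r = np.sqrt(r2)
        inv_r6 = 1.0 / r2**3
        # F_lj = k*(-12*A/|dr|^14 + 6*B/|dr|^8)*dr, F_cf = -k*q_a*q_b/|dr|^3*dr
        lj_frc = k_lj * (-12.0 * a * inv_r6 * inv_r6 / r2 + 6.0 * b * inv_r6 / r2) * dr
        cf_frc = -k_cf * q_a * q_b / (r2 * r) * dr
        # E_lj = k*(A/|dr|^12 - B/|dr|^6), E_cf = k*q_a*q_b/|dr|
        lj_ene = k_lj * (a * inv_r6 * inv_r6 - b * inv_r6)
        cf_ene = k_cf * q_a * q_b / r
        return lj_frc + cf_frc, lj_ene + cf_ene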
- - Supported Platforms: - ``GPU`` - """ - - @prim_attr_register - def __init__(self, nb14_numbers, atom_numbers): - """Initialize Dihedral14LJCFForceWithAtomEnergy.""" - validator.check_value_type('nb14_numbers', nb14_numbers, int, self.name) - validator.check_value_type('atom_numbers', atom_numbers, int, self.name) - self.dihedral_14_numbers = nb14_numbers - self.atom_numbers = atom_numbers - - self.init_prim_io_names( - inputs=['uint_crd_f', 'LJtype', 'charge', 'boxlength_f', 'a_14', 'b_14', 'lj_scale_factor', - 'cf_scale_factor', - 'LJ_type_A', 'LJ_type_B'], - outputs=['frc_f', 'atom_energy']) - self.add_prim_attr('dihedral_14_numbers', self.dihedral_14_numbers) - self.add_prim_attr('atom_numbers', self.atom_numbers) - - def infer_shape(self, uint_crd_f_shape, ljtype_shape, charge_shape, boxlength_f_shape, a_14_shape, b_14_shape, - lj_scale_factor_shape, cf_scale_factor_shape, lj_type_a_shape, lj_type_b_shape): - cls_name = self.name - n = self.atom_numbers - m = self.dihedral_14_numbers - q = lj_type_a_shape[0] - validator.check_int(len(uint_crd_f_shape), 2, Rel.EQ, "uint_crd_f_dim", cls_name) - validator.check_int(len(ljtype_shape), 1, Rel.EQ, "LJtype_dim", cls_name) - validator.check_int(len(charge_shape), 1, Rel.EQ, "charge_dim", cls_name) - validator.check_int(len(boxlength_f_shape), 1, Rel.EQ, "boxlength_f_dim", cls_name) - validator.check_int(len(a_14_shape), 1, Rel.EQ, "a_14_dim", cls_name) - validator.check_int(len(b_14_shape), 1, Rel.EQ, "b_14_dim", cls_name) - validator.check_int(len(lj_scale_factor_shape), 1, Rel.EQ, "lj_scale_factor_dim", cls_name) - validator.check_int(len(cf_scale_factor_shape), 1, Rel.EQ, "cf_scale_factor_dim", cls_name) - validator.check_int(len(lj_type_b_shape), 1, Rel.EQ, "LJ_type_B_dim", cls_name) - - validator.check_int(uint_crd_f_shape[0], n, Rel.EQ, "uint_crd_f_shape[0]", cls_name) - validator.check_int(uint_crd_f_shape[1], 3, Rel.EQ, "uint_crd_f_shape[1]", cls_name) - validator.check_int(ljtype_shape[0], n, Rel.EQ, "LJtype_shape", cls_name) - validator.check_int(charge_shape[0], n, Rel.EQ, "charge_shape", cls_name) - validator.check_int(boxlength_f_shape[0], 3, Rel.EQ, "boxlength_f_shape", cls_name) - validator.check_int(lj_type_b_shape[0], q, Rel.EQ, "LJ_type_B_shape", cls_name) - validator.check_int(a_14_shape[0], m, Rel.EQ, "a_14_shape", cls_name) - validator.check_int(b_14_shape[0], m, Rel.EQ, "b_14_shape", cls_name) - validator.check_int(lj_scale_factor_shape[0], m, Rel.EQ, "lj_scale_factor_shape", cls_name) - validator.check_int(cf_scale_factor_shape[0], m, Rel.EQ, "cf_scale_factor_shape", cls_name) - return uint_crd_f_shape, charge_shape - - def infer_dtype(self, uint_crd_f_dtype, ljtype_dtype, charge_dtype, boxlength_f_type, a_14_type, b_14_type, - lj_scale_factor_type, cf_scale_factor_type, lj_type_a_type, lj_type_b_type): - validator.check_tensor_dtype_valid('uint_crd_f', uint_crd_f_dtype, [mstype.uint32], self.name) - validator.check_tensor_dtype_valid('LJtype', ljtype_dtype, [mstype.int32], self.name) - validator.check_tensor_dtype_valid('charge', charge_dtype, [mstype.float32], self.name) - validator.check_tensor_dtype_valid('boxlength_f', boxlength_f_type, [mstype.float32], self.name) - validator.check_tensor_dtype_valid('a_14', a_14_type, [mstype.int32], self.name) - validator.check_tensor_dtype_valid('b_14', b_14_type, [mstype.int32], self.name) - validator.check_tensor_dtype_valid('lj_scale_factor', lj_scale_factor_type, [mstype.float32], self.name) - validator.check_tensor_dtype_valid('cf_scale_factor', cf_scale_factor_type, 
[mstype.float32], self.name) - validator.check_tensor_dtype_valid('LJ_type_A', lj_type_a_type, [mstype.float32], self.name) - validator.check_tensor_dtype_valid('LJ_type_B', lj_type_b_type, [mstype.float32], self.name) - - return charge_dtype, charge_dtype - - -class Dihedral14LJAtomEnergy(PrimitiveWithInfer): - """ - Add the potential energy caused by Lennard-Jones energy correction for each - necessary dihedral 1,4 term to the total potential energy of each atom. - - The calculation formula is the same as operator Dihedral14LJEnergy(). - - Because there are a large number of inputs and all of them are related, - there is no way to construct `Examples` using random methods. For details, refer to the webpage `SPONGE in MindSpore - `_. - - Args: - nb14_numbers (int32): the number of necessary dihedral 1,4 terms m. - atom_numbers (int32): the number of atoms n. - - Inputs: - - **uint_crd_f** (Tensor) - The unsigned int coordinate value of each atom. - The data type is uint32 and the shape is :math:`(n, 3)`. - - **LJ_type** (Tensor) - The Lennard-Jones type of each atom. - The data type is int32 and the shape is :math:`(n,)`. - - **charge** (Tensor) - The charge of each atom. - The data type is float32 and the shape is :math:`(n,)`. - - **boxlength_f** (Tensor) - The length of molecular simulation box in 3 dimensions. - The data type is float32 and the shape is :math:`(3,)`. - - **a_14** (Tensor) - The first atom index of each dihedral 1,4 term. - The data type is int32 and the shape is :math:`(m,)`. - - **b_14** (Tensor) - The second atom index of each dihedral 1,4 term. - The data type is int32 and the shape is :math:`(m,)`. - - **lj_scale_factor** (Tensor) - The scale factor for the - Lennard-Jones part of force correction of each dihedral 1,4 term. - The data type is float32 and the shape is :math:`(m,)`. - - **LJ_type_A** (Tensor) - The A parameter in Lennard-Jones scheme of each atom pair type. - q is the number of atom pair types. The data type is float32 and the shape is :math:`(q,)`. - - **LJ_type_B** (Tensor) - The B parameter in Lennard-Jones scheme of each atom pair type. - q is the number of atom pair types. The data type is float32 and the shape is :math:`(q,)`. - - Outputs: - - **ene** (Tensor) - The accumulated potential energy of each atom. - The data type is float32 and the shape is :math:`(n,)`.
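The q = P*(P+1)/2 sizing used throughout these docstrings implies that LJ_type_A and LJ_type_B are indexed by unordered pairs of per-atom LJ types. One common triangular-index convention is sketched below; treat it as an illustration only, since the actual table layout was defined by the CUDA kernels this patch removes, not by the docstrings.

    def lj_pair_index(type_i, type_j):
        # Flat index of the unordered pair (type_i, type_j) in a lower-triangular
        # table of size P*(P+1)/2, assuming 0-based types and rows i >= j.
        i, j = max(type_i, type_j), min(type_i, type_j)
        return i * (i + 1) // 2 + j

    # Hypothetical lookup: a = LJ_type_A[lj_pair_index(lj_type[p], lj_type[q])]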
- - Supported Platforms: - ``GPU`` - """ - - @prim_attr_register - def __init__(self, nb14_numbers, atom_numbers): - """Initialize Dihedral14LJAtomEnergy.""" - validator.check_value_type('nb14_numbers', nb14_numbers, int, self.name) - validator.check_value_type('atom_numbers', atom_numbers, int, self.name) - self.dihedral_14_numbers = nb14_numbers - self.atom_numbers = atom_numbers - - self.init_prim_io_names( - inputs=['uint_crd_f', 'LJtype', 'charge', 'boxlength_f', 'a_14', 'b_14', 'lj_scale_factor', - 'LJ_type_A', 'LJ_type_B'], - outputs=['ene']) - self.add_prim_attr('dihedral_14_numbers', self.dihedral_14_numbers) - self.add_prim_attr('atom_numbers', self.atom_numbers) - - def infer_shape(self, uint_crd_f_shape, ljtype_shape, charge_shape, boxlength_f_shape, a_14_shape, b_14_shape, - lj_scale_factor_shape, lj_type_a_shape, lj_type_b_shape): - cls_name = self.name - n = self.atom_numbers - q = lj_type_a_shape[0] - validator.check_int(len(uint_crd_f_shape), 2, Rel.EQ, "uint_crd_f_dim", cls_name) - validator.check_int(len(ljtype_shape), 1, Rel.EQ, "LJtype_dim", cls_name) - validator.check_int(len(charge_shape), 1, Rel.EQ, "charge_dim", cls_name) - validator.check_int(len(boxlength_f_shape), 1, Rel.EQ, "boxlength_f_dim", cls_name) - validator.check_int(len(a_14_shape), 1, Rel.EQ, "a_14_dim", cls_name) - validator.check_int(len(b_14_shape), 1, Rel.EQ, "b_14_dim", cls_name) - validator.check_int(len(lj_scale_factor_shape), 1, Rel.EQ, "lj_scale_factor_dim", cls_name) - validator.check_int(len(lj_type_b_shape), 1, Rel.EQ, "LJ_type_B_dim", cls_name) - - validator.check_int(uint_crd_f_shape[0], n, Rel.EQ, "uint_crd_f_shape[0]", cls_name) - validator.check_int(uint_crd_f_shape[1], 3, Rel.EQ, "uint_crd_f_shape[1]", cls_name) - validator.check_int(ljtype_shape[0], n, Rel.EQ, "LJtype_shape", cls_name) - validator.check_int(charge_shape[0], n, Rel.EQ, "charge_shape", cls_name) - validator.check_int(boxlength_f_shape[0], 3, Rel.EQ, "boxlength_f_shape", cls_name) - validator.check_int(lj_type_b_shape[0], q, Rel.EQ, "LJ_type_B_shape", cls_name) - m = self.dihedral_14_numbers - validator.check_int(a_14_shape[0], m, Rel.EQ, "a_14_shape", cls_name) - validator.check_int(b_14_shape[0], m, Rel.EQ, "b_14_shape", cls_name) - validator.check_int(lj_scale_factor_shape[0], m, Rel.EQ, "lj_scale_factor_shape", cls_name) - return ljtype_shape - - def infer_dtype(self, uint_crd_f_dtype, ljtype_dtype, charge_dtype, boxlength_f_type, a_14_type, b_14_type, - lj_scale_factor_type, lj_type_a_type, lj_type_b_type): - validator.check_tensor_dtype_valid('uint_crd_f', uint_crd_f_dtype, [mstype.uint32], self.name) - validator.check_tensor_dtype_valid('LJtype', ljtype_dtype, [mstype.int32], self.name) - validator.check_tensor_dtype_valid('charge', charge_dtype, [mstype.float32], self.name) - validator.check_tensor_dtype_valid('boxlength_f', boxlength_f_type, [mstype.float32], self.name) - validator.check_tensor_dtype_valid('a_14', a_14_type, [mstype.int32], self.name) - validator.check_tensor_dtype_valid('b_14', b_14_type, [mstype.int32], self.name) - validator.check_tensor_dtype_valid('lj_scale_factor', lj_scale_factor_type, [mstype.float32], - self.name) - validator.check_tensor_dtype_valid('LJ_type_A', lj_type_a_type, [mstype.float32], self.name) - validator.check_tensor_dtype_valid('LJ_type_B', lj_type_b_type, [mstype.float32], self.name) - - return lj_type_a_type - - -class Dihedral14CFEnergy(PrimitiveWithInfer): - """ - Calculate the Coulomb part of 1,4 dihedral energy correction for - each necessary dihedral term on the 
corresponding atoms. - - Because there are a large number of inputs and all of them are related, - there is no way to construct `Examples` using random methods. For details, refer to the webpage `SPONGE in MindSpore - `_. - - .. math:: - - dr = (x_a-x_b, y_a-y_b, z_a-z_b) - - .. math:: - E = k*q_a*q_b/|dr| - - Args: - nb14_numbers (int32): the number of necessary dihedral 1,4 terms m. - atom_numbers (int32): the number of atoms n. - - Inputs: - - **uint_crd_f** (Tensor) - The unsigned int coordinate value of each atom. - The data type is uint32 and the shape is :math:`(n, 3)`. - - **LJ_type** (Tensor) - The Lennard-Jones type of each atom. - The data type is int32 and the shape is :math:`(n,)`. - - **charge** (Tensor) - The charge of each atom. - The data type is float32 and the shape is :math:`(n,)`. - - **boxlength_f** (Tensor) - The length of molecular simulation box in 3 dimensions. - The data type is float32 and the shape is :math:`(3,)`. - - **a_14** (Tensor) - The first atom index of each dihedral 1,4 term. - The data type is int32 and the shape is :math:`(m,)`. - - **b_14** (Tensor) - The second atom index of each dihedral 1,4 term. - The data type is int32 and the shape is :math:`(m,)`. - - **cf_scale_factor** (Tensor) - The scale factor for the - Coulomb part of force correction for each dihedral 1,4 term. - The data type is float32 and the shape is :math:`(m,)`. - - Outputs: - - **ene** (Tensor) - The potential energy correction for each dihedral 1,4 term. - The data type is float32 and the shape is :math:`(m,)`. - - Supported Platforms: - ``GPU`` - """ - - @prim_attr_register - def __init__(self, nb14_numbers, atom_numbers): - """Initialize Dihedral14CFEnergy.""" - validator.check_value_type('nb14_numbers', nb14_numbers, int, self.name) - validator.check_value_type('atom_numbers', atom_numbers, int, self.name) - self.dihedral_14_numbers = nb14_numbers - self.atom_numbers = atom_numbers - - self.init_prim_io_names( - inputs=['uint_crd_f', 'LJtype', 'charge', 'boxlength_f', 'a_14', 'b_14', 'cf_scale_factor'], - outputs=['ene']) - self.add_prim_attr('dihedral_14_numbers', self.dihedral_14_numbers) - self.add_prim_attr('atom_numbers', self.atom_numbers) - - def infer_shape(self, uint_crd_f_shape, ljtype_shape, charge_shape, boxlength_f_shape, a_14_shape, b_14_shape, - cf_scale_factor_shape): - cls_name = self.name - n = self.atom_numbers - validator.check_int(len(uint_crd_f_shape), 2, Rel.EQ, "uint_crd_f_dim", cls_name) - validator.check_int(len(ljtype_shape), 1, Rel.EQ, "LJtype_dim", cls_name) - validator.check_int(len(charge_shape), 1, Rel.EQ, "charge_dim", cls_name) - validator.check_int(len(boxlength_f_shape), 1, Rel.EQ, "boxlength_f_dim", cls_name) - validator.check_int(len(a_14_shape), 1, Rel.EQ, "a_14_dim", cls_name) - validator.check_int(len(b_14_shape), 1, Rel.EQ, "b_14_dim", cls_name) - validator.check_int(len(cf_scale_factor_shape), 1, Rel.EQ, "cf_scale_factor_dim", cls_name) - - validator.check_int(uint_crd_f_shape[0], n, Rel.EQ, "uint_crd_f_shape[0]", cls_name) - validator.check_int(uint_crd_f_shape[1], 3, Rel.EQ, "uint_crd_f_shape[1]", cls_name) - validator.check_int(ljtype_shape[0], n, Rel.EQ, "LJtype_shape", cls_name) - validator.check_int(charge_shape[0], n, Rel.EQ, "charge_shape", cls_name) - validator.check_int(boxlength_f_shape[0], 3, Rel.EQ, "boxlength_f_shape", cls_name) - m = self.dihedral_14_numbers - validator.check_int(a_14_shape[0], m, Rel.EQ, "a_14_shape", cls_name) - validator.check_int(b_14_shape[0], m, Rel.EQ, "b_14_shape", cls_name) - 
validator.check_int(cf_scale_factor_shape[0], m, Rel.EQ, "cf_scale_factor_shape", cls_name) - return [self.dihedral_14_numbers,] - - def infer_dtype(self, uint_crd_f_dtype, ljtype_dtype, charge_dtype, boxlength_f_type, a_14_type, b_14_type, - cf_scale_factor_type): - validator.check_tensor_dtype_valid('uint_crd_f', uint_crd_f_dtype, [mstype.uint32], self.name) - validator.check_tensor_dtype_valid('LJtype', ljtype_dtype, [mstype.int32], self.name) - validator.check_tensor_dtype_valid('charge', charge_dtype, [mstype.float32], self.name) - validator.check_tensor_dtype_valid('boxlength_f', boxlength_f_type, [mstype.float32], self.name) - validator.check_tensor_dtype_valid('a_14', a_14_type, [mstype.int32], self.name) - validator.check_tensor_dtype_valid('b_14', b_14_type, [mstype.int32], self.name) - validator.check_tensor_dtype_valid('cf_scale_factor', cf_scale_factor_type, [mstype.float32], - self.name) - - return charge_dtype - - -class Dihedral14CFAtomEnergy(PrimitiveWithInfer): - """ - Add the potential energy caused by Coulomb energy correction for each - necessary dihedral 1,4 term to the total potential energy of each atom. - - The calculation formula is the same as operator :class:`Dihedral14CFEnergy`. - - Because there are a large number of inputs and all of them are related, - there is no way to construct `Examples` using random methods. For details, refer to the webpage `SPONGE in MindSpore - `_. - - Args: - nb14_numbers (int32): the number of necessary dihedral 1,4 terms m. - atom_numbers (int32): the number of atoms n. - - Inputs: - - **uint_crd_f** (Tensor) - The unsigned int coordinate value of each atom. - The data type is uint32 and the shape is :math:`(n, 3)`. - - **LJ_type** (Tensor) - The Lennard-Jones type of each atom. - The data type is int32 and the shape is :math:`(n,)`. - - **charge** (Tensor) - The charge of each atom. - The data type is float32 and the shape is :math:`(n,)`. - - **boxlength_f** (Tensor) - The length of molecular simulation box in 3 dimensions. - The data type is float32 and the shape is :math:`(3,)`. - - **a_14** (Tensor) - The first atom index of each dihedral 1,4 term. - The data type is int32 and the shape is :math:`(m,)`. - - **b_14** (Tensor) - The second atom index of each dihedral 1,4 term. - The data type is int32 and the shape is :math:`(m,)`. - - **cf_scale_factor** (Tensor) - The scale factor for the - Coulomb part of force correction for each dihedral 1,4 term. - The data type is float32 and the shape is :math:`(m,)`. - - Outputs: - - **ene** (Tensor) - The accumulated potential energy of each atom. 
- The data type is float32 and the shape is :math:`(n,)` - - - Supported Platforms: - ``GPU`` - """ - - @prim_attr_register - def __init__(self, nb14_numbers, atom_numbers): - """Initialize Dihedral14CFAtomEnergy.""" - validator.check_value_type('nb14_numbers', nb14_numbers, int, self.name) - validator.check_value_type('atom_numbers', atom_numbers, int, self.name) - self.dihedral_14_numbers = nb14_numbers - self.atom_numbers = atom_numbers - - self.init_prim_io_names( - inputs=['uint_crd_f', 'LJtype', 'charge', 'boxlength_f', 'a_14', 'b_14', 'cf_scale_factor'], - outputs=['ene']) - self.add_prim_attr('dihedral_14_numbers', self.dihedral_14_numbers) - self.add_prim_attr('atom_numbers', self.atom_numbers) - - def infer_shape(self, uint_crd_f_shape, ljtype_shape, charge_shape, boxlength_f_shape, a_14_shape, b_14_shape, - cf_scale_factor_shape): - cls_name = self.name - n = self.atom_numbers - validator.check_int(len(uint_crd_f_shape), 2, Rel.EQ, "uint_crd_f_dim", cls_name) - validator.check_int(len(ljtype_shape), 1, Rel.EQ, "LJtype_dim", cls_name) - validator.check_int(len(charge_shape), 1, Rel.EQ, "charge_dim", cls_name) - validator.check_int(len(boxlength_f_shape), 1, Rel.EQ, "boxlength_f_dim", cls_name) - validator.check_int(len(a_14_shape), 1, Rel.EQ, "a_14_dim", cls_name) - validator.check_int(len(b_14_shape), 1, Rel.EQ, "b_14_dim", cls_name) - validator.check_int(len(cf_scale_factor_shape), 1, Rel.EQ, "cf_scale_factor_dim", cls_name) - - validator.check_int(uint_crd_f_shape[0], n, Rel.EQ, "uint_crd_f_shape[0]", cls_name) - validator.check_int(uint_crd_f_shape[1], 3, Rel.EQ, "uint_crd_f_shape[1]", cls_name) - validator.check_int(ljtype_shape[0], n, Rel.EQ, "LJtype_shape", cls_name) - validator.check_int(charge_shape[0], n, Rel.EQ, "charge_shape", cls_name) - validator.check_int(boxlength_f_shape[0], 3, Rel.EQ, "boxlength_f_shape", cls_name) - m = self.dihedral_14_numbers - validator.check_int(a_14_shape[0], m, Rel.EQ, "a_14_shape", cls_name) - validator.check_int(b_14_shape[0], m, Rel.EQ, "b_14_shape", cls_name) - validator.check_int(cf_scale_factor_shape[0], m, Rel.EQ, "cf_scale_factor_shape", cls_name) - return ljtype_shape - - def infer_dtype(self, uint_crd_f_dtype, ljtype_dtype, charge_dtype, boxlength_f_type, a_14_type, b_14_type, - cf_scale_factor_type): - validator.check_tensor_dtype_valid('uint_crd_f', uint_crd_f_dtype, [mstype.uint32], self.name) - validator.check_tensor_dtype_valid('LJtype', ljtype_dtype, [mstype.int32], self.name) - validator.check_tensor_dtype_valid('charge', charge_dtype, [mstype.float32], self.name) - validator.check_tensor_dtype_valid('boxlength_f', boxlength_f_type, [mstype.float32], self.name) - validator.check_tensor_dtype_valid('a_14', a_14_type, [mstype.int32], self.name) - validator.check_tensor_dtype_valid('b_14', b_14_type, [mstype.int32], self.name) - validator.check_tensor_dtype_valid('cf_scale_factor', cf_scale_factor_type, [mstype.float32], - self.name) - - return charge_dtype - - -class PMEReciprocalForce(PrimitiveWithInfer): - """ - Calculate the reciprocal part of long-range Coulomb force using - PME (Particle Mesh Ewald) method. Assume the number of atoms is n. - - The detailed calculation formula of PME (Particle Mesh Ewald) method - can be found in this paper: A Smooth Particle Mesh Ewald Method. DOI: - 10.1063/1.470117. - - Because there are a large number of inputs and all of them are related, - there is no way to construct `Examples` using random methods. For details, refer to the webpage `SPONGE in MindSpore - `_. 
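The Args section below only says that beta is determined by the non-bond cutoff and the precision tolerance. A common Ewald recipe, stated here as an assumption for illustration rather than as SPONGE's exact rule, picks beta so that the truncated direct-space term satisfies erfc(beta*cutoff)/cutoff = tolerance; since erfc is monotonic, a short bisection suffices:

    import math

    def ewald_beta(cutoff, tolerance):
        # Solve erfc(beta * cutoff) / cutoff == tolerance for beta by bisection.
        lo, hi = 0.0, 10.0
        for _ in range(100):
            beta = 0.5 * (lo + hi)
            if math.erfc(beta * cutoff) / cutoff > tolerance:
                lo = beta   # residual too large: increase beta
            else:
                hi = beta
        return beta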
-
-    Args:
-        atom_numbers(int32): the number of atoms, n.
-        beta(float32): the PME beta parameter, determined by the
-            non-bond cutoff value and simulation precision tolerance.
-        fftx(int32): the number of points for Fourier transform in dimension X.
-        ffty(int32): the number of points for Fourier transform in dimension Y.
-        fftz(int32): the number of points for Fourier transform in dimension Z.
-        box_length_0(float32): the value of boxlength idx 0.
-        box_length_1(float32): the value of boxlength idx 1.
-        box_length_2(float32): the value of boxlength idx 2.
-
-    Inputs:
-        - **uint_crd** (Tensor) - The unsigned int coordinate value of each atom.
-          The data type is uint32 and the shape is :math:`(n, 3)`.
-        - **charge** (Tensor) - The charge carried by each atom.
-          The data type is float32 and the shape is :math:`(n,)`.
-
-    Outputs:
-        - **force** (Tensor) - The force felt by each atom.
-          The data type is float32 and the shape is :math:`(n, 3)`.
-
-    Supported Platforms:
-        ``GPU``
-    """
-
-    @prim_attr_register
-    def __init__(self, atom_numbers, beta, fftx, ffty, fftz, box_length_0, box_length_1, box_length_2):
-        """Initialize PMEReciprocalForce."""
-        validator.check_value_type('atom_numbers', atom_numbers, int, self.name)
-        validator.check_value_type('beta', beta, float, self.name)
-        validator.check_value_type('fftx', fftx, int, self.name)
-        validator.check_value_type('ffty', ffty, int, self.name)
-        validator.check_value_type('fftz', fftz, int, self.name)
-        validator.check_value_type('box_length_0', box_length_0, float, self.name)
-        validator.check_value_type('box_length_1', box_length_1, float, self.name)
-        validator.check_value_type('box_length_2', box_length_2, float, self.name)
-        self.atom_numbers = atom_numbers
-        self.beta = beta
-        self.fftx = fftx
-        self.ffty = ffty
-        self.fftz = fftz
-        self.box_length_0 = box_length_0
-        self.box_length_1 = box_length_1
-        self.box_length_2 = box_length_2
-
-        self.init_prim_io_names(inputs=['boxlength', 'uint_crd', 'charge'],
-                                outputs=['force'])
-        self.add_prim_attr('atom_numbers', self.atom_numbers)
-        self.add_prim_attr('beta', self.beta)
-        self.add_prim_attr('fftx', self.fftx)
-        self.add_prim_attr('ffty', self.ffty)
-        self.add_prim_attr('fftz', self.fftz)
-        self.add_prim_attr('box_length_0', self.box_length_0)
-        self.add_prim_attr('box_length_1', self.box_length_1)
-        self.add_prim_attr('box_length_2', self.box_length_2)
-
-    def infer_shape(self, uint_crd_shape, charge_shape):
-        cls_name = self.name
-        n = self.atom_numbers
-        validator.check_int(len(uint_crd_shape), 2, Rel.EQ, "uint_crd_dim", cls_name)
-        validator.check_int(len(charge_shape), 1, Rel.EQ, "charge_dim", cls_name)
-
-        validator.check_int(uint_crd_shape[0], n, Rel.EQ, "uint_crd_shape[0]", cls_name)
-        validator.check_int(uint_crd_shape[1], 3, Rel.EQ, "uint_crd_shape[1]", cls_name)
-        validator.check_int(charge_shape[0], n, Rel.EQ, "charge_shape", cls_name)
-        return uint_crd_shape
-
-    def infer_dtype(self, uint_crd_type, charge_type):
-        validator.check_tensor_dtype_valid('uint_crd', uint_crd_type, [mstype.uint32], self.name)
-        validator.check_tensor_dtype_valid('charge', charge_type, [mstype.float32], self.name)
-        return charge_type
-
-
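A sketch of driving the removed PMEReciprocalForce op (hypothetical values; the call takes the two tensors validated by infer_shape above, and assumes the op remains importable from mindspore.ops.operations):

    import numpy as np
    import mindspore as ms
    import mindspore.ops.operations as P  # assumed import path

    n = 8  # hypothetical atom count
    op = P.PMEReciprocalForce(atom_numbers=n, beta=0.3, fftx=4, ffty=4, fftz=4,
                              box_length_0=20.0, box_length_1=20.0, box_length_2=20.0)
    uint_crd = ms.Tensor(np.zeros((n, 3), np.uint32))
    charge = ms.Tensor(np.ones(n, np.float32))
    force = op(uint_crd, charge)  # reciprocal-space force: float32, shape (n, 3)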
-class PMEExcludedForce(PrimitiveWithInfer):
-    """
-    Calculate the excluded part of the long-range Coulomb force using the
-    PME (Particle Mesh Ewald) method. Assume the number of atoms is
-    n, and the length of the excluded list is E.
-
-    Because there are a large number of inputs and each of them is related,
-    there is no way to construct `Examples` using random methods.
-    For details, refer to the webpage `SPONGE in MindSpore
-    `_.
-
-    Args:
-        atom_numbers(int32): the number of atoms, n.
-        excluded_numbers(int32): the length of the excluded list, E.
-        beta(float32): the PME beta parameter, determined by the
-            non-bond cutoff value and simulation precision tolerance.
-
-    Inputs:
-        - **uint_crd** (Tensor) - The unsigned int coordinate value of each atom.
-          The data type is uint32 and the shape is :math:`(n, 3)`.
-        - **scaler** (Tensor) - The scale factor between real space
-          coordinates and its unsigned int value. The data type is float32 and the shape is :math:`(3,)`.
-        - **charge** (Tensor) - The charge carried by each atom.
-          The data type is float32 and the shape is :math:`(n,)`.
-        - **excluded_list_start** (Tensor) - The start excluded index
-          in the excluded list for each atom. The data type is int32 and the shape is :math:`(n,)`.
-        - **excluded_list** (Tensor) - The contiguous join of the excluded
-          lists of each atom. E is the number of excluded atoms. The data type is int32 and the shape is :math:`(E,)`.
-        - **excluded_atom_numbers** (Tensor) - The number of atoms excluded
-          in the excluded list for each atom. The data type is int32 and the shape is :math:`(n,)`.
-
-    Outputs:
-        - **force** (Tensor) - The force felt by each atom.
-          The data type is float32 and the shape is :math:`(n, 3)`.
-
-    Supported Platforms:
-        ``GPU``
-    """
-
-    @prim_attr_register
-    def __init__(self, atom_numbers, excluded_numbers, beta):
-        """Initialize PMEExcludedForce."""
-        validator.check_value_type('atom_numbers', atom_numbers, int, self.name)
-        validator.check_value_type('excluded_numbers', excluded_numbers, int, self.name)
-        validator.check_value_type('beta', beta, float, self.name)
-        self.atom_numbers = atom_numbers
-        self.excluded_numbers = excluded_numbers
-        self.beta = beta
-        self.init_prim_io_names(
-            inputs=['uint_crd', 'sacler', 'charge', 'excluded_list_start', 'excluded_list', 'excluded_atom_numbers'],
-            outputs=['force'])
-        self.add_prim_attr('atom_numbers', self.atom_numbers)
-        self.add_prim_attr('excluded_numbers', self.excluded_numbers)
-        self.add_prim_attr('beta', self.beta)
-
-    def infer_shape(self, uint_crd_shape, sacler_shape, charge_shape, excluded_list_start_shape, excluded_list_shape,
-                    excluded_atom_numbers_shape):
-        cls_name = self.name
-        n = self.atom_numbers
-        validator.check_int(len(uint_crd_shape), 2, Rel.EQ, "uint_crd_dim", cls_name)
-        validator.check_int(len(sacler_shape), 1, Rel.EQ, "sacler_dim", cls_name)
-        validator.check_int(len(charge_shape), 1, Rel.EQ, "charge_dim", cls_name)
-        validator.check_int(len(excluded_list_start_shape), 1, Rel.EQ, "excluded_list_start_dim", cls_name)
-        validator.check_int(len(excluded_atom_numbers_shape), 1, Rel.EQ, "excluded_atom_numbers_dim", cls_name)
-
-        validator.check_int(uint_crd_shape[0], n, Rel.EQ, "uint_crd_shape[0]", cls_name)
-        validator.check_int(uint_crd_shape[1], 3, Rel.EQ, "uint_crd_shape[1]", cls_name)
-        validator.check_int(sacler_shape[0], 3, Rel.EQ, "sacler_shape", cls_name)
-        validator.check_int(charge_shape[0], n, Rel.EQ, "charge_shape", cls_name)
-        validator.check_int(excluded_list_start_shape[0], n, Rel.EQ, "excluded_list_start_shape", cls_name)
-        validator.check_int(excluded_atom_numbers_shape[0], n, Rel.EQ, "excluded_atom_numbers_shape", cls_name)
-        return uint_crd_shape
-
-    def infer_dtype(self, uint_crd_type, sacler_type, charge_type, excluded_list_start_type, excluded_list_type,
-                    excluded_atom_numbers_type):
-        validator.check_tensor_dtype_valid('sacler', sacler_type, [mstype.float32], self.name)
-        validator.check_tensor_dtype_valid('uint_crd', uint_crd_type, [mstype.uint32], self.name)
-        validator.check_tensor_dtype_valid('charge', charge_type, [mstype.float32], self.name)
-        validator.check_tensor_dtype_valid('excluded_list_start', excluded_list_start_type, [mstype.int32],
-                                           self.name)
-        validator.check_tensor_dtype_valid('excluded_list', excluded_list_type, [mstype.int32],
-                                           self.name)
-        validator.check_tensor_dtype_valid('excluded_atom_numbers', excluded_atom_numbers_type, [mstype.int32],
-                                           self.name)
-        return charge_type
-
-
-class PMEEnergy(PrimitiveWithInfer):
-    """
-    Calculate the Coulomb energy of the system using the PME method.
-
-    Because there are a large number of inputs and each of them is related,
-    there is no way to construct `Examples` using random methods. For details, refer to the webpage `SPONGE in MindSpore
-    `_.
-
-    .. math::
-
-        E = \sum_{ij} \frac{q_i q_j}{r_{ij}}
-
-    Args:
-        atom_numbers(int32): the number of atoms, n.
-        excluded_numbers(int32): the length of the excluded list, E.
-        beta(float32): the PME beta parameter, determined by the
-            non-bond cutoff value and simulation precision tolerance.
-        fftx(int32): the number of points for Fourier transform in dimension X.
-        ffty(int32): the number of points for Fourier transform in dimension Y.
-        fftz(int32): the number of points for Fourier transform in dimension Z.
-        box_length_0(float32): the value of boxlength idx 0.
-        box_length_1(float32): the value of boxlength idx 1.
-        box_length_2(float32): the value of boxlength idx 2.
-
-    Inputs:
-        - **uint_crd** (Tensor) - The unsigned int coordinate value of each atom.
-          The data type is uint32 and the shape is :math:`(n, 3)`.
-        - **charge** (Tensor) - The charge carried by each atom.
-          The data type is float32 and the shape is :math:`(n,)`.
-        - **nl_numbers** (Tensor) - The neighbor number of each atom.
-          The data type is int32 and the shape is :math:`(n,)`.
-        - **nl_serial** (Tensor) - The neighbor list of each atom, the max number is 800.
-          The data type is int32 and the shape is :math:`(n, 800)`.
-        - **scaler** (Tensor) - The scale factor between real space
-          coordinates and its unsigned int value. The data type is float32 and the shape is :math:`(3,)`.
-        - **excluded_list_start** (Tensor) - The start excluded index
-          in the excluded list for each atom. The data type is int32 and the shape is :math:`(n,)`.
-        - **excluded_list** (Tensor) - The contiguous join of the excluded
-          lists of each atom. E is the number of excluded atoms. The data type is int32 and the shape is :math:`(E,)`.
-        - **excluded_atom_numbers** (Tensor) - The number of atoms excluded
-          in the excluded list for each atom. The data type is int32 and the shape is :math:`(n,)`.
-
-    Outputs:
-        - **reciprocal_ene** (Tensor) - The reciprocal term of PME energy.
-          The data type is float32 and the shape is :math:`(1,)`.
-        - **self_ene** (Tensor) - The self term of PME energy.
-          The data type is float32 and the shape is :math:`(1,)`.
-        - **direct_ene** (Tensor) - The direct term of PME energy.
-          The data type is float32 and the shape is :math:`(1,)`.
-        - **correction_ene** (Tensor) - The correction term of PME energy.
-          The data type is float32 and the shape is :math:`(1,)`.
-
-    Supported Platforms:
-        ``GPU``
-    """
-
-    @prim_attr_register
-    def __init__(self, atom_numbers, excluded_numbers, beta, fftx, ffty, fftz, box_length_0, box_length_1,
-                 box_length_2):
-        """Initialize PMEEnergy."""
-        validator.check_value_type('atom_numbers', atom_numbers, int, self.name)
-        validator.check_value_type('excluded_numbers', excluded_numbers, int, self.name)
-        validator.check_value_type('beta', beta, float, self.name)
-        validator.check_value_type('fftx', fftx, int, self.name)
-        validator.check_value_type('ffty', ffty, int, self.name)
-        validator.check_value_type('fftz', fftz, int, self.name)
-        validator.check_value_type('box_length_0', box_length_0, float, self.name)
-        validator.check_value_type('box_length_1', box_length_1, float, self.name)
-        validator.check_value_type('box_length_2', box_length_2, float, self.name)
-        self.atom_numbers = atom_numbers
-        self.excluded_numbers = excluded_numbers
-        self.beta = beta
-        self.fftx = fftx
-        self.ffty = ffty
-        self.fftz = fftz
-        self.box_length_0 = box_length_0
-        self.box_length_1 = box_length_1
-        self.box_length_2 = box_length_2
-        self.init_prim_io_names(
-            inputs=['box_length', 'uint_crd', 'charge', 'nl_numbers', 'nl_serial', 'scaler', 'excluded_list_start',
-                    'excluded_list', 'excluded_atom_numbers'],
-            outputs=['reciprocal_ene', 'self_ene', 'direct_ene', 'correction_ene'])
-        self.add_prim_attr('atom_numbers', self.atom_numbers)
-        self.add_prim_attr('excluded_numbers', self.excluded_numbers)
-        self.add_prim_attr('beta', self.beta)
-        self.add_prim_attr('fftx', self.fftx)
-        self.add_prim_attr('ffty', self.ffty)
-        self.add_prim_attr('fftz', self.fftz)
-        self.add_prim_attr('box_length_0', self.box_length_0)
-        self.add_prim_attr('box_length_1', self.box_length_1)
-        self.add_prim_attr('box_length_2', self.box_length_2)
-
-    def infer_shape(self, uint_crd, charge, nl_numbers, nl_serial, scaler, excluded_list_start,
-                    excluded_list, excluded_atom_numbers):
-        cls_name = self.name
-        n = self.atom_numbers
-        validator.check_int(len(uint_crd), 2, Rel.EQ, "uint_crd_dim", cls_name)
-        validator.check_int(len(charge), 1, Rel.EQ, "charge_dim", cls_name)
-        validator.check_int(len(nl_numbers), 1, Rel.EQ, "nl_numbers_dim", cls_name)
-        validator.check_int(len(nl_serial), 2, Rel.LE, "nl_serial_dim", cls_name)
-        validator.check_int(len(excluded_list_start), 1, Rel.EQ, "excluded_list_start_dim", cls_name)
-        validator.check_int(len(excluded_atom_numbers), 1, Rel.EQ, "excluded_atom_numbers_dim", cls_name)
-        validator.check_int(len(excluded_list), 1, Rel.GE, "excluded_list", cls_name)
-
-        validator.check_int(uint_crd[0], n, Rel.EQ, "uint_crd_shape[0]", cls_name)
-        validator.check_int(uint_crd[1], 3, Rel.EQ, "uint_crd_shape[1]", cls_name)
-        validator.check_int(charge[0], n, Rel.EQ, "charge_shape", cls_name)
-        validator.check_int(nl_numbers[0], n, Rel.EQ, "nl_numbers_shape[0]", cls_name)
-        validator.check_int(nl_serial[0], n, Rel.LE, "nl_serial_shape[0]", cls_name)
-        validator.check_int(nl_serial[1], 800, Rel.LE, "nl_serial_shape[1]", cls_name)
-        validator.check_int(excluded_list_start[0], n, Rel.EQ, "excluded_list_start_shape", cls_name)
-        validator.check_int(excluded_atom_numbers[0], n, Rel.EQ, "excluded_atom_numbers_shape", cls_name)
-        validator.check_int(excluded_list[0], 0, Rel.GE, "excluded_list_shape", cls_name)
-        return (1,), (1,), (1,), (1,)
-
-    def infer_dtype(self, uint_crd, charge, nl_numbers, nl_serial, scaler, excluded_list_start,
-                    excluded_list, excluded_atom_numbers):
-        validator.check_tensor_dtype_valid('uint_crd', uint_crd, [mstype.uint32],
-                                           self.name)
-        validator.check_tensor_dtype_valid('charge', charge, [mstype.float32], self.name)
-        validator.check_tensor_dtype_valid('nl_numbers', nl_numbers, [mstype.int32], self.name)
-        validator.check_tensor_dtype_valid('nl_serial', nl_serial, [mstype.int32], self.name)
-        validator.check_tensor_dtype_valid('scaler', scaler, [mstype.float32], self.name)
-        validator.check_tensor_dtype_valid('excluded_list_start', excluded_list_start, [mstype.int32],
-                                           self.name)
-        validator.check_tensor_dtype_valid('excluded_list', excluded_list, [mstype.int32],
-                                           self.name)
-        validator.check_tensor_dtype_valid('excluded_atom_numbers', excluded_atom_numbers, [mstype.int32],
-                                           self.name)
-        return charge, charge, charge, charge
-
-
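A sketch of calling the removed PMEEnergy op with the eight tensor inputs validated above (all values hypothetical; assumes the op remains importable from mindspore.ops.operations on GPU):

    import numpy as np
    import mindspore as ms
    import mindspore.ops.operations as P  # assumed import path

    n, e = 8, 4  # hypothetical: atoms, excluded-list length
    op = P.PMEEnergy(atom_numbers=n, excluded_numbers=e, beta=0.3, fftx=4, ffty=4, fftz=4,
                     box_length_0=20.0, box_length_1=20.0, box_length_2=20.0)
    uint_crd = ms.Tensor(np.zeros((n, 3), np.uint32))
    charge = ms.Tensor(np.ones(n, np.float32))
    nl_numbers = ms.Tensor(np.zeros(n, np.int32))
    nl_serial = ms.Tensor(np.zeros((n, 800), np.int32))
    scaler = ms.Tensor(np.ones(3, np.float32))
    excluded_list_start = ms.Tensor(np.zeros(n, np.int32))
    excluded_list = ms.Tensor(np.zeros(e, np.int32))
    excluded_atom_numbers = ms.Tensor(np.zeros(n, np.int32))
    # four (1,) float32 tensors: reciprocal, self, direct, and correction terms
    reciprocal_ene, self_ene, direct_ene, correction_ene = op(
        uint_crd, charge, nl_numbers, nl_serial, scaler,
        excluded_list_start, excluded_list, excluded_atom_numbers)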
-class LJEnergy(PrimitiveWithInfer):
-    """
-    Calculate the Van der Waals interaction energy described by the Lennard-Jones
-    potential for each atom. Assume the number of atoms is n, and the number
-    of Lennard-Jones types for all atoms is P, which means there will be
-    q = P*(P+1)/2 types of possible Lennard-Jones interactions for all kinds
-    of atom pairs.
-
-    Because there are a large number of inputs and each of them is related,
-    there is no way to construct `Examples` using random methods. For details, refer to the webpage `SPONGE in MindSpore
-    `_.
-
-    .. math::
-
-        dr = (x_a - x_b, y_a - y_b, z_a - z_b)
-
-    .. math::
-
-        E = \frac{A}{|dr|^{12}} - \frac{B}{|dr|^{6}}
-
-    Args:
-        atom_numbers(int32): the number of atoms, n.
-        cutoff_square(float32): the square value of cutoff.
-
-    Inputs:
-        - **uint_crd** (Tensor) - The unsigned int coordinate value of each atom.
-          The data type is uint32 and the shape is :math:`(n, 3)`.
-        - **LJtype** (Tensor) - The Lennard-Jones type of each atom.
-          The data type is int32 and the shape is :math:`(n,)`.
-        - **charge** (Tensor) - The charge carried by each atom.
-          The data type is float32 and the shape is :math:`(n,)`.
-        - **scaler** (Tensor) - The scale factor between real
-          space coordinates and its unsigned int value. The data type is float32 and the shape is :math:`(3,)`.
-        - **nl_numbers** (Tensor) - The neighbor number of each atom.
-          The data type is int32 and the shape is :math:`(n,)`.
-        - **nl_serial** (Tensor) - The neighbor list of each atom, the max number is 800.
-          The data type is int32 and the shape is :math:`(n, 800)`.
-        - **d_LJ_A** (Tensor) - The Lennard-Jones A coefficient of each kind of atom pair.
-          q is the number of atom pairs. The data type is float32 and the shape is :math:`(q,)`.
-        - **d_LJ_B** (Tensor) - The Lennard-Jones B coefficient of each kind of atom pair.
-          q is the number of atom pairs. The data type is float32 and the shape is :math:`(q,)`.
-
-    Outputs:
-        - **d_LJ_energy_atom** (Tensor) - The Lennard-Jones potential energy of each atom.
-          The data type is float32 and the shape is :math:`(n,)`.
-        - **d_LJ_energy_sum** (Scalar), the sum of the Lennard-Jones potential energy of each atom.
-          The data type is float32.
-
-    Supported Platforms:
-        ``GPU``
-    """
-
-    @prim_attr_register
-    def __init__(self, atom_numbers, cutoff_square):
-        """Initialize LJEnergy."""
-        validator.check_value_type('atom_numbers', atom_numbers, int, self.name)
-        validator.check_value_type('cutoff_square', cutoff_square, float, self.name)
-        self.atom_numbers = atom_numbers
-        self.cutoff_square = cutoff_square
-        self.init_prim_io_names(
-            inputs=['uint_crd', 'LJtype', 'charge', 'scaler', 'nl_numbers', 'nl_serial', 'd_LJ_A', 'd_LJ_B'],
-            outputs=['d_LJ_energy_atom'])
-        self.add_prim_attr('atom_numbers', self.atom_numbers)
-        self.add_prim_attr('cutoff_square', self.cutoff_square)
-
-    def infer_shape(self, uint_crd, ljtype, charge, scaler, nl_numbers, nl_serial, d_lj_a, d_lj_b):
-        cls_name = self.name
-        n = self.atom_numbers
-        q = d_lj_a[0]
-        validator.check_int(len(uint_crd), 2, Rel.EQ, "uint_crd_dim", cls_name)
-        validator.check_int(len(ljtype), 1, Rel.EQ, "LJtype_dim", cls_name)
-        validator.check_int(len(charge), 1, Rel.EQ, "charge_dim", cls_name)
-        validator.check_int(len(scaler), 1, Rel.EQ, "scaler_dim", cls_name)
-        validator.check_int(len(nl_numbers), 1, Rel.EQ, "nl_numbers_dim", cls_name)
-        validator.check_int(len(nl_serial), 2, Rel.EQ, "nl_serial_dim", cls_name)
-        validator.check_int(len(d_lj_b), 1, Rel.EQ, "d_LJ_B_dim", cls_name)
-
-        validator.check_int(uint_crd[0], n, Rel.EQ, "uint_crd_shape[0]", cls_name)
-        validator.check_int(uint_crd[1], 3, Rel.EQ, "uint_crd_shape[1]", cls_name)
-        validator.check_int(ljtype[0], n, Rel.EQ, "LJtype_shape", cls_name)
-        validator.check_int(charge[0], n, Rel.EQ, "charge_shape", cls_name)
-        validator.check_int(scaler[0], 3, Rel.EQ, "scaler_shape", cls_name)
-        validator.check_int(nl_numbers[0], n, Rel.EQ, "nl_numbers_shape", cls_name)
-        validator.check_int(nl_serial[0], n, Rel.EQ, "nl_serial_shape[0]", cls_name)
-        validator.check_int(nl_serial[1], 800, Rel.LE, "nl_serial_shape[1]", cls_name)
-        validator.check_int(len(d_lj_a), 1, Rel.EQ, "d_LJ_A_dim", cls_name)
-        validator.check_int(d_lj_a[0], q, Rel.EQ, "d_LJ_A_shape[0]", cls_name)
-        validator.check_int(d_lj_b[0], q, Rel.EQ, "d_LJ_B_shape[0]", cls_name)
-        return charge
-
-    def infer_dtype(self, uint_crd, ljtype, charge, scaler, nl_numbers, nl_serial, d_lj_a, d_lj_b):
-        validator.check_tensor_dtype_valid('uint_crd', uint_crd, [mstype.uint32], self.name)
-        validator.check_tensor_dtype_valid('LJtype', ljtype, [mstype.int32], self.name)
-        validator.check_tensor_dtype_valid('charge', charge, [mstype.float32], self.name)
-        validator.check_tensor_dtype_valid('scaler', scaler, [mstype.float32], self.name)
-        validator.check_tensor_dtype_valid('nl_numbers', nl_numbers, [mstype.int32], self.name)
-        validator.check_tensor_dtype_valid('nl_serial', nl_serial, [mstype.int32], self.name)
-        validator.check_tensor_dtype_valid('d_LJ_A', d_lj_a, [mstype.float32], self.name)
-        validator.check_tensor_dtype_valid('d_LJ_B', d_lj_b, [mstype.float32], self.name)
-        return charge
-
-
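A minimal sketch of the removed LJEnergy op (hypothetical values; assumes the op remains importable from mindspore.ops.operations on GPU):

    import numpy as np
    import mindspore as ms
    import mindspore.ops.operations as P  # assumed import path

    n, p = 4, 2                 # hypothetical: atoms, Lennard-Jones types
    q = p * (p + 1) // 2        # number of pair types, per the docstring above
    op = P.LJEnergy(atom_numbers=n, cutoff_square=100.0)
    uint_crd = ms.Tensor(np.zeros((n, 3), np.uint32))
    lj_type = ms.Tensor(np.zeros(n, np.int32))
    charge = ms.Tensor(np.ones(n, np.float32))
    scaler = ms.Tensor(np.ones(3, np.float32))
    nl_numbers = ms.Tensor(np.zeros(n, np.int32))
    nl_serial = ms.Tensor(np.zeros((n, 800), np.int32))
    d_lj_a = ms.Tensor(np.ones(q, np.float32))
    d_lj_b = ms.Tensor(np.ones(q, np.float32))
    # per-atom Lennard-Jones energy: float32, shape (n,)
    energy = op(uint_crd, lj_type, charge, scaler, nl_numbers, nl_serial, d_lj_a, d_lj_b)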
-class LJForce(PrimitiveWithInfer):
-    """
-    Calculate the Van der Waals interaction force described by the Lennard-Jones
-    potential energy for each atom.
-
-    Because there are a large number of inputs and each of them is related,
-    there is no way to construct `Examples` using random methods. For details, refer to the webpage `SPONGE in MindSpore
-    `_.
-
-    .. math::
-
-        dr = (x_a - x_b, y_a - y_b, z_a - z_b)
-
-    .. math::
-
-        F = \left(-\frac{12A}{|dr|^{14}} + \frac{6B}{|dr|^{8}}\right) \cdot dr
-
-    Args:
-        atom_numbers(int32): the number of atoms, n.
-        cutoff_square(float32): the square value of cutoff.
-
-    Inputs:
-        - **uint_crd** (Tensor) - The unsigned int coordinate value of each atom.
-          The data type is uint32 and the shape is :math:`(n, 3)`.
-        - **LJtype** (Tensor) - The Lennard-Jones type of each atom.
-          The data type is int32 and the shape is :math:`(n,)`.
-        - **charge** (Tensor) - The charge carried by each atom.
-          The data type is float32 and the shape is :math:`(n,)`.
-        - **scaler** (Tensor) - The scale factor between real space
-          coordinates and its unsigned int value. The data type is float32 and the shape is :math:`(3,)`.
-        - **nl_numbers** (Tensor) - The neighbor number of each atom.
-          The data type is int32 and the shape is :math:`(n,)`.
-        - **nl_serial** (Tensor) - The neighbor list of each atom, the max number is 800.
-          The data type is int32 and the shape is :math:`(n, 800)`.
-        - **d_LJ_A** (Tensor) - The Lennard-Jones A coefficient of each kind of atom pair.
-          q is the number of atom pairs. The data type is float32 and the shape is :math:`(q,)`.
-        - **d_LJ_B** (Tensor) - The Lennard-Jones B coefficient of each kind of atom pair.
-          q is the number of atom pairs. The data type is float32 and the shape is :math:`(q,)`.
-
-    Outputs:
-        - **frc** (Tensor) - The force felt by each atom.
-          The data type is float32 and the shape is :math:`(n, 3)`.
-
-    Supported Platforms:
-        ``GPU``
-    """
-
-    @prim_attr_register
-    def __init__(self, atom_numbers, cutoff_square):
-        """Initialize LJForce."""
-        validator.check_value_type('atom_numbers', atom_numbers, int, self.name)
-        validator.check_value_type('cutoff_square', cutoff_square, float, self.name)
-        self.atom_numbers = atom_numbers
-        self.cutoff_square = cutoff_square
-        self.init_prim_io_names(
-            inputs=['uint_crd', 'LJtype', 'charge', 'scaler', 'nl_numbers', 'nl_serial', 'd_LJ_A', 'd_LJ_B'],
-            outputs=['frc'])
-        self.add_prim_attr('atom_numbers', self.atom_numbers)
-        self.add_prim_attr('cutoff_square', self.cutoff_square)
-
-    def infer_shape(self, uint_crd, ljtype, charge, scaler, nl_numbers, nl_serial, d_lj_a, d_lj_b):
-        cls_name = self.name
-        n = self.atom_numbers
-        q = d_lj_a[0]
-        validator.check_int(len(uint_crd), 2, Rel.EQ, "uint_crd_dim", cls_name)
-        validator.check_int(len(ljtype), 1, Rel.EQ, "LJtype_dim", cls_name)
-        validator.check_int(len(charge), 1, Rel.EQ, "charge_dim", cls_name)
-        validator.check_int(len(scaler), 1, Rel.EQ, "scaler_dim", cls_name)
-        validator.check_int(len(nl_numbers), 1, Rel.EQ, "nl_numbers_dim", cls_name)
-        validator.check_int(len(nl_serial), 2, Rel.EQ, "nl_serial_dim", cls_name)
-        validator.check_int(len(d_lj_b), 1, Rel.EQ, "d_LJ_B_dim", cls_name)
-
-        validator.check_int(uint_crd[0], n, Rel.EQ, "uint_crd_shape[0]", cls_name)
-        validator.check_int(uint_crd[1], 3, Rel.EQ, "uint_crd_shape[1]", cls_name)
-        validator.check_int(ljtype[0], n, Rel.EQ, "LJtype_shape", cls_name)
-        validator.check_int(charge[0], n, Rel.EQ, "charge_shape", cls_name)
-        validator.check_int(scaler[0], 3, Rel.EQ, "scaler_shape", cls_name)
-        validator.check_int(nl_numbers[0], n, Rel.EQ, "nl_numbers_shape", cls_name)
-        validator.check_int(nl_serial[0], n, Rel.EQ, "nl_serial_shape[0]", cls_name)
-        validator.check_int(nl_serial[1], 800, Rel.EQ, "nl_serial_shape[1]", cls_name)
-        validator.check_int(len(d_lj_a), 1, Rel.EQ, "d_LJ_A_dim", cls_name)
-        validator.check_int(d_lj_a[0], q, Rel.EQ, "d_LJ_A_shape[0]", cls_name)
-        validator.check_int(d_lj_b[0], q, Rel.EQ, "d_LJ_B_shape[0]", cls_name)
-        return uint_crd
-
-    def infer_dtype(self, uint_crd, ljtype, charge, scaler, nl_numbers, nl_serial, d_lj_a, d_lj_b):
-        validator.check_tensor_dtype_valid('uint_crd', uint_crd, [mstype.uint32], self.name)
-        validator.check_tensor_dtype_valid('LJtype', ljtype, [mstype.int32], self.name)
-        validator.check_tensor_dtype_valid('charge', charge, [mstype.float32], self.name)
-        validator.check_tensor_dtype_valid('scaler', scaler, [mstype.float32], self.name)
-        validator.check_tensor_dtype_valid('nl_numbers', nl_numbers, [mstype.int32], self.name)
-        validator.check_tensor_dtype_valid('nl_serial', nl_serial, [mstype.int32], self.name)
-        validator.check_tensor_dtype_valid('d_LJ_A', d_lj_a, [mstype.float32], self.name)
-        validator.check_tensor_dtype_valid('d_LJ_B', d_lj_b, [mstype.float32], self.name)
-        return charge
-
-
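LJForce takes exactly the same eight inputs as LJEnergy but returns the per-atom force instead of the energy; continuing the LJEnergy sketch above (same hypothetical tensors):

    frc = P.LJForce(atom_numbers=n, cutoff_square=100.0)(
        uint_crd, lj_type, charge, scaler, nl_numbers, nl_serial, d_lj_a, d_lj_b)  # float32, shape (n, 3)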
-class LJForceWithPMEDirectForce(PrimitiveWithInfer):
-    """
-    Calculate the Lennard-Jones force and the PME direct force together.
-
-    The calculation formula of the Lennard-Jones part is the same as operator
-    LJForce(), and the PME direct part is within the PME method.
-
-    Because there are a large number of inputs and each of them is related,
-    there is no way to construct `Examples` using random methods. For details, refer to the webpage `SPONGE in MindSpore
-    `_.
-
-    Args:
-        atom_numbers(int32): the number of atoms, n.
-        cutoff(float32): the cutoff value for the calculation.
-        pme_beta(float32): PME beta parameter, same as operator PMEReciprocalForce().
-
-    Inputs:
-        - **uint_crd** (Tensor) - The unsigned int coordinate value of each atom.
-          The data type is uint32 and the shape is :math:`(n, 3)`.
-        - **LJtype** (Tensor) - The Lennard-Jones type of each atom.
-          The data type is int32 and the shape is :math:`(n,)`.
-        - **charge** (Tensor) - The charge carried by each atom.
-          The data type is float32 and the shape is :math:`(n,)`.
-        - **scaler** (Tensor) - The scale factor between real
-          space coordinates and its unsigned int value.
-          The data type is float32 and the shape is :math:`(3,)`.
-        - **nl_numbers** (Tensor) - The neighbor number of each atom.
-          The data type is int32 and the shape is :math:`(n,)`.
-        - **nl_serial** (Tensor) - The neighbor list of each atom, the max number is 800.
-          The data type is int32 and the shape is :math:`(n, 800)`.
-        - **d_LJ_A** (Tensor) - The Lennard-Jones A coefficient of each kind of atom pair.
-          q is the number of atom pairs. The data type is float32 and the shape is :math:`(q,)`.
-        - **d_LJ_B** (Tensor) - The Lennard-Jones B coefficient of each kind of atom pair.
-          q is the number of atom pairs. The data type is float32 and the shape is :math:`(q,)`.
-
-    Outputs:
-        - **frc** (Tensor) - The force felt by each atom.
-          The data type is float32 and the shape is :math:`(n, 3)`.
-
-    Supported Platforms:
-        ``GPU``
-    """
-
-    @prim_attr_register
-    def __init__(self, atom_numbers, cutoff, pme_beta):
-        """Initialize LJForceWithPMEDirectForce."""
-        validator.check_value_type('atom_numbers', atom_numbers, int, self.name)
-        validator.check_value_type('cutoff', cutoff, float, self.name)
-        validator.check_value_type('pme_beta', pme_beta, float, self.name)
-        self.atom_numbers = atom_numbers
-        self.cutoff = cutoff
-        self.pme_beta = pme_beta
-        self.init_prim_io_names(
-            inputs=['uint_crd', 'LJtype', 'charge', 'scaler', 'nl_numbers', 'nl_serial', 'd_LJ_A', 'd_LJ_B'],
-            outputs=['frc'])
-        self.add_prim_attr('atom_numbers', self.atom_numbers)
-        self.add_prim_attr('cutoff', self.cutoff)
-        self.add_prim_attr('pme_beta', self.pme_beta)
-
-    def infer_shape(self, uint_crd, ljtype, charge, scaler, nl_numbers, nl_serial, d_lj_a, d_lj_b):
-        cls_name = self.name
-        n = self.atom_numbers
-        q = d_lj_a[0]
-        validator.check_int(len(uint_crd), 2, Rel.EQ, "uint_crd_dim", cls_name)
-        validator.check_int(len(ljtype), 1, Rel.EQ, "LJtype_dim", cls_name)
-        validator.check_int(len(charge), 1, Rel.EQ, "charge_dim", cls_name)
-        validator.check_int(len(scaler), 1, Rel.EQ, "scaler_dim", cls_name)
-        validator.check_int(len(nl_numbers), 1, Rel.EQ, "nl_numbers_dim", cls_name)
-        validator.check_int(len(nl_serial), 2, Rel.EQ, "nl_serial_dim", cls_name)
-        validator.check_int(len(d_lj_b), 1, Rel.EQ, "d_LJ_B_dim", cls_name)
-
-        validator.check_int(uint_crd[0], n, Rel.EQ, "uint_crd_shape[0]", cls_name)
-        validator.check_int(uint_crd[1], 3, Rel.EQ, "uint_crd_shape[1]", cls_name)
-        validator.check_int(ljtype[0], n, Rel.EQ, "LJtype_shape", cls_name)
-        validator.check_int(charge[0], n, Rel.EQ, "charge_shape", cls_name)
-        validator.check_int(scaler[0], 3, Rel.EQ, "scaler_shape", cls_name)
-        validator.check_int(nl_numbers[0], n, Rel.EQ, "nl_numbers_shape", cls_name)
-        validator.check_int(nl_serial[0], n, Rel.EQ, "nl_serial_shape[0]", cls_name)
-        validator.check_int(nl_serial[1], 800, Rel.EQ, "nl_serial_shape[1]", cls_name)
-        validator.check_int(len(d_lj_a), 1, Rel.EQ, "d_LJ_A_dim", cls_name)
-        validator.check_int(d_lj_a[0], q, Rel.EQ, "d_LJ_A_shape[0]", cls_name)
-        validator.check_int(d_lj_b[0], q, Rel.EQ, "d_LJ_B_shape[0]", cls_name)
-        return uint_crd
-
-    def infer_dtype(self, uint_crd, ljtype, charge, scaler, nl_numbers, nl_serial, d_lj_a, d_lj_b):
-        validator.check_tensor_dtype_valid('uint_crd', uint_crd, [mstype.uint32], self.name)
-        validator.check_tensor_dtype_valid('LJtype', ljtype, [mstype.int32], self.name)
-        validator.check_tensor_dtype_valid('charge', charge, [mstype.float32], self.name)
-        validator.check_tensor_dtype_valid('scaler', scaler, [mstype.float32], self.name)
-        validator.check_tensor_dtype_valid('nl_numbers', nl_numbers, [mstype.int32], self.name)
-        validator.check_tensor_dtype_valid('nl_serial', nl_serial, [mstype.int32], self.name)
-        validator.check_tensor_dtype_valid('d_LJ_A', d_lj_a, [mstype.float32], self.name)
-        validator.check_tensor_dtype_valid('d_LJ_B', d_lj_b, [mstype.float32], self.name)
-        return charge
-
-
-class MDTemperature(PrimitiveWithInfer):
-    """
-    Compute the MD temperature.
-
-    Because there are a large number of inputs and each of them is related,
-    there is no way to construct `Examples` using random methods. For details, refer to the webpage `SPONGE in MindSpore
-    `_.
-
-    Args:
-        residue_numbers (int32): the number of residues m.
-        atom_numbers (int32): the number of atoms n.
-
-    Inputs:
-        - **start** (Tensor) - The start atom index of each residue.
-          The data type is int32 and the shape is :math:`(m,)`.
-        - **end** (Tensor) - The end atom index of each residue.
-          The data type is int32 and the shape is :math:`(m,)`.
-        - **atom_vel_f** (Tensor) - The velocity of each atom.
-          The data type is float32 and the shape is :math:`(n, 3)`.
-        - **atom_mass** (Tensor) - The mass of each atom.
-          The data type is float32 and the shape is :math:`(n,)`.
-
-    Outputs:
-        - **ek** (Tensor) - The temperature of each residue.
-          The data type is float32 and the shape is :math:`(m,)`.
-
-    Supported Platforms:
-        ``GPU``
-    """
-
-    @prim_attr_register
-    def __init__(self, residue_numbers, atom_numbers):
-        """Initialize MDTemperature."""
-        validator.check_value_type('residue_numbers', residue_numbers, int, self.name)
-        validator.check_value_type('atom_numbers', atom_numbers, int, self.name)
-        self.residue_numbers = residue_numbers
-        self.atom_numbers = atom_numbers
-        self.add_prim_attr('residue_numbers', self.residue_numbers)
-        self.add_prim_attr('atom_numbers', self.atom_numbers)
-        self.init_prim_io_names(
-            inputs=['start', 'end', 'atom_vel_f', 'atom_mass'],
-            outputs=['ek'])
-
-    def infer_shape(self, start_shape, end_shape, atom_vel_f_shape, atom_mass_shape):
-        cls_name = self.name
-        n = self.residue_numbers
-        m = self.atom_numbers
-        validator.check_int(len(start_shape), 1, Rel.EQ, "start", cls_name)
-        validator.check_int(start_shape[0], n, Rel.EQ, "end", cls_name)
-        validator.check_int(len(end_shape), 1, Rel.EQ, "start", cls_name)
-        validator.check_int(end_shape[0], n, Rel.EQ, "end", cls_name)
-        validator.check_int(atom_vel_f_shape[0], m, Rel.EQ, "atom_vel_f", cls_name)
-        validator.check_int(atom_vel_f_shape[1], 3, Rel.EQ, "atom_vel_f", cls_name)
-        validator.check_int(len(atom_mass_shape), 1, Rel.EQ, "atom_mass", cls_name)
-        validator.check_int(atom_mass_shape[0], m, Rel.EQ, "atom_mass", cls_name)
-        return [n,]
-
-    def infer_dtype(self, start_dtype, end_dtype, atom_vel_f_dtype, atom_mass_dtype):
-        validator.check_tensor_dtype_valid('start', start_dtype, [mstype.int32], self.name)
-        validator.check_tensor_dtype_valid('end', end_dtype, [mstype.int32], self.name)
-        validator.check_tensor_dtype_valid('atom_vel_f', atom_vel_f_dtype, [mstype.float32], self.name)
-        validator.check_tensor_dtype_valid('atom_mass', atom_mass_dtype, [mstype.float32], self.name)
-        return atom_mass_dtype
-
-
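A sketch of the removed MDTemperature op, which reduces per-atom velocities and masses to one temperature value per residue (hypothetical values; assumes the op remains importable from mindspore.ops.operations on GPU):

    import numpy as np
    import mindspore as ms
    import mindspore.ops.operations as P  # assumed import path

    m, n = 2, 6  # hypothetical: residues, atoms
    op = P.MDTemperature(residue_numbers=m, atom_numbers=n)
    start = ms.Tensor(np.array([0, 3], np.int32))   # first atom of each residue
    end = ms.Tensor(np.array([3, 6], np.int32))     # one past the last atom of each residue
    atom_vel_f = ms.Tensor(np.random.randn(n, 3).astype(np.float32))
    atom_mass = ms.Tensor(np.ones(n, np.float32))
    ek = op(start, end, atom_vel_f, atom_mass)      # per-residue temperature: float32, shape (m,)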
-class MDIterationLeapFrogWithRF(PrimitiveWithInfer):
-    """
-    One step of the classical leap frog algorithm to solve the finite difference
-    Hamiltonian equations of motion for a certain system, using Langevin dynamics
-    with Liu's thermostat scheme. Assume the number of atoms is n and the target
-    control temperature is T.
-
-    The detailed iteration formula can be found in this paper: A unified thermostat
-    scheme for efficient configurational sampling for classical/quantum canonical
-    ensembles via molecular dynamics. DOI: 10.1063/1.4991621.
-
-    Because there are a large number of inputs and each of them is related,
-    there is no way to construct `Examples` using random methods. For details, refer to the webpage `SPONGE in MindSpore
-    `_.
-
-    Inputs:
-        - **float4_numbers** (Scalar) - total length to store random numbers.
-          The data type is int32.
-        - **atom_numbers** (Scalar) - The number of atoms n.
-          The data type is int32.
-        - **dt** (Scalar) - time step for finite difference. The data type is float32.
-        - **half_dt** (Scalar) - half of time step for finite difference.
-          The data type is float32.
-        - **exp_gamma** (Scalar) - parameter in Liu's dynamics, equals
-          exp(-gamma_ln * dt), where gamma_ln is the friction factor in Langevin
-          dynamics. The data type is float32.
-        - **max_velocity** (Scalar) - The upper limit of velocity; when the
-          velocity overflows, scale it to the upper limit. The data type is float32.
-        - **is_max_velocity** (Scalar) - whether the max velocity control is
-          open or not. The data type is int32.
-        - **mass_inverse** (Tensor) - The inverse value of
-          the mass of each atom. The data type is float32 and the shape is :math:`(n,)`.
-        - **sqrt_mass** (Tensor) - The inverse square root value
-          of the effective mass in Liu's dynamics of each atom.
-          The data type is float32 and the shape is :math:`(n,)`.
-        - **vel** (Tensor) - The velocity of each atom.
-          The data type is float32 and the shape is :math:`(n, 3)`.
-        - **crd** (Tensor) - The coordinate of each atom.
-          The data type is float32 and the shape is :math:`(n, 3)`.
-        - **frc** (Tensor) - The force felt by each atom.
-          The data type is float32 and the shape is :math:`(n, 3)`.
-        - **acc** (Tensor) - The acceleration of each atom.
-          The data type is float32 and the shape is :math:`(n, 3)`.
-        - **random_force** (Tensor) - The random forces.
-          The data type is float32 and the shape is :math:`(n, 3)`.
-
-    Outputs:
-        - **res** (Scalar) - The data type is float32.
-
-    Supported Platforms:
-        ``GPU``
-    """
-
-    @prim_attr_register
-    def __init__(self, float4_numbers, atom_numbers, half_dt, dt, exp_gamma, is_max_velocity, max_velocity):
-        """Initialize MDIterationLeapFrogWithRF."""
-        validator.check_value_type('float4_numbers', float4_numbers, int, self.name)
-        validator.check_value_type('atom_numbers', atom_numbers, int, self.name)
-        validator.check_value_type('half_dt', half_dt, float, self.name)
-        validator.check_value_type('dt', dt, float, self.name)
-        validator.check_value_type('exp_gamma', exp_gamma, float, self.name)
-        validator.check_value_type('is_max_velocity', is_max_velocity, int, self.name)
-        validator.check_value_type('max_velocity', max_velocity, float, self.name)
-        self.float4_numbers = float4_numbers
-        self.atom_numbers = atom_numbers
-        self.half_dt = half_dt
-        self.dt = dt
-        self.exp_gamma = exp_gamma
-        self.is_max_velocity = is_max_velocity
-        self.max_velocity = max_velocity
-
-        self.init_prim_io_names(
-            inputs=['mass_inverse', 'sqrt_mass', 'vel_in', 'crd_in', 'frc_in', 'acc_in', 'random_force'],
-            outputs=['res'])
-        self.add_prim_attr('float4_numbers', self.float4_numbers)
-        self.add_prim_attr('atom_numbers', self.atom_numbers)
-        self.add_prim_attr('half_dt', self.half_dt)
-        self.add_prim_attr('dt', self.dt)
-        self.add_prim_attr('exp_gamma', self.exp_gamma)
-        self.add_prim_attr('is_max_velocity', self.is_max_velocity)
-        self.add_prim_attr('max_velocity', self.max_velocity)
-
-    def infer_shape(self, mass_inverse_shape, sqrt_mass_shape, vel_in_shape, crd_in_shape, frc_in_shape, acc_in_shape,
-                    random_force_shape):
-        n = self.atom_numbers
-        validator.check_int(len(mass_inverse_shape), 1, Rel.EQ, "mass_inverse", self.name)
-        validator.check_int(len(sqrt_mass_shape), 1, Rel.EQ, "mass_inverse", self.name)
-        validator.check_int(mass_inverse_shape[0], n, Rel.EQ, "mass_inverse", self.name)
-        validator.check_int(sqrt_mass_shape[0], n, Rel.EQ, "mass_inverse", self.name)
-        validator.check_int(vel_in_shape[0], n, Rel.EQ, "vel_in", self.name)
-        validator.check_int(vel_in_shape[1], 3, Rel.EQ, "vel_in", self.name)
-        validator.check_int(crd_in_shape[0], n, Rel.EQ, "crd_in", self.name)
-        validator.check_int(crd_in_shape[1], 3, Rel.EQ, "crd_in", self.name)
-        validator.check_int(frc_in_shape[0], n, Rel.EQ, "frc_in", self.name)
-        validator.check_int(frc_in_shape[1], 3, Rel.EQ, "frc_in", self.name)
-        validator.check_int(acc_in_shape[0], n, Rel.EQ, "acc_in", self.name)
-        validator.check_int(acc_in_shape[1], 3, Rel.EQ, "acc_in", self.name)
-        validator.check_int(random_force_shape[0], n, Rel.EQ, "random_force", self.name)
-        validator.check_int(random_force_shape[1], 3, Rel.EQ, "random_force", self.name)
-
-        return [1,]
-
-    def infer_dtype(self, mass_inverse_dtype, sqrt_mass_dtype, vel_in_dtype, crd_in_dtype, frc_in_dtype, acc_in_dtype,
-                    rf_dtype):
-        validator.check_tensor_dtype_valid('mass_inverse', mass_inverse_dtype, [mstype.float32], self.name)
-        validator.check_tensor_dtype_valid('sqrt_mass', sqrt_mass_dtype, [mstype.float32], self.name)
-        validator.check_tensor_dtype_valid('vel_in', vel_in_dtype, [mstype.float32], self.name)
-        validator.check_tensor_dtype_valid('crd_in', crd_in_dtype, [mstype.float32], self.name)
-        validator.check_tensor_dtype_valid('frc_in', frc_in_dtype, [mstype.float32], self.name)
-        validator.check_tensor_dtype_valid('acc_in', acc_in_dtype, [mstype.float32], self.name)
-        validator.check_tensor_dtype_valid('rf', rf_dtype, [mstype.float32], self.name)
-        return mstype.float32
-
-
-class MDIterationLeapFrogLiujian(PrimitiveWithInfer):
-    """
-    One step of the classical leap frog algorithm to solve the finite difference
-    Hamiltonian equations of motion for a certain system, using Langevin dynamics
-    with Liu's thermostat scheme. Assume the number of atoms is n and the target
-    control temperature is T.
-
-    The detailed iteration formula can be found in this paper: A unified thermostat
-    scheme for efficient configurational sampling for classical/quantum canonical
-    ensembles via molecular dynamics. DOI: 10.1063/1.4991621.
-
-    Because there are a large number of inputs and each of them is related,
-    there is no way to construct `Examples` using random methods. For details, refer to the webpage `SPONGE in MindSpore
-    `_.
-
-    Args:
-        atom_numbers(int32): the number of atoms n.
-        dt(float32): time step for finite difference.
-        half_dt(float32): half of time step for finite difference.
-        exp_gamma(float32): parameter in Liu's dynamics.
-
-    Inputs:
-        - **inverse_mass** (Tensor) - The inverse value of
-          the mass of each atom. The data type is float32 and the shape is :math:`(n,)`.
-        - **sqrt_mass_inverse** (Tensor) - The inverse square root value
-          of the effective mass in Liu's dynamics of each atom.
-          The data type is float32 and the shape is :math:`(n,)`.
-        - **vel** (Tensor) - The velocity of each atom.
-          The data type is float32 and the shape is :math:`(n, 3)`.
-        - **crd** (Tensor) - The coordinate of each atom.
-          The data type is float32 and the shape is :math:`(n, 3)`.
-        - **frc** (Tensor) - The force felt by each atom.
-          The data type is float32 and the shape is :math:`(n, 3)`.
-        - **acc** (Tensor) - The acceleration of each atom.
-          The data type is float32 and the shape is :math:`(n, 3)`.
-        - **rand_state** (Tensor) - Random state used to generate the
-          random force. The data type is float32 and the shape is :math:`(ceil(n * 3 / 4) * 16,)`.
-        - **rand_frc** (Tensor) - The random forces.
-          The data type is float32 and the shape is :math:`(n, 3)`.
-
-    Outputs:
-        - **output** (Tensor) - The output coordinates.
-          The data type is float32, and the shape is :math:`(n, 3)`.
-
-    Supported Platforms:
-        ``GPU``
-    """
-
-    @prim_attr_register
-    def __init__(self, atom_numbers, half_dt, dt, exp_gamma):
-        """Initialize MDIterationLeapFrogLiujian."""
-        self.atom_numbers = atom_numbers
-        self.half_dt = half_dt
-        self.dt = dt
-        self.exp_gamma = exp_gamma
-
-        self.add_prim_attr('atom_numbers', self.atom_numbers)
-        self.add_prim_attr('half_dt', self.half_dt)
-        self.add_prim_attr('dt', self.dt)
-        self.add_prim_attr('exp_gamma', self.exp_gamma)
-        self.init_prim_io_names(
-            inputs=['inverse_mass', 'sqrt_mass_inverse', 'vel', 'crd', 'frc', 'acc', 'rand_state', 'rand_frc'],
-            outputs=['output'])
-        self.add_prim_attr('side_effect_mem', True)
-
-    def infer_shape(self, inverse_mass, sqrt_mass_inverse, vel, crd, frc, acc, rand_state, rand_frc):
-        n = self.atom_numbers
-        validator.check_int(len(inverse_mass), 1, Rel.EQ, "inverse_mass", self.name)
-        validator.check_int(len(sqrt_mass_inverse), 1, Rel.EQ, "sqrt_mass_inverse", self.name)
-        validator.check_int(inverse_mass[0], n, Rel.EQ, "inverse_mass", self.name)
-        validator.check_int(sqrt_mass_inverse[0], n, Rel.EQ, "sqrt_mass_inverse", self.name)
-        validator.check_int(len(rand_state), 1, Rel.EQ, "rand_state_dim", self.name)
-        validator.check_int(len(rand_frc), 2, Rel.EQ, "rand_frc_dim", self.name)
-        validator.check_int(len(vel), 2, Rel.EQ, "vel_dim", self.name)
-        validator.check_int(len(crd), 2, Rel.EQ, "crd_dim", self.name)
-        validator.check_int(len(frc), 2, Rel.EQ, "frc_dim", self.name)
-        validator.check_int(len(acc), 2, Rel.EQ, "acc_dim", self.name)
-        validator.check_int(vel[0], n, Rel.EQ, "vel_shape[0]", self.name)
-        validator.check_int(vel[1], 3, Rel.EQ, "vel_shape[1]", self.name)
-        validator.check_int(crd[0], n, Rel.EQ, "crd_shape[0]", self.name)
-        validator.check_int(crd[1], 3, Rel.EQ, "crd_shape[1]", self.name)
-        validator.check_int(frc[0], n, Rel.EQ, "frc_shape[0]", self.name)
-        validator.check_int(frc[1], 3, Rel.EQ, "frc_shape[1]", self.name)
-        validator.check_int(acc[0], n, Rel.EQ, "acc_shape[0]", self.name)
-        validator.check_int(acc[1], 3, Rel.EQ, "acc_shape[1]", self.name)
-        validator.check_int(rand_frc[0], n, Rel.EQ, "rand_frc_shape[0]", self.name)
-        validator.check_int(rand_frc[1], 3, Rel.EQ, "rand_frc_shape[1]", self.name)
-        validator.check_int(rand_state[0], math.ceil(self.atom_numbers * 3 / 4.0) * 16, Rel.EQ, "rand_state", self.name)
-        return [self.atom_numbers, 3]
-
-    def infer_dtype(self, inverse_mass, sqrt_mass_inverse, vel, crd, frc, acc, rand_state, rand_frc):
-        validator.check_tensor_dtype_valid('inverse_mass', inverse_mass, [mstype.float32], self.name)
-        validator.check_tensor_dtype_valid('sqrt_mass_inverse', sqrt_mass_inverse, [mstype.float32], self.name)
-        validator.check_tensor_dtype_valid('vel', vel, [mstype.float32], self.name)
-        validator.check_tensor_dtype_valid('crd', crd, [mstype.float32], self.name)
-        validator.check_tensor_dtype_valid('frc', frc, [mstype.float32], self.name)
-        validator.check_tensor_dtype_valid('acc', acc, [mstype.float32], self.name)
-        validator.check_tensor_dtype_valid('rand_frc', rand_frc, [mstype.float32], self.name)
-        validator.check_tensor_dtype_valid('rand_state', rand_state, [mstype.float32], self.name)
-        return mstype.float32
-
-
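One integration step with the removed MDIterationLeapFrogLiujian op; note the rand_state length must equal ceil(n * 3 / 4) * 16, matching the check in infer_shape above (hypothetical values; assumes the op remains importable from mindspore.ops.operations on GPU):

    import math
    import numpy as np
    import mindspore as ms
    import mindspore.ops.operations as P  # assumed import path

    n = 4  # hypothetical atom count
    op = P.MDIterationLeapFrogLiujian(atom_numbers=n, half_dt=5e-4, dt=1e-3, exp_gamma=0.99)
    inverse_mass = ms.Tensor(np.ones(n, np.float32))
    sqrt_mass_inverse = ms.Tensor(np.ones(n, np.float32))
    vel = ms.Tensor(np.zeros((n, 3), np.float32))
    crd = ms.Tensor(np.zeros((n, 3), np.float32))
    frc = ms.Tensor(np.zeros((n, 3), np.float32))
    acc = ms.Tensor(np.zeros((n, 3), np.float32))
    rand_state = ms.Tensor(np.zeros(math.ceil(n * 3 / 4.0) * 16, np.float32))
    rand_frc = ms.Tensor(np.zeros((n, 3), np.float32))
    new_crd = op(inverse_mass, sqrt_mass_inverse, vel, crd, frc, acc, rand_state, rand_frc)  # (n, 3)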
-class CrdToUintCrd(PrimitiveWithInfer):
-    """
-    Convert FP32 coordinates to Uint32 coordinates.
-
-    Because there are a large number of inputs and each of them is related,
-    there is no way to construct `Examples` using random methods. For details, refer to the webpage `SPONGE in MindSpore
-    `_.
-
-    Args:
-        atom_numbers(int32): the number of atoms n.
-
-    Inputs:
-        - **crd_to_uint_crd_cof** (Tensor) - The scale factor
-          between the unsigned int value and the real space coordinates.
-          The data type is float32 and the shape is :math:`(3,)`.
-        - **crd** (Tensor) - The coordinate of each atom.
-          The data type is float32 and the shape is :math:`(n, 3)`.
-
-    Outputs:
-        - **output** (Tensor) - The unsigned int coordinate of each atom.
-          The data type is uint32 and the shape is :math:`(n, 3)`.
-
-    Supported Platforms:
-        ``GPU``
-    """
-
-    @prim_attr_register
-    def __init__(self, atom_numbers):
-        """Initialize CrdToUintCrd."""
-        self.atom_numbers = atom_numbers
-        self.add_prim_attr('atom_numbers', self.atom_numbers)
-        self.init_prim_io_names(
-            inputs=['crd_to_uint_crd_cof', 'crd'],
-            outputs=['output'])
-
-    def infer_shape(self, crd_to_uint_crd_cof, crd):
-        validator.check_int(crd_to_uint_crd_cof[0], 3, Rel.EQ, "crd_to_uint_crd_cof_shape", self.name)
-        validator.check_int(crd[0], self.atom_numbers, Rel.EQ, "crd[0]", self.name)
-        validator.check_int(crd[1], 3, Rel.EQ, "crd[1]", self.name)
-        return crd
-
-    def infer_dtype(self, crd_to_uint_crd_cof, crd):
-        validator.check_tensor_dtype_valid('crd_to_uint_crd_cof', crd_to_uint_crd_cof, [mstype.float32], self.name)
-        validator.check_tensor_dtype_valid('crd', crd, [mstype.float32], self.name)
-        return mstype.uint32
-
-
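A sketch of the removed CrdToUintCrd op. The exact convention for crd_to_uint_crd_cof is an assumption here: it is taken as the uint32 range divided by the box length, so that box coordinates map onto the full unsigned range (hypothetical values; assumes the op remains importable from mindspore.ops.operations on GPU):

    import numpy as np
    import mindspore as ms
    import mindspore.ops.operations as P  # assumed import path

    n = 4  # hypothetical atom count
    box = np.array([20.0, 20.0, 20.0], np.float32)
    cof = ms.Tensor((2.0 ** 32 / box).astype(np.float32))  # assumed scale-factor convention
    crd = ms.Tensor(np.random.uniform(0, 20, (n, 3)).astype(np.float32))
    uint_crd = P.CrdToUintCrd(atom_numbers=n)(cof, crd)  # uint32, shape (n, 3)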
-class MDIterationSetupRandState(PrimitiveWithInfer):
-    """
-    Compute the random state of the iteration.
-
-    Because there are a large number of inputs and each of them is related,
-    there is no way to construct `Examples` using random methods. For details, refer to the webpage `SPONGE in MindSpore
-    `_.
-
-    Args:
-        atom_numbers(int32): the number of atoms n.
-        seed(int32): random seed.
-
-    Outputs:
-        - **output** (Tensor) - random state.
-          The data type is float32 and the shape is :math:`(ceil(n * 3 / 4) * 16,)`.
-
-    Supported Platforms:
-        ``GPU``
-    """
-
-    @prim_attr_register
-    def __init__(self, atom_numbers, seed):
-        """Initialize MDIterationSetupRandState."""
-        self.atom_numbers = atom_numbers
-        self.seed = seed
-        self.add_prim_attr('atom_numbers', self.atom_numbers)
-        self.add_prim_attr('seed', self.seed)
-        self.init_prim_io_names(
-            inputs=[],
-            outputs=['output'])
-
-    def infer_shape(self):
-        float4_numbers = math.ceil(self.atom_numbers * 3 / 4.0)
-        curandsize = 64 / 4
-        return [float4_numbers * int(curandsize),]
-
-    def infer_dtype(self):
-        return mstype.float32
-
-
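MDIterationSetupRandState takes no tensor inputs and produces the rand_state buffer that MDIterationLeapFrogLiujian consumes; per infer_shape above the output length is ceil(n * 3 / 4) * 16. A sketch (hypothetical values; assumes the op remains importable from mindspore.ops.operations on GPU):

    import mindspore.ops.operations as P  # assumed import path

    n = 4  # hypothetical atom count
    rand_state = P.MDIterationSetupRandState(atom_numbers=n, seed=1)()
    # float32, shape (ceil(4 * 3 / 4) * 16,) = (48,); feed as rand_state to MDIterationLeapFrogLiujian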
-class TransferCrd(PrimitiveWithInfer):
-    """
-    Transfer the coordinates to angular and radial.
-
-    Because there are a large number of inputs and each of them is related,
-    there is no way to construct `Examples` using random methods. For details, refer to the webpage `SPONGE in MindSpore
-    `_.
-
-    Args:
-        start_serial(int32): the index start position.
-        end_serial(int32): the index end position.
-        number(int32): the length of angular and radial.
-        atom_numbers(int32): the number of atoms n.
-
-    Inputs:
-        - **crd** (Tensor) - The coordinate of each atom.
-          n is the number of atoms. The data type is float32 and the shape is :math:`(n, 3)`.
-        - **old_crd** (Tensor) - The last coordinate of each atom.
-          n is the number of atoms. The data type is float32 and the shape is :math:`(n, 3)`.
-        - **box** (Tensor) - The length of 3 dimensions of the simulation box.
-          The data type is float32 and the shape is :math:`(3,)`.
-
-    Outputs:
-        - **radial** (Tensor) - The array of radial transferred from coordinates.
-          The data type is float32 and the shape is :math:`(number,)`.
-        - **angular** (Tensor) - The array of angular transferred from coordinates.
-          The data type is float32 and the shape is :math:`(number,)`.
-        - **nowarp_crd** (Tensor) - The modified coordinate of each atom for
-          computing radial and angular. The data type is float32 and the shape is :math:`(n, 3)`.
-        - **box_map_times** (Tensor) - The box map times for radial and angular.
-          The data type is int32 and the shape is :math:`(n, 3)`.
-
-    Supported Platforms:
-        ``GPU``
-    """
-
-    @prim_attr_register
-    def __init__(self, start_serial, end_serial, number, atom_numbers):
-        """Initialize TransferCrd."""
-        validator.check_value_type('start_serial', start_serial, int, self.name)
-        validator.check_value_type('end_serial', end_serial, int, self.name)
-        validator.check_value_type('number', number, int, self.name)
-        validator.check_value_type('atom_numbers', atom_numbers, int, self.name)
-        self.start_serial = start_serial
-        self.end_serial = end_serial
-        self.number = number
-        self.atom_numbers = atom_numbers
-        self.add_prim_attr('start_serial', self.start_serial)
-        self.add_prim_attr('end_serial', self.end_serial)
-        self.add_prim_attr('number', self.number)
-        self.add_prim_attr('atom_numbers', self.atom_numbers)
-        self.init_prim_io_names(
-            inputs=['crd', 'old_crd', 'box'],
-            outputs=['radial', 'angular', 'nowarp_crd', 'box_map_times'])
-
-    def infer_shape(self, crd_shape, old_crd_shape, box_shape):
-        n = self.atom_numbers
-        validator.check_int(len(crd_shape), 2, Rel.EQ, "crd_dim", self.name)
-        validator.check_int(crd_shape[0], n, Rel.EQ, "crd_shape[0]", self.name)
-        validator.check_int(crd_shape[1], 3, Rel.EQ, "crd_shape[1]", self.name)
-        validator.check_int(len(old_crd_shape), 2, Rel.EQ, "old_crd_dim", self.name)
-        validator.check_int(old_crd_shape[0], n, Rel.EQ, "old_crd_shape[0]", self.name)
-        validator.check_int(old_crd_shape[1], 3, Rel.EQ, "old_crd_shape[1]", self.name)
-        validator.check_int(len(box_shape), 1, Rel.EQ, "box_dim", self.name)
-        validator.check_int(box_shape[0], 3, Rel.EQ, "box_shape[0]", self.name)
-        return [self.number,], [self.number,], [n, 3], [n, 3]
-
-    def infer_dtype(self, crd_dtype, old_crd_dtype, box_dtype):
-        validator.check_tensor_dtype_valid('crd', crd_dtype, [mstype.float32], self.name)
-        validator.check_tensor_dtype_valid('old_crd', old_crd_dtype, [mstype.float32], self.name)
-        validator.check_tensor_dtype_valid('box', box_dtype, [mstype.float32], self.name)
-        return mstype.float32, mstype.float32, mstype.float32, mstype.int32
-
-
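A sketch of the removed TransferCrd op, which returns the four outputs named in init_prim_io_names above (hypothetical values; assumes the op remains importable from mindspore.ops.operations on GPU):

    import numpy as np
    import mindspore as ms
    import mindspore.ops.operations as P  # assumed import path

    n, number = 6, 4  # hypothetical: atoms, length of radial/angular
    op = P.TransferCrd(start_serial=0, end_serial=4, number=number, atom_numbers=n)
    crd = ms.Tensor(np.random.uniform(0, 10, (n, 3)).astype(np.float32))
    old_crd = ms.Tensor(np.random.uniform(0, 10, (n, 3)).astype(np.float32))
    box = ms.Tensor(np.array([10.0, 10.0, 10.0], np.float32))
    # radial/angular: float32 (number,); nowarp_crd: float32 (n, 3); box_map_times: int32 (n, 3)
    radial, angular, nowarp_crd, box_map_times = op(crd, old_crd, box)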
-class PMEBatchedFFT2D(PrimitiveWithInfer):
-    """
-    FFT with N-dimensional input. Currently this only performs batched 2D
-    transforms; the transform direction is set by the `direction` attribute.
-
-    .. warning::
-        This is an experimental prototype that is subject to change and/or deletion.
-
-    Inputs:
-        - **input_tensor** (Tensor) - Three-dimensional tensor, supported
-          data type is complex64.
-
-    Outputs:
-        - **output_tensor** (Tensor) - The tensor after undergoing fast Fourier
-          transform, the data type is complex64.
-
-    Supported Platforms:
-        ``GPU``
-    """
-
-    @prim_attr_register
-    def __init__(self, direction):
-        self.init_prim_io_names(
-            inputs=['input_tensor'],
-            outputs=['output_tensor'])
-
-        validator.check_value_type('direction', direction, int, self.name)
-        self.direction = direction
-        self.add_prim_attr('direction', self.direction)
-
-    def infer_shape(self, input_shape):
-        self.add_prim_attr("fftx", input_shape[0])
-        self.add_prim_attr("ffty", input_shape[1])
-        self.add_prim_attr("fftz", input_shape[2])
-        return [input_shape[0], input_shape[1], input_shape[2]]
-
-    def infer_dtype(self, input_dtype):
-        validator.check_tensor_dtype_valid('input_tensor', input_dtype, [mstype.complex64], self.name)
-        return mstype.complex64
-
-
-class PMEFFT1D(PrimitiveWithInfer):
-    """
-    Forward FFT with One-Dimensional Input.
-
-    .. warning::
-        This is an experimental prototype that is subject to change and/or deletion.
-
-    Inputs:
-        - **input_tensor** (Tensor) - One-dimensional tensor, supported
-          data type is complex64.
-
-    Outputs:
-        - **output_tensor** (Tensor) - The tensor after undergoing fast Fourier
-          transform, the data type is complex64.
-
-    Supported Platforms:
-        ``GPU``
-    """
-
-    @prim_attr_register
-    def __init__(self):
-        self.init_prim_io_names(
-            inputs=['input_tensor'],
-            outputs=['output_tensor'])
-
-    def infer_shape(self, input_shape):
-        self.add_prim_attr('fftx', input_shape[0])
-        return [input_shape[0]]
-
-    def infer_dtype(self, input_dtype):
-        validator.check_tensor_dtype_valid('input_tensor', input_dtype, [mstype.complex64], self.name)
-        return mstype.complex64
-
-
-class PMEIFFT1D(PrimitiveWithInfer):
-    """
-    Inverse FFT with One-Dimensional Input.
-
-    .. warning::
-        This is an experimental prototype that is subject to change and/or deletion.
-
-    Inputs:
-        - **input_tensor** (Tensor) - One-dimensional input tensor, supported data
-          type is complex64.
-
-    Outputs:
-        - **output_tensor** (Tensor) - Returns the tensor after undergoing
-          inverse Fourier transform, the data type is complex64.
-
-    Supported Platforms:
-        ``GPU``
-    """
-
-    @prim_attr_register
-    def __init__(self):
-        self.init_prim_io_names(
-            inputs=['input_tensor'],
-            outputs=['output_tensor'])
-
-    def infer_shape(self, input_shape):
-        self.add_prim_attr('fftx', input_shape[0])
-        return [input_shape[0]]
-
-    def infer_dtype(self, input_dtype):
-        validator.check_tensor_dtype_valid('input_tensor', input_dtype, [mstype.complex64], self.name)
-        return mstype.complex64
-
-
-class PMEFFT2D(PrimitiveWithInfer):
-    """
-    Forward FFT with Two-Dimensional Input.
-
-    .. warning::
-        This is an experimental prototype that is subject to change and/or deletion.
-
-    Inputs:
-        - **input_tensor** (Tensor) - Two-dimensional tensor, supported
-          data type is complex64.
-
-    Outputs:
-        - **output_tensor** (Tensor) - The tensor after undergoing fast Fourier
-          transform, the data type is complex64.
-
-    Supported Platforms:
-        ``GPU``
-    """
-
-    @prim_attr_register
-    def __init__(self):
-        self.init_prim_io_names(
-            inputs=['input_tensor'],
-            outputs=['output_tensor'])
-
-    def infer_shape(self, input_shape):
-        self.add_prim_attr('fftx', input_shape[0])
-        self.add_prim_attr('ffty', input_shape[1])
-        return [input_shape[0], input_shape[1]]
-
-    def infer_dtype(self, input_dtype):
-        validator.check_tensor_dtype_valid('input_tensor', input_dtype, [mstype.complex64], self.name)
-        return mstype.complex64
-
-
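A round-trip sketch with the removed 1D FFT pair (hypothetical input; assumes the ops remain importable from mindspore.ops.operations on GPU, and that complex64 tensors are supported there; whether the inverse is normalized follows the underlying cuFFT convention, so the round trip may differ by a constant factor):

    import numpy as np
    import mindspore as ms
    import mindspore.ops.operations as P  # assumed import path

    x = ms.Tensor(np.random.randn(16).astype(np.complex64))  # hypothetical signal
    freq = P.PMEFFT1D()(x)     # complex64, shape (16,)
    back = P.PMEIFFT1D()(freq) # complex64, shape (16,); ~x up to the library's scaling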
-class PMEIFFT2D(PrimitiveWithInfer):
-    """
-    Inverse FFT with Two-Dimensional Input.
-
-    .. warning::
-        This is an experimental prototype that is subject to change and/or deletion.
-
-    Inputs:
-        - **input_tensor** (Tensor) - Two-dimensional input tensor, supported data
-          type is complex64.
-
-    Outputs:
-        - **output_tensor** (Tensor) - Returns the tensor after undergoing
-          inverse Fourier transform, the data type is complex64.
-
-    Supported Platforms:
-        ``GPU``
-    """
-
-    @prim_attr_register
-    def __init__(self):
-        self.init_prim_io_names(
-            inputs=['input_tensor'],
-            outputs=['output_tensor'])
-
-    def infer_shape(self, input_shape):
-        self.add_prim_attr('fftx', input_shape[0])
-        self.add_prim_attr('ffty', input_shape[1])
-        return [input_shape[0], input_shape[1]]
-
-    def infer_dtype(self, input_dtype):
-        validator.check_tensor_dtype_valid('input_tensor', input_dtype, [mstype.complex64], self.name)
-        return mstype.complex64
-
-
-class PMERFFT2D(PrimitiveWithInfer):
-    """
-    Forward FFT with Two-Dimensional Input for real -> complex.
-
-    .. warning::
-        This is an experimental prototype that is subject to change and/or deletion.
-
-    Inputs:
-        - **input_tensor** (Tensor) - Two-dimensional tensor, supported
-          data type is float32.
-
-    Outputs:
-        - **output_tensor** (Tensor) - The tensor after undergoing fast Fourier
-          transform, the data type is complex64.
-
-    Supported Platforms:
-        ``GPU``
-    """
-
-    @prim_attr_register
-    def __init__(self):
-        self.init_prim_io_names(
-            inputs=['input_tensor'],
-            outputs=['output_tensor'])
-
-    def infer_shape(self, input_shape):
-        self.add_prim_attr('fftx', input_shape[0])
-        self.add_prim_attr('ffty', input_shape[1])
-        return [input_shape[0], int(input_shape[1]/2)+1]
-
-    def infer_dtype(self, input_dtype):
-        validator.check_tensor_dtype_valid('input_tensor', input_dtype, [mstype.float32], self.name)
-        return mstype.complex64
-
-
-class PMEIRFFT2D(PrimitiveWithInfer):
-    """
-    Inverse RFFT with Two-Dimensional Input.
-
-    .. warning::
-        This is an experimental prototype that is subject to change and/or deletion.
-
-    Inputs:
-        - **input_tensor** (Tensor) - Two-dimensional input tensor, supported data
-          type is complex64.
-
-    Outputs:
-        - **output_tensor** (Tensor) - Returns the tensor after undergoing
-          inverse Fourier transform, the data type is float32.
-
-    Supported Platforms:
-        ``GPU``
-    """
-
-    @prim_attr_register
-    def __init__(self):
-        self.init_prim_io_names(
-            inputs=['input_tensor'],
-            outputs=['output_tensor'])
-
-    def infer_shape(self, input_shape):
-        self.add_prim_attr('fftx', input_shape[0])
-        self.add_prim_attr('ffty', input_shape[1])
-        return [input_shape[0], 2*(input_shape[1]-1)]
-
-    def infer_dtype(self, input_dtype):
-        validator.check_tensor_dtype_valid('input_tensor', input_dtype, [mstype.complex64], self.name)
-        return mstype.float32
-
-
-class FFT3D(PrimitiveWithInfer):
-    """
-    Forward FFT with Three-Dimensional Input.
-
-    .. warning::
-        This is an experimental prototype that is subject to change and/or deletion.
-
-    Inputs:
-        - **input_tensor** (Tensor) - Three-dimensional tensor, supported
-          data type is float32.
-
-    Outputs:
-        - **output_tensor** (Tensor) - The tensor after undergoing fast Fourier
-          transform, the data type is complex64.
-
-    Supported Platforms:
-        ``GPU``
-    """
-
-    @prim_attr_register
-    def __init__(self):
-        self.init_prim_io_names(
-            inputs=['input_tensor'],
-            outputs=['output_tensor'])
-
-    def infer_shape(self, input_shape):
-        self.add_prim_attr('fftx', input_shape[0])
-        self.add_prim_attr('ffty', input_shape[1])
-        self.add_prim_attr('fftz', input_shape[2])
-        return [input_shape[0], input_shape[1], int(input_shape[2]/2)+1]
-
-    def infer_dtype(self, input_dtype):
-        validator.check_tensor_dtype_valid('input_tensor', input_dtype, [mstype.float32], self.name)
-        return mstype.complex64
-
-
-class IFFT3D(PrimitiveWithInfer):
-    """
-    Inverse FFT with Three-Dimensional Input.
-
-    .. warning::
-        This is an experimental prototype that is subject to change and/or deletion.
-
-    Inputs:
-        - **input_tensor** (Tensor) - Three-dimensional input tensor, supported data
-          type is complex64.
-
-    Outputs:
-        - **output_tensor** (Tensor) - Returns the tensor after undergoing
-          inverse Fourier transform, the data type is float32.
-
-    Supported Platforms:
-        ``GPU``
-    """
-
-    @prim_attr_register
-    def __init__(self):
-        self.init_prim_io_names(
-            inputs=['input_tensor'],
-            outputs=['output_tensor'])
-
-    def infer_shape(self, input_shape):
-        self.add_prim_attr('fftx', input_shape[0])
-        self.add_prim_attr('ffty', input_shape[1])
-        self.add_prim_attr('fftz', input_shape[2])
-        return [input_shape[0], input_shape[1], (input_shape[2]-1)*2]
-
-    def infer_dtype(self, input_dtype):
-        validator.check_tensor_dtype_valid('input_tensor', input_dtype, [mstype.complex64], self.name)
-        return mstype.float32
-
-
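FFT3D and IFFT3D form a real-to-complex / complex-to-real pair: per infer_shape above, the forward transform halves the last dimension to n/2 + 1 and the inverse restores it. A round-trip sketch (hypothetical input; assumes the ops remain importable from mindspore.ops.operations on GPU):

    import numpy as np
    import mindspore as ms
    import mindspore.ops.operations as P  # assumed import path

    real = ms.Tensor(np.random.randn(4, 4, 4).astype(np.float32))  # hypothetical grid
    freq = P.FFT3D()(real)   # complex64, shape (4, 4, 3): last dim becomes 4/2 + 1
    back = P.IFFT3D()(freq)  # float32, shape (4, 4, 4)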
- - **crd** (Tensor) - The coordinates of each atom. - The data type is float32 and the shape is :math:`(n, 3)`. - - **box_length** (Tensor) - The box length of the simulation box. - The data type is float32 and the shape is :math:`(3,)`. - - **grid_N** (Tensor) - The number of grids in each of the 3 dimensions of the simulation box. - The data type is int32 and the shape is :math:`(3,)`. - - **grid_length_inverse** (Tensor) - The inverse value of grid length. - The data type is float32 and the shape is :math:`(3,)`. - - **atom_in_grid_serial** (Tensor) - The grid index for each atom. - The data type is int32 and the shape is :math:`(n,)`. - - **old_crd** (Tensor) - The coordinates of each atom before the update. - The data type is float32 and the shape is :math:`(n, 3)`. - - **crd_to_uint_crd_cof** (Tensor) - The scale factor between the unsigned int coordinate and the real one. - The data type is float32 and the shape is :math:`(3,)`. - - **uint_crd** (Tensor) - The unsigned int coordinate value of each atom. - The data type is unsigned int32 and the shape is :math:`(n, 3)`. - - **gpointer** (Tensor) - The nearest neighbor grids (including self) of each grid. - The data type is int32 and the shape is :math:`(G, 125)`. - - **nl_atom_numbers** (Tensor) - The number of atoms in the neighbor list of each atom. - The data type is int32 and the shape is :math:`(n,)`. - - **nl_atom_serial** (Tensor) - The indices of atoms in the neighbor list of each atom. - The data type is int32 and the shape is :math:`(n, m)`. - - **uint_dr_to_dr_cof** (Tensor) - The scale factor. - The data type is float32 and the shape is :math:`(3,)`. - - **excluded_list_start** (Tensor) - The start index in the excluded list for each atom. - The data type is int32 and the shape is :math:`(n,)`. - - **excluded_list** (Tensor) - The contiguous concatenation of the excluded list of each atom. - The data type is int32 and the shape is :math:`(E,)`. - - **excluded_numbers** (Tensor) - The number of excluded atoms for each atom. - The data type is int32 and the shape is :math:`(n,)`. - - **need_refresh_flag** (Tensor) - Whether the neighbor list of each atom needs updating or not. - The data type is int32 and the shape is :math:`(1,)`. - - **refresh_count** (Tensor) - The number of iteration steps since the last update. - The data type is int32 and the shape is :math:`(1,)` or :math:`()`. - - Outputs: - - **res** (Tensor) - The return value after updating successfully. - The data type is float32 and the shape is :math:`(1,)`.
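The half_skin_square attribute encodes the standard Verlet-list safety margin: a rebuild is due once any atom has moved more than skin/2 from its position at the last rebuild, since two atoms approaching head-on can then close the full skin width between updates. A minimal sketch of that criterion, assuming the usual convention (this illustrates the bookkeeping, not the CUDA kernel itself):

    import numpy as np

    def need_refresh(crd, old_crd, half_skin_square):
        # squared displacement of each atom since the last rebuild
        dr2 = np.sum((crd - old_crd) ** 2, axis=1)
        # any atom past skin/2 triggers a neighbor-list rebuild
        return bool(np.any(dr2 > half_skin_square))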
- - Supported Platforms: - ``GPU`` - """ - - @prim_attr_register - def __init__(self, grid_numbers, atom_numbers, not_first_time, nxy, excluded_atom_numbers, - cutoff_square, half_skin_square, cutoff_with_skin, half_cutoff_with_skin, cutoff_with_skin_square, - refresh_interval=20, cutoff=10.0, skin=2.0, max_atom_in_grid_numbers=64, max_neighbor_numbers=800): - self.grid_numbers = grid_numbers - self.atom_numbers = atom_numbers - self.refresh_interval = refresh_interval - self.not_first_time = not_first_time - self.cutoff = cutoff - self.skin = skin - self.max_atom_in_grid_numbers = max_atom_in_grid_numbers - self.nxy = nxy - self.excluded_atom_numbers = excluded_atom_numbers - self.cutoff_square = cutoff_square - self.half_skin_square = half_skin_square - self.cutoff_with_skin = cutoff_with_skin - self.half_cutoff_with_skin = half_cutoff_with_skin - self.cutoff_with_skin_square = cutoff_with_skin_square - self.max_neighbor_numbers = max_neighbor_numbers - validator.check_value_type('grid_numbers', grid_numbers, int, self.name) - validator.check_value_type('atom_numbers', atom_numbers, int, self.name) - validator.check_value_type('refresh_interval', refresh_interval, int, self.name) - validator.check_value_type('not_first_time', not_first_time, int, self.name) - validator.check_value_type('cutoff', cutoff, float, self.name) - validator.check_value_type('skin', skin, float, self.name) - validator.check_value_type('max_atom_in_grid_numbers', max_atom_in_grid_numbers, int, self.name) - validator.check_value_type('nxy', nxy, int, self.name) - validator.check_value_type('excluded_atom_numbers', excluded_atom_numbers, int, self.name) - validator.check_value_type('cutoff_square', cutoff_square, float, self.name) - validator.check_value_type('half_skin_square', half_skin_square, float, self.name) - validator.check_value_type('cutoff_with_skin', cutoff_with_skin, float, self.name) - validator.check_value_type('half_cutoff_with_skin', half_cutoff_with_skin, float, self.name) - validator.check_value_type('cutoff_with_skin_square', cutoff_with_skin_square, float, self.name) - validator.check_value_type('max_neighbor_numbers', max_neighbor_numbers, int, self.name) - self.init_prim_io_names( - inputs=['atom_numbers_in_grid_bucket', 'bucket', 'crd', 'box_length', 'grid_N', 'grid_length_inverse', - 'atom_in_grid_serial', 'old_crd', 'crd_to_uint_crd_cof', 'uint_crd', 'gpointer', 'nl_atom_numbers', - 'nl_atom_serial', 'uint_dr_to_dr_cof', 'excluded_list_start', 'excluded_list', 'excluded_numbers', - 'need_refresh_flag', 'refresh_count'], outputs=['res']) - - self.add_prim_attr('grid_numbers', self.grid_numbers) - self.add_prim_attr('atom_numbers', self.atom_numbers) - self.add_prim_attr('refresh_interval', self.refresh_interval) - self.add_prim_attr('not_first_time', self.not_first_time) - self.add_prim_attr('cutoff', self.cutoff) - self.add_prim_attr('skin', self.skin) - self.add_prim_attr('max_atom_in_grid_numbers', self.max_atom_in_grid_numbers) - self.add_prim_attr('nxy', self.nxy) - self.add_prim_attr('excluded_atom_numbers', self.excluded_atom_numbers) - self.add_prim_attr('cutoff_square', self.cutoff_square) - self.add_prim_attr('half_skin_square', self.half_skin_square) - self.add_prim_attr('cutoff_with_skin', self.cutoff_with_skin) - self.add_prim_attr('half_cutoff_with_skin', self.half_cutoff_with_skin) - self.add_prim_attr('cutoff_with_skin_square', self.cutoff_with_skin_square) - self.add_prim_attr('side_effect_mem', True) - - def infer_shape(self, atom_numbers_in_grid_bucket_shape, bucket_shape, 
crd_shape, box_length_shape, grid_n_shape, - grid_length_inverse_shape, atom_in_grid_serial_shape, old_crd_shape, crd_to_uint_crd_cof_shape, - uint_crd_shape, gpointer_shape, nl_atom_numbers_shape, nl_atom_serial_shape, - uint_dr_to_dr_cof_shape, excluded_list_start_shape, excluded_list_shape, excluded_numbers_shape, - need_refresh_flag_shape, refresh_count_shape): - validator.check_int(len(atom_numbers_in_grid_bucket_shape), 1, Rel.EQ, - "atom_numbers_in_grid_bucket_dim", self.name) - validator.check_int(len(bucket_shape), 2, Rel.EQ, "bucket_dim", self.name) - validator.check_int(len(crd_shape), 2, Rel.EQ, "crd_dim", self.name) - validator.check_int(len(box_length_shape), 1, Rel.EQ, "box_length_dim", self.name) - validator.check_int(len(grid_n_shape), 1, Rel.EQ, "grid_n_dim", self.name) - validator.check_int(len(grid_length_inverse_shape), 1, Rel.EQ, "grid_length_inverse_dim", self.name) - validator.check_int(len(atom_in_grid_serial_shape), 1, Rel.EQ, "atom_in_grid_serial_dim", self.name) - validator.check_int(len(old_crd_shape), 2, Rel.EQ, "old_crd_dim", self.name) - validator.check_int(len(crd_to_uint_crd_cof_shape), 1, Rel.EQ, "crd_to_uint_crd_cof_dim", self.name) - validator.check_int(len(uint_crd_shape), 2, Rel.EQ, "uint_crd_dim", self.name) - validator.check_int(len(gpointer_shape), 2, Rel.EQ, "gpointer_dim", self.name) - validator.check_int(len(nl_atom_numbers_shape), 1, Rel.EQ, "nl_atom_numbers_dim", self.name) - validator.check_int(len(nl_atom_serial_shape), 2, Rel.EQ, "nl_atom_serial_dim", self.name) - validator.check_int(len(uint_dr_to_dr_cof_shape), 1, Rel.EQ, "uint_dr_to_dr_cof_dim", self.name) - validator.check_int(len(excluded_list_start_shape), 1, Rel.EQ, "excluded_list_start_dim", self.name) - validator.check_int(len(excluded_list_shape), 1, Rel.EQ, "excluded_list_dim", self.name) - validator.check_int(len(excluded_numbers_shape), 1, Rel.EQ, "excluded_numbers_dim", self.name) - validator.check_int(len(need_refresh_flag_shape), 1, Rel.EQ, "need_refresh_flag_dim", self.name) - validator.check_int(len(refresh_count_shape), 1, Rel.LE, "refresh_count_dim", self.name) - validator.check_int(atom_numbers_in_grid_bucket_shape[0], self.grid_numbers, Rel.EQ, - "atom_numbers_in_grid_bucket", self.name) - validator.check_int(bucket_shape[0], self.grid_numbers, Rel.EQ, "bucket", self.name) - validator.check_int(bucket_shape[1], self.max_atom_in_grid_numbers, Rel.EQ, "bucket", self.name) - validator.check_int(crd_shape[0], self.atom_numbers, Rel.EQ, "crd", self.name) - validator.check_int(crd_shape[1], 3, Rel.EQ, "crd", self.name) - validator.check_int(box_length_shape[0], 3, Rel.EQ, "box_length", self.name) - validator.check_int(grid_n_shape[0], 3, Rel.EQ, "grid_N", self.name) - validator.check_int(grid_length_inverse_shape[0], 3, Rel.EQ, "grid_length_inverse", self.name) - validator.check_int(atom_in_grid_serial_shape[0], self.atom_numbers, Rel.EQ, "atom_in_grid_serial", - self.name) - validator.check_int(old_crd_shape[0], self.atom_numbers, Rel.EQ, "old_crd", self.name) - validator.check_int(old_crd_shape[1], 3, Rel.EQ, "old_crd", self.name) - validator.check_int(crd_to_uint_crd_cof_shape[0], 3, Rel.EQ, "crd_to_uint_crd_cof", self.name) - validator.check_int(uint_crd_shape[0], self.atom_numbers, Rel.EQ, "uint_crd", self.name) - validator.check_int(uint_crd_shape[1], 3, Rel.EQ, "uint_crd", self.name) - validator.check_int(gpointer_shape[0], self.grid_numbers, Rel.EQ, "gpointer", self.name) - validator.check_int(gpointer_shape[1], 125, Rel.EQ, "gpointer", self.name) - 
validator.check_int(nl_atom_numbers_shape[0], self.atom_numbers, Rel.EQ, "nl_atom_numbers", self.name) - validator.check_int(nl_atom_serial_shape[0], self.atom_numbers, Rel.EQ, "nl_atom_serial", self.name) - validator.check_int(nl_atom_serial_shape[1], self.max_neighbor_numbers, Rel.EQ, "nl_atom_serial", - self.name) - validator.check_int(uint_dr_to_dr_cof_shape[0], 3, Rel.EQ, "uint_dr_to_dr_cof", self.name) - validator.check_int(excluded_list_start_shape[0], self.atom_numbers, Rel.EQ, "excluded_list_start", - self.name) - validator.check_int(excluded_list_shape[0], self.excluded_atom_numbers, Rel.EQ, "excluded_list", - self.name) - validator.check_int(excluded_numbers_shape[0], self.atom_numbers, Rel.EQ, "excluded_numbers", self.name) - validator.check_int(need_refresh_flag_shape[0], 1, Rel.EQ, "need_refresh_flag", self.name) - if refresh_count_shape: - validator.check_int(refresh_count_shape[0], 1, Rel.EQ, "refresh_count_shape", self.name) - return [1,] - - def infer_dtype(self, atom_numbers_in_grid_bucket_dtype, bucket_dtype, crd_dtype, box_length_dtype, grid_n_dtype, - grid_length_inverse_dtype, atom_in_grid_serial_dtype, old_crd_dtype, crd_to_uint_crd_cof_dtype, - uint_crd_dtype, gpointer_dtype, nl_atom_numbers_dtype, nl_atom_serial_dtype, - uint_dr_to_dr_cof_dtype, excluded_list_start_dtype, excluded_list_dtype, excluded_numbers_dtype, - need_refresh_flag_dtype, refresh_count_dtype): - validator.check_tensor_dtype_valid('atom_numbers_in_grid_bucket', atom_numbers_in_grid_bucket_dtype, - [mstype.int32], self.name) - validator.check_tensor_dtype_valid('bucket', bucket_dtype, [mstype.int32], self.name) - validator.check_tensor_dtype_valid('crd', crd_dtype, [mstype.float32], self.name) - validator.check_tensor_dtype_valid('box_length', box_length_dtype, [mstype.float32], self.name) - validator.check_tensor_dtype_valid('grid_N', grid_n_dtype, [mstype.int32], self.name) - validator.check_tensor_dtype_valid('grid_length_inverse', grid_length_inverse_dtype, [mstype.float32], - self.name) - validator.check_tensor_dtype_valid('atom_in_grid_serial', atom_in_grid_serial_dtype, [mstype.int32], - self.name) - validator.check_tensor_dtype_valid('old_crd', old_crd_dtype, [mstype.float32], self.name) - validator.check_tensor_dtype_valid('crd_to_uint_crd_cof', crd_to_uint_crd_cof_dtype, [mstype.float32], - self.name) - validator.check_tensor_dtype_valid('uint_crd', uint_crd_dtype, [mstype.uint32], self.name) - validator.check_tensor_dtype_valid('gpointer', gpointer_dtype, [mstype.int32], self.name) - validator.check_tensor_dtype_valid('nl_atom_numbers', nl_atom_numbers_dtype, [mstype.int32], self.name) - validator.check_tensor_dtype_valid('nl_atom_serial', nl_atom_serial_dtype, [mstype.int32], self.name) - validator.check_tensor_dtype_valid('uint_dr_to_dr_cof', uint_dr_to_dr_cof_dtype, [mstype.float32], - self.name) - validator.check_tensor_dtype_valid('excluded_list_start', excluded_list_start_dtype, [mstype.int32], - self.name) - validator.check_tensor_dtype_valid('excluded_list', excluded_list_dtype, [mstype.int32], self.name) - validator.check_tensor_dtype_valid('excluded_numbers', excluded_numbers_dtype, [mstype.int32], self.name) - validator.check_tensor_dtype_valid('need_refresh_flag', need_refresh_flag_dtype, [mstype.int32], - self.name) - validator.check_tensor_dtype_valid('refresh_count', refresh_count_dtype, [mstype.int32], - self.name) - return mstype.float32 diff --git a/mindspore/python/mindspore/ops/operations/sponge_update_ops.py 
b/mindspore/python/mindspore/ops/operations/sponge_update_ops.py deleted file mode 100644 index c82762b4991..00000000000 --- a/mindspore/python/mindspore/ops/operations/sponge_update_ops.py +++ /dev/null @@ -1,2546 +0,0 @@ -# Copyright 2021 Huawei Technologies Co., Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================ - -""" -Note: - SPONGE operators for new modules. This is an experimental interface that is subject to change and/or deletion. -""" -import math -from ..primitive import PrimitiveWithInfer, prim_attr_register -from ..._checkparam import Rel -from ..._checkparam import Validator as validator -from ...common import dtype as mstype - - -class RefreshUintCrd(PrimitiveWithInfer): - """ - Refresh the unsigned coordinate of each constrained atom in each constrain iteration. - - .. warning:: - This is an experimental prototype that is subject to change and/or deletion. - - Args: - atom_numbers (int32): the number of atoms n. - half_exp_gamma_plus_half (float32): constant value (1.0 + exp(-gamma * dt)) / 2 if the Langevin-Liu thermostat is used, - where gamma is the friction coefficient and dt is the simulation time step; 1.0 otherwise. - - Inputs: - - **crd** (Tensor) - The coordinate of each atom. - The data type is float32 and the shape is :math:`(n, 3)`. - - **quarter_cof** (Tensor) - The 3-D scale factor. - The data type is float32 and the shape is :math:`(3,)`. - - **test_frc** (Tensor) - The constraint force. - The data type is float32 and the shape is :math:`(n, 3)`. - - **mass_inverse** (Tensor) - The inverse value of mass of each atom. - The data type is float32 and the shape is :math:`(n,)`. - - Outputs: - - **uint_crd** (Tensor) - The unsigned int coordinate value of each atom. - The data type is uint32 and the shape is :math:`(n, 3)`.
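A plausible reading of this refresh, sketched from the documented inputs alone (an assumption, not the kernel source): the trial coordinate is displaced by the mass-scaled constraint force times the thermostat factor, then re-quantised onto the unsigned-integer lattice via quarter_cof:

    import numpy as np

    def refresh_uint_crd(crd, quarter_cof, test_frc, mass_inverse,
                         half_exp_gamma_plus_half):
        # displace by the mass-scaled constraint force (assumed update rule)
        new_crd = crd + half_exp_gamma_plus_half * test_frc * mass_inverse[:, None]
        # rescale the real coordinate into the uint32 lattice
        return (new_crd * quarter_cof).astype(np.uint32)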
- - Supported Platforms: - ``GPU`` - """ - - @prim_attr_register - def __init__(self, atom_numbers, half_exp_gamma_plus_half): - validator.check_value_type('atom_numbers', atom_numbers, int, self.name) - validator.check_value_type('half_exp_gamma_plus_half', half_exp_gamma_plus_half, float, self.name) - self.atom_numbers = atom_numbers - self.half_exp_gamma_plus_half = half_exp_gamma_plus_half - self.add_prim_attr('atom_numbers', self.atom_numbers) - self.add_prim_attr('half_exp_gamma_plus_half', self.half_exp_gamma_plus_half) - self.init_prim_io_names( - inputs=['crd', 'quarter_cof', 'test_frc', 'mass_inverse'], - outputs=['uint_crd']) - - def infer_shape(self, crd_shape, quarter_cof_shape, test_frc_shape, mass_inverse_shape): - cls_name = self.name - n = self.atom_numbers - validator.check_int(len(crd_shape), 2, Rel.EQ, "crd_dim", cls_name) - validator.check_int(len(quarter_cof_shape), 1, Rel.EQ, "quarter_cof_dim", cls_name) - validator.check_int(len(test_frc_shape), 2, Rel.EQ, "test_frc_dim", cls_name) - validator.check_int(len(mass_inverse_shape), 1, Rel.EQ, "mass_inverse_dim", cls_name) - - validator.check_int(crd_shape[0], n, Rel.EQ, "crd_shape[0]", cls_name) - validator.check_int(crd_shape[1], 3, Rel.EQ, "crd_shape[1]", cls_name) - validator.check_int(quarter_cof_shape[0], 3, Rel.EQ, "quarter_cof_shape", cls_name) - validator.check_int(test_frc_shape[0], n, Rel.EQ, "test_frc_shape[0]", cls_name) - validator.check_int(test_frc_shape[1], 3, Rel.EQ, "test_frc_shape[1]", cls_name) - validator.check_int(mass_inverse_shape[0], n, Rel.EQ, "mass_inverse_shape", cls_name) - - return [n, 3] - - def infer_dtype(self, crd_dtype, quarter_cof_dtype, test_frc_dtype, mass_inverse_dtype): - validator.check_tensor_dtype_valid('crd', crd_dtype, [mstype.float32], self.name) - validator.check_tensor_dtype_valid('quarter_cof', quarter_cof_dtype, [mstype.float32], self.name) - validator.check_tensor_dtype_valid('test_frc', test_frc_dtype, [mstype.float32], self.name) - validator.check_tensor_dtype_valid('mass_inverse', mass_inverse_dtype, [mstype.float32], self.name) - return mstype.uint32 - - -class ConstrainForceCycleWithVirial(PrimitiveWithInfer): - """ - Calculate the constraint force and virial in each iteration. - - .. warning:: - This is an experimental prototype that is subject to change and/or deletion. - - Args: - atom_numbers (int32): the number of atoms n. - constrain_pair_numbers (int32): the number of constrain pairs m. - - Inputs: - - **uint_crd** (Tensor) - The unsigned int coordinate value of each atom. - The data type is uint32 and the shape is :math:`(n, 3)`. - - **scaler** (Tensor) - The 3-D scale factor (x, y, z), - The data type is float32 and the shape is :math:`(3,)`. - - **pair_dr** (Tensor) - The displacement vector of each constrained atom pair. - The data type is float32 and the shape is :math:`(m, 3)`. - - **atom_i_serials** (Tensor) - The first atom index of each constrained atom pair. - The data type is int32 and the shape is :math:`(m,)`. - - **atom_j_serials** (Tensor) - The second atom index of each constrained atom pair. - The data type is int32 and the shape is :math:`(m,)`. - - **constant_rs** (Tensor) - The constrained distance of each constrained atom pair. - The data type is float32 and the shape is :math:`(m,)`. - - **constrain_ks** (Tensor) - The coefficient of each constrained atom pair. - The data type is float32 and the shape is :math:`(m,)`. - - Outputs: - - **test_frc** (Tensor) - The constraint force. 
- The data type is float32 and the shape is :math:`(n, 3)`. - - **atom_virial** (Tensor) - The virial caused by constraint force of each atom. - The data type is float32 and the shape is :math:`(m,)`. - - Supported Platforms: - ``GPU`` - """ - - @prim_attr_register - def __init__(self, atom_numbers, constrain_pair_numbers): - validator.check_value_type('atom_numbers', atom_numbers, int, self.name) - validator.check_value_type('constrain_pair_numbers', constrain_pair_numbers, int, self.name) - self.atom_numbers = atom_numbers - self.constrain_pair_numbers = constrain_pair_numbers - self.add_prim_attr('atom_numbers', self.atom_numbers) - self.add_prim_attr('constrain_pair_numbers', self.constrain_pair_numbers) - self.init_prim_io_names( - inputs=['uint_crd', 'scaler', 'pair_dr', 'atom_i_serials', 'atom_j_serials', - 'constant_rs', 'constrain_ks'], - outputs=['test_frc', 'atom_virial']) - - def infer_shape(self, uint_crd_shape, scaler_shape, pair_dr_shape, atom_i_serials_shape, - atom_j_serials_shape, constant_rs_shape, constrain_ks_shape): - cls_name = self.name - n = self.atom_numbers - m = self.constrain_pair_numbers - validator.check_int(len(uint_crd_shape), 2, Rel.EQ, "uint_crd_dim", cls_name) - validator.check_int(len(scaler_shape), 1, Rel.EQ, "scaler_dim", cls_name) - validator.check_int(len(pair_dr_shape), 2, Rel.EQ, "pair_dr_dim", cls_name) - validator.check_int(len(atom_i_serials_shape), 1, Rel.EQ, "atom_i_serials_dim", cls_name) - validator.check_int(len(atom_j_serials_shape), 1, Rel.EQ, "atom_j_serials_dim", cls_name) - validator.check_int(len(constant_rs_shape), 1, Rel.EQ, "constant_rs_dim", cls_name) - validator.check_int(len(constrain_ks_shape), 1, Rel.EQ, "constrain_ks_dim", cls_name) - - validator.check_int(uint_crd_shape[0], n, Rel.EQ, "uint_crd_shape[0]", cls_name) - validator.check_int(uint_crd_shape[1], 3, Rel.EQ, "uint_crd_shape[1]", cls_name) - validator.check_int(scaler_shape[0], 3, Rel.EQ, "scaler_shape", cls_name) - validator.check_int(pair_dr_shape[0], m, Rel.EQ, "pair_dr_shape[0]", cls_name) - validator.check_int(pair_dr_shape[1], 3, Rel.EQ, "pair_dr_shape[1]", cls_name) - validator.check_int(atom_i_serials_shape[0], m, Rel.EQ, "atom_i_serials_shape", cls_name) - validator.check_int(atom_j_serials_shape[0], m, Rel.EQ, "atom_j_serials_shape", cls_name) - validator.check_int(constant_rs_shape[0], m, Rel.EQ, "constant_rs_shape", cls_name) - validator.check_int(constrain_ks_shape[0], m, Rel.EQ, "constrain_ks_shape", cls_name) - return [n, 3], [m,] - - def infer_dtype(self, uint_crd_dtype, scaler_dtype, pair_dr_dtype, atom_i_serials_dtype, - atom_j_serials_dtype, constant_rs_dtype, constrain_ks_dtype): - validator.check_tensor_dtype_valid('uint_crd', uint_crd_dtype, [mstype.uint32], self.name) - validator.check_tensor_dtype_valid('scaler', scaler_dtype, [mstype.float32], self.name) - validator.check_tensor_dtype_valid('pair_dr', pair_dr_dtype, [mstype.float32], self.name) - validator.check_tensor_dtype_valid('atom_i_serials', atom_i_serials_dtype, [mstype.int32], self.name) - validator.check_tensor_dtype_valid('atom_j_serials', atom_j_serials_dtype, [mstype.int32], self.name) - validator.check_tensor_dtype_valid('constant_rs', constant_rs_dtype, [mstype.float32], self.name) - validator.check_tensor_dtype_valid('constrain_ks', constrain_ks_dtype, [mstype.float32], self.name) - return mstype.float32, mstype.float32 - - -class LastCrdToDr(PrimitiveWithInfer): - """ - Calculate the displacement vector of each constrained atom pair. - - .. 
warning:: - This is an experimental prototype that is subject to change and/or deletion. - - Args: - atom_numbers (int32): the number of atoms n. - constrain_pair_numbers (int32): the number of constrain pairs m. - - Inputs: - - **crd** (Tensor) - The coordinate of each atom. - The data type is float32 and the shape is :math:`(n, 3)`. - - **quarter_cof** (Tensor) - The 3-D scale factor. - The data type is float32 and the shape is :math:`(3,)`. - - **uint_dr_to_dr** (Tensor) - The 3-D scale factor (x, y, z). - The data type is float32 and the shape is :math:`(3,)`. - - **atom_i_serials** (Tensor) - The first atom index of each constrained atom pair. - The data type is int32 and the shape is :math:`(m,)`. - - **atom_j_serials** (Tensor) - The second atom index of each constrained atom pair. - The data type is int32 and the shape is :math:`(m,)`. - - **constant_rs** (Tensor) - The constrained distance of each constrained atom pair. - The data type is float32 and the shape is :math:`(m,)`. - - **constrain_ks** (Tensor) - The coefficient of each constrained atom pair. - The data type is float32 and the shape is :math:`(m,)`. - - Outputs: - - **pair_dr** (Tensor) - The displacement vector of each constrained atom pair. - The data type is float32 and the shape is :math:`(m, 3)`. - - Supported Platforms: - ``GPU`` - """ - - @prim_attr_register - def __init__(self, atom_numbers, constrain_pair_numbers): - validator.check_value_type('atom_numbers', atom_numbers, int, self.name) - validator.check_value_type('constrain_pair_numbers', constrain_pair_numbers, int, self.name) - self.constrain_pair_numbers = constrain_pair_numbers - self.atom_numbers = atom_numbers - self.add_prim_attr('constrain_pair_numbers', self.constrain_pair_numbers) - self.init_prim_io_names( - inputs=['crd', 'quarter_cof', 'uint_dr_to_dr', 'atom_i_serials', 'atom_j_serials', - 'constant_rs', 'constrain_ks'], - outputs=['pair_dr']) - - def infer_shape(self, crd_shape, quarter_cof_shape, uint_dr_to_dr_shape, atom_i_serials_shape, - atom_j_serials_shape, constant_rs_shape, constrain_ks_shape): - cls_name = self.name - n = self.atom_numbers - m = self.constrain_pair_numbers - validator.check_int(len(crd_shape), 2, Rel.EQ, "crd_dim", cls_name) - validator.check_int(len(quarter_cof_shape), 1, Rel.EQ, "quarter_cof_dim", cls_name) - validator.check_int(len(uint_dr_to_dr_shape), 1, Rel.EQ, "uint_dr_to_dr_dim", cls_name) - validator.check_int(len(atom_i_serials_shape), 1, Rel.EQ, "atom_i_serials_dim", cls_name) - validator.check_int(len(atom_j_serials_shape), 1, Rel.EQ, "atom_j_serials_dim", cls_name) - validator.check_int(len(constant_rs_shape), 1, Rel.EQ, "constant_rs_dim", cls_name) - validator.check_int(len(constrain_ks_shape), 1, Rel.EQ, "constrain_ks_dim", cls_name) - - validator.check_int(crd_shape[0], n, Rel.EQ, "crd_shape[0]", cls_name) - validator.check_int(crd_shape[1], 3, Rel.EQ, "crd_shape[1]", cls_name) - validator.check_int(quarter_cof_shape[0], 3, Rel.EQ, "quarter_cof_shape", cls_name) - validator.check_int(uint_dr_to_dr_shape[0], 3, Rel.EQ, "uint_dr_to_dr_shape", cls_name) - validator.check_int(atom_i_serials_shape[0], m, Rel.EQ, "atom_i_serials_shape", cls_name) - validator.check_int(atom_j_serials_shape[0], m, Rel.EQ, "atom_j_serials_shape", cls_name) - validator.check_int(constant_rs_shape[0], m, Rel.EQ, "constant_rs_shape", cls_name) - validator.check_int(constrain_ks_shape[0], m, Rel.EQ, "constrain_ks_shape", cls_name) - - return [m, 3] - - def infer_dtype(self, crd_dtype, quarter_cof_dtype, uint_dr_to_dr_dtype,
atom_i_serials_dtype, - atom_j_serials_dtype, constant_rs_dtype, constrain_ks_dtype): - validator.check_tensor_dtype_valid('crd', crd_dtype, [mstype.float32], self.name) - validator.check_tensor_dtype_valid('quarter_cof', quarter_cof_dtype, [mstype.float32], self.name) - validator.check_tensor_dtype_valid('uint_dr_to_dr', uint_dr_to_dr_dtype, [mstype.float32], self.name) - validator.check_tensor_dtype_valid('atom_i_serials', atom_i_serials_dtype, [mstype.int32], self.name) - validator.check_tensor_dtype_valid('atom_j_serials', atom_j_serials_dtype, [mstype.int32], self.name) - validator.check_tensor_dtype_valid('constant_rs', constant_rs_dtype, [mstype.float32], self.name) - validator.check_tensor_dtype_valid('constrain_ks', constrain_ks_dtype, [mstype.float32], self.name) - return mstype.float32 - - -class RefreshCrdVel(PrimitiveWithInfer): - """ - Refresh the coordinate and velocity of each constrained atom after all iterations have ended. - - .. warning:: - This is an experimental prototype that is subject to change and/or deletion. - - Args: - atom_numbers (int32): the number of atoms n. - dt_inverse (float32): the inverse value of simulation time step. - dt (float32): the simulation time step. - exp_gamma (float32): constant value exp(gamma * dt). - half_exp_gamma_plus_half (float32): constant value (1 + exp_gamma)/2. - - Inputs: - - **crd** (Tensor) - The coordinate of each atom. - The data type is float32 and the shape is :math:`(n, 3)`. - - **vel** (Tensor) - The velocity of each atom. - The data type is float32 and the shape is :math:`(n, 3)`. - - **test_frc** (Tensor) - The constraint force calculated in the last iteration. - The data type is float32 and the shape is :math:`(n, 3)`. - - **mass_inverse** (Tensor) - The inverse value of mass of each atom. - The data type is float32 and the shape is :math:`(n,)`. - - Outputs: - - **res** (Tensor) - The return value after updating successfully. - The data type is float32 and the shape is :math:`(1,)`. 
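A hedged sketch of the final constrained update implied by the attributes above (an assumption; the exact thermostat factors live in the CUDA kernel): the constraint force contributes a displacement to the coordinate and, scaled by dt_inverse, a matching impulse to the velocity:

    import numpy as np

    def refresh_crd_vel(crd, vel, test_frc, mass_inverse,
                        dt_inverse, exp_gamma, half_exp_gamma_plus_half):
        frc = test_frc * mass_inverse[:, None]   # mass-scaled constraint force
        crd += half_exp_gamma_plus_half * frc    # constrained displacement
        vel += exp_gamma * dt_inverse * frc      # matching velocity correction
        return crd, vel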
- - Supported Platforms: - ``GPU`` - """ - - @prim_attr_register - def __init__(self, atom_numbers, dt_inverse, dt, exp_gamma, half_exp_gamma_plus_half): - validator.check_value_type('atom_numbers', atom_numbers, int, self.name) - validator.check_value_type('dt', dt, float, self.name) - validator.check_value_type('dt_inverse', dt_inverse, float, self.name) - validator.check_value_type('exp_gamma', exp_gamma, float, self.name) - validator.check_value_type('half_exp_gamma_plus_half', half_exp_gamma_plus_half, float, self.name) - self.atom_numbers = atom_numbers - self.dt_inverse = dt_inverse - self.dt = dt - self.exp_gamma = exp_gamma - self.half_exp_gamma_plus_half = half_exp_gamma_plus_half - self.add_prim_attr('atom_numbers', self.atom_numbers) - self.add_prim_attr('dt_inverse', self.dt_inverse) - self.add_prim_attr('dt', self.dt) - self.add_prim_attr('exp_gamma', self.exp_gamma) - self.add_prim_attr('half_exp_gamma_plus_half', self.half_exp_gamma_plus_half) - self.init_prim_io_names( - inputs=['crd', 'vel', 'test_frc', 'mass_inverse'], - outputs=['res']) - self.add_prim_attr('side_effect_mem', True) - - def infer_shape(self, crd_shape, vel_shape, test_frc_shape, mass_inverse_shape): - cls_name = self.name - n = self.atom_numbers - validator.check_int(len(crd_shape), 2, Rel.EQ, "crd_dim", cls_name) - validator.check_int(len(vel_shape), 2, Rel.EQ, "vel_dim", cls_name) - validator.check_int(len(test_frc_shape), 2, Rel.EQ, "test_frc_dim", cls_name) - validator.check_int(len(mass_inverse_shape), 1, Rel.EQ, "mass_inverse_dim", cls_name) - - validator.check_int(crd_shape[0], n, Rel.EQ, "crd_shape[0]", cls_name) - validator.check_int(crd_shape[1], 3, Rel.EQ, "crd_shape[1]", cls_name) - validator.check_int(vel_shape[0], n, Rel.EQ, "vel_shape[0]", cls_name) - validator.check_int(vel_shape[1], 3, Rel.EQ, "vel_shape[1]", cls_name) - validator.check_int(test_frc_shape[0], n, Rel.EQ, "test_frc_shape[0]", cls_name) - validator.check_int(test_frc_shape[1], 3, Rel.EQ, "test_frc_shape[1]", cls_name) - validator.check_int(mass_inverse_shape[0], n, Rel.EQ, "mass_inverse_shape[0]", cls_name) - return [1,] - - def infer_dtype(self, crd_dtype, vel_dtype, test_frc_dtype, mass_inverse_dtype): - validator.check_tensor_dtype_valid('crd', crd_dtype, [mstype.float32], self.name) - validator.check_tensor_dtype_valid('vel', vel_dtype, [mstype.float32], self.name) - validator.check_tensor_dtype_valid('test_frc', test_frc_dtype, [mstype.float32], self.name) - validator.check_tensor_dtype_valid('mass_inverse', mass_inverse_dtype, [mstype.float32], self.name) - return mstype.float32 - - -class CalculateNowrapCrd(PrimitiveWithInfer): - """ - Calculate the unwrapped coordinate of each atom, i.e. its position with the recorded periodic box crossings restored. - - .. warning:: - This is an experimental prototype that is subject to change and/or deletion. - - Args: - atom_numbers (int32): the number of atoms n. - - Inputs: - - **crd** (Tensor) - The coordinate of each atom. - The data type is float32 and the shape is :math:`(n, 3)`. - - **box** (Tensor) - The 3-D size of the system. - The data type is float32 and the shape is :math:`(3, )`. - - **box_map_times** (Tensor) - The number of times each atom has crossed the box. - The data type is int32 and the shape is :math:`(n, 3)`. - - Outputs: - - **nowrap_crd** (Tensor) - The unwrapped coordinate of each atom. - The data type is float32 and the shape is :math:`(n, 3)`.
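The relation implied by these inputs is the standard unwrap identity, sketched here under that assumption (not taken from the kernel source):

    import numpy as np

    def calculate_nowrap_crd(crd, box, box_map_times):
        # restore the box crossings recorded for each atom: an atom that
        # wrapped k times in a dimension regains k box lengths there
        return crd + box_map_times.astype(np.float32) * box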
- - Supported Platforms: - ``GPU`` - """ - - @prim_attr_register - def __init__(self, atom_numbers): - validator.check_value_type('atom_numbers', atom_numbers, int, self.name) - self.atom_numbers = atom_numbers - self.add_prim_attr('atom_numbers', self.atom_numbers) - self.init_prim_io_names( - inputs=['crd', 'box', 'box_map_times'], - outputs=['nowrap_crd']) - - def infer_shape(self, crd_shape, box_shape, box_map_times_shape): - cls_name = self.name - n = self.atom_numbers - validator.check_int(len(crd_shape), 2, Rel.EQ, "crd_dim", cls_name) - validator.check_int(len(box_shape), 1, Rel.EQ, "box_dim", cls_name) - validator.check_int(len(box_map_times_shape), 2, Rel.EQ, "box_map_times_dim", cls_name) - - validator.check_int(crd_shape[0], n, Rel.EQ, "crd_shape[0]", cls_name) - validator.check_int(crd_shape[1], 3, Rel.EQ, "crd_shape[1]", cls_name) - validator.check_int(box_shape[0], 3, Rel.EQ, "box_shape[0]", cls_name) - validator.check_int(box_map_times_shape[0], n, Rel.EQ, "box_map_times_shape[0]", cls_name) - validator.check_int(box_map_times_shape[1], 3, Rel.EQ, "box_map_times_shape[1]", cls_name) - return [n, 3] - - def infer_dtype(self, crd_dtype, box_dtype, box_map_times_dtype): - validator.check_tensor_dtype_valid('crd', crd_dtype, [mstype.float32], self.name) - validator.check_tensor_dtype_valid('box', box_dtype, [mstype.float32], self.name) - validator.check_tensor_dtype_valid('box_map_times', box_map_times_dtype, - [mstype.int32], self.name) - return mstype.float32 - - -class RefreshBoxmapTimes(PrimitiveWithInfer): - """ - Refresh the box-crossing times of each atom. - - .. warning:: - This is an experimental prototype that is subject to change and/or deletion. - - Args: - atom_numbers (int32): the number of atoms n. - - Inputs: - - **crd** (Tensor) - The coordinate of each atom. - The data type is float32 and the shape is :math:`(n, 3)`. - - **old_crd** (Tensor) - The coordinate of each atom at the last update. - The data type is float32 and the shape is :math:`(n, 3)`. - - **box_length_inverse** (Tensor) - The inverse of the box length in 3 dimensions. - The data type is float32 and the shape is :math:`(3,)`. - - **box_map_times** (Tensor) - The number of times each atom has crossed the box. - The data type is int32 and the shape is :math:`(n, 3)`. - - Outputs: - - **res** (Tensor) - The return value after updating successfully. - The data type is float32 and the shape is :math:`(1,)`.
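A hedged sketch of the crossing-count update suggested by the description (an assumption: a wrap event shows up as a jump of roughly one box length between old_crd and crd):

    import numpy as np

    def refresh_boxmap_times(crd, old_crd, box_length_inverse, box_map_times):
        # each wrap appears as a ±box_length jump between consecutive frames;
        # rounding the normalized jump recovers the signed crossing count
        box_map_times += np.rint((old_crd - crd) * box_length_inverse).astype(np.int32)
        return box_map_times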
- - Supported Platforms: - ``GPU`` - """ - - @prim_attr_register - def __init__(self, atom_numbers): - validator.check_value_type('atom_numbers', atom_numbers, int, self.name) - self.atom_numbers = atom_numbers - self.add_prim_attr('atom_numbers', self.atom_numbers) - self.init_prim_io_names( - inputs=['crd', 'old_crd', 'box_length_inverse', 'box_map_times'], - outputs=['res']) - - def infer_shape(self, crd_shape, old_crd_shape, box_length_inverse_shape, box_map_times_shape): - cls_name = self.name - n = self.atom_numbers - validator.check_int(len(crd_shape), 2, Rel.EQ, "crd_dim", cls_name) - validator.check_int(len(old_crd_shape), 2, Rel.EQ, "old_crd_dim", cls_name) - validator.check_int(len(box_length_inverse_shape), 1, Rel.EQ, "box_length_inverse_dim", cls_name) - validator.check_int(len(box_map_times_shape), 2, Rel.EQ, "box_map_times_dim", cls_name) - - validator.check_int(crd_shape[0], n, Rel.EQ, "crd_shape[0]", cls_name) - validator.check_int(crd_shape[1], 3, Rel.EQ, "crd_shape[1]", cls_name) - validator.check_int(old_crd_shape[0], n, Rel.EQ, "old_crd_shape[0]", cls_name) - validator.check_int(old_crd_shape[1], 3, Rel.EQ, "old_crd_shape[1]", cls_name) - validator.check_int(box_length_inverse_shape[0], 3, Rel.EQ, "box_length_inverse_shape[0]", cls_name) - validator.check_int(box_map_times_shape[0], n, Rel.EQ, "box_map_times_shape[0]", cls_name) - validator.check_int(box_map_times_shape[1], 3, Rel.EQ, "box_map_times_shape[1]", cls_name) - return [1,] - - def infer_dtype(self, crd_dtype, old_crd_dtype, box_length_inverse_dtype, box_map_times_dtype): - validator.check_tensor_dtype_valid('crd', crd_dtype, [mstype.float32], self.name) - validator.check_tensor_dtype_valid('old_crd', old_crd_dtype, [mstype.float32], self.name) - validator.check_tensor_dtype_valid('box_length_inverse', box_length_inverse_dtype, [mstype.float32], self.name) - validator.check_tensor_dtype_valid('box_map_times', box_map_times_dtype, - [mstype.int32], self.name) - return mstype.float32 - - -class Totalc6get(PrimitiveWithInfer): - """ - Get the average dispersion constant of the short-range Lennard-Jones interaction, - used in the subsequent long-range correction of the energy and virial. Assume the number of Lennard-Jones type pairs in the system is m. - - .. warning:: - This is an experimental prototype that is subject to change and/or deletion. - - Args: - atom_numbers (int32): the number of atoms n. - - Inputs: - - **atom_lj_type** (Tensor) - The Lennard-Jones type of each atom. - The data type is int32 and the shape is :math:`(n,)`. - - **lj_b** (Tensor) - The attraction coefficient of each type pair; the number of type pairs is m. - The data type is float32 and the shape is :math:`(m,)`. - - Outputs: - - **factor** (Tensor) - The average dispersion constant of Lennard-Jones interaction. - The data type is float32 and the shape is :math:`(1,)`.
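A hedged sketch of the average implied by the description (an assumption about both the pairing and the normalization; `pair_index` below is a hypothetical helper mapping an ordered type pair to its slot in the flattened lj_b table):

    import numpy as np

    def total_c6(atom_lj_type, lj_b, pair_index):
        # accumulate the attraction coefficient over all ordered atom pairs,
        # then average over the n*n pairs (normalization assumed)
        n = atom_lj_type.shape[0]
        acc = 0.0
        for i in range(n):
            for j in range(n):
                acc += lj_b[pair_index(atom_lj_type[i], atom_lj_type[j])]
        return np.float32(acc / (n * n))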
- - Supported Platforms: - ``GPU`` - """ - - @prim_attr_register - def __init__(self, atom_numbers): - validator.check_value_type('atom_numbers', atom_numbers, int, self.name) - self.atom_numbers = atom_numbers - self.add_prim_attr('atom_numbers', self.atom_numbers) - self.init_prim_io_names( - inputs=['atom_lj_type', 'lj_b'], - outputs=['factor']) - - def infer_shape(self, atom_lj_type, lj_b): - cls_name = self.name - n = self.atom_numbers - validator.check_int(len(atom_lj_type), 1, Rel.EQ, "atom_lj_type_dim", cls_name) - validator.check_int(len(lj_b), 1, Rel.EQ, "LJ_b_dim", cls_name) - validator.check_int(atom_lj_type[0], n, Rel.EQ, "atom_lj_type_shape[0]", cls_name) - return [1,] - - def infer_dtype(self, atom_lj_type, lj_b): - validator.check_tensor_dtype_valid('atom_lj_type', atom_lj_type, [mstype.int32], self.name) - validator.check_tensor_dtype_valid('lj_b', lj_b, [mstype.float32], self.name) - return mstype.float32 - - -class CrdToUintCrdQuarter(PrimitiveWithInfer): - """ - Convert FP32 coordinate to Uint32 coordinate. - - .. warning:: - This is an experimental prototype that is subject to change and/or deletion. - - Args: - atom_numbers (int32): the number of atoms n. - - Inputs: - - **crd_to_uint_crd_cof** (Tensor) - The crd_to_uint_crd coefficient. - The data type is float32 and the shape is :math:`(3,)`. - - **crd** (Tensor) - The coordinate of each atom. - The data type is float32 and the shape is :math:`(n, 3)`. - - Outputs: - - **output** (Tensor) - The unsigned int coordinates. - The data type is unsigned int32 and the shape is :math:`(n, 3)`. - - Supported Platforms: - ``GPU`` - """ - - @prim_attr_register - def __init__(self, atom_numbers): - """Initialize CrdToUintCrdQuarter""" - validator.check_value_type('atom_numbers', atom_numbers, int, self.name) - self.atom_numbers = atom_numbers - self.add_prim_attr('atom_numbers', self.atom_numbers) - self.init_prim_io_names( - inputs=['crd_to_uint_crd_cof', 'crd'], - outputs=['output']) - - def infer_shape(self, crd_to_uint_crd_cof, crd): - cls_name = self.name - n = self.atom_numbers - validator.check_int(len(crd), 2, Rel.EQ, "crd_dim", cls_name) - validator.check_int(len(crd_to_uint_crd_cof), 1, Rel.EQ, "crd_to_uint_crd_cof_dim", cls_name) - validator.check_int(crd_to_uint_crd_cof[0], 3, Rel.EQ, "crd_to_uint_crd_cof_shape", self.name) - validator.check_int(crd[0], n, Rel.EQ, "crd[0]", self.name) - validator.check_int(crd[1], 3, Rel.EQ, "crd[1]", self.name) - return crd - - def infer_dtype(self, crd_to_uint_crd_cof, crd): - validator.check_tensor_dtype_valid('crd_to_uint_crd_cof', crd_to_uint_crd_cof, [mstype.float32], self.name) - validator.check_tensor_dtype_valid('crd', crd, [mstype.float32], self.name) - return mstype.uint32 - - -class MDIterationLeapFrogLiujianWithMaxVel(PrimitiveWithInfer): - """ - One step of the classical leap frog algorithm to solve the finite difference - Hamiltonian equations of motion for a certain system, using Langevin dynamics - with Liu's thermostat scheme, but with a maximum velocity limit. Assume the - number of atoms is n and the target control temperature is T. - - The detailed iteration formula can be found in this paper: A unified thermostat - scheme for efficient configurational sampling for classical/quantum canonical - ensembles via molecular dynamics. DOI: 10.1063/1.4991621. - - .. warning:: - This is an experimental prototype that is subject to change and/or deletion. - - Args: - atom_numbers (int32): the number of atoms n. - dt (float32): time step for finite difference.
- half_dt (float32): half of the time step for finite difference. - exp_gamma (float32): parameter in Liu's dynamics, exp(-gamma_ln * dt). - max_vel (float32): the maximum velocity limit. - - Inputs: - - **inverse_mass** (Tensor) - The inverse value of mass of each atom. - The data type is float32 and the shape is :math:`(n,)`. - - **sqrt_mass_inverse** (Tensor) - The inverse sqrt of the mass in Liu's dynamics of each atom. - The data type is float32 and the shape is :math:`(n,)`. - - **vel** (Tensor) - The velocity of each atom. - The data type is float32 and the shape is :math:`(n, 3)`. - - **crd** (Tensor) - The coordinate of each atom. - The data type is float32 and the shape is :math:`(n, 3)`. - - **frc** (Tensor) - The force felt by each atom. - The data type is float32 and the shape is :math:`(n, 3)`. - - **acc** (Tensor) - The acceleration of each atom. - The data type is float32 and the shape is :math:`(n, 3)`. - - **rand_state** (Tensor) - Random state to generate random force. - The data type is float32 and the shape is :math:`(math.ceil(n * 3.0 / 4.0) * 16, )`. - - **rand_frc** (Tensor) - The random forces. - The data type is float32 and the shape is :math:`(n, 3)`. - - Outputs: - - **output** (Tensor) - The output coordinate of each atom. - The data type is float32 and the shape is :math:`(n, 3)`. - - Supported Platforms: - ``GPU`` - """ - - @prim_attr_register - def __init__(self, atom_numbers, half_dt, dt, exp_gamma, max_vel): - """Initialize MDIterationLeapFrogLiujianWithMaxVel""" - validator.check_value_type('atom_numbers', atom_numbers, int, self.name) - validator.check_value_type('half_dt', half_dt, float, self.name) - validator.check_value_type('dt', dt, float, self.name) - validator.check_value_type('exp_gamma', exp_gamma, float, self.name) - validator.check_value_type('max_vel', max_vel, float, self.name) - self.atom_numbers = atom_numbers - self.half_dt = half_dt - self.dt = dt - self.exp_gamma = exp_gamma - self.max_vel = max_vel - - self.add_prim_attr('atom_numbers', self.atom_numbers) - self.add_prim_attr('half_dt', self.half_dt) - self.add_prim_attr('dt', self.dt) - self.add_prim_attr('exp_gamma', self.exp_gamma) - self.add_prim_attr('max_vel', self.max_vel) - self.init_prim_io_names( - inputs=['inverse_mass', 'sqrt_mass_inverse', 'vel', 'crd', 'frc', 'acc', 'rand_state', 'rand_frc'], - outputs=['output']) - self.add_prim_attr('side_effect_mem', True) - - def infer_shape(self, inverse_mass, sqrt_mass_inverse, vel, crd, frc, acc, rand_state, rand_frc): - n = self.atom_numbers - validator.check_int(len(inverse_mass), 1, Rel.EQ, "inverse_mass_dim", self.name) - validator.check_int(len(sqrt_mass_inverse), 1, Rel.EQ, "sqrt_mass_inverse_dim", self.name) - validator.check_int(len(rand_state), 1, Rel.EQ, "rand_state_dim", self.name) - validator.check_int(len(rand_frc), 2, Rel.EQ, "rand_frc_dim", self.name) - validator.check_int(len(vel), 2, Rel.EQ, "vel_dim", self.name) - validator.check_int(len(crd), 2, Rel.EQ, "crd_dim", self.name) - validator.check_int(len(frc), 2, Rel.EQ, "frc_dim", self.name) - validator.check_int(len(acc), 2, Rel.EQ, "acc_dim", self.name) - validator.check_int(inverse_mass[0], n, Rel.EQ, "inverse_mass", self.name) - validator.check_int(sqrt_mass_inverse[0], n, Rel.EQ, "sqrt_mass_inverse", self.name) - validator.check_int(vel[0], n, Rel.EQ, "vel_shape[0]", self.name) - validator.check_int(vel[1], 3, Rel.EQ, "vel_shape[1]", self.name) - validator.check_int(crd[0], n, Rel.EQ, "crd_shape[0]", self.name) - validator.check_int(crd[1], 3, Rel.EQ, "crd_shape[1]",
self.name) - validator.check_int(frc[0], n, Rel.EQ, "frc_shape[0]", self.name) - validator.check_int(frc[1], 3, Rel.EQ, "frc_shape[1]", self.name) - validator.check_int(acc[0], n, Rel.EQ, "acc_shape[0]", self.name) - validator.check_int(acc[1], 3, Rel.EQ, "acc_shape[1]", self.name) - validator.check_int(rand_frc[0], n, Rel.EQ, "rand_frc_shape[0]", self.name) - validator.check_int(rand_frc[1], 3, Rel.EQ, "rand_frc_shape[1]", self.name) - validator.check_int(rand_state[0], math.ceil(self.atom_numbers * 3 / 4.0) * 16, Rel.EQ, "rand_state", self.name) - return [self.atom_numbers, 3] - - def infer_dtype(self, inverse_mass, sqrt_mass_inverse, vel, crd, frc, acc, rand_state, rand_frc): - validator.check_tensor_dtype_valid('inverse_mass', inverse_mass, [mstype.float32], self.name) - validator.check_tensor_dtype_valid('sqrt_mass_inverse', sqrt_mass_inverse, [mstype.float32], self.name) - validator.check_tensor_dtype_valid('vel', vel, [mstype.float32], self.name) - validator.check_tensor_dtype_valid('crd', crd, [mstype.float32], self.name) - validator.check_tensor_dtype_valid('frc', frc, [mstype.float32], self.name) - validator.check_tensor_dtype_valid('acc', acc, [mstype.float32], self.name) - validator.check_tensor_dtype_valid('rand_frc', rand_frc, [mstype.float32], self.name) - validator.check_tensor_dtype_valid('rand_state', rand_state, [mstype.float32], self.name) - return mstype.float32 - - -class GetCenterOfMass(PrimitiveWithInfer): - """ - Get coordinate of centroid of each residue. Assume system has n atoms. - - .. warning:: - This is an experimental prototype that is subject to change and/or deletion. - - Args: - residue_numbers (int32): the number of residues m. - - Inputs: - - **start** (Tensor) - The start atom index of each residue. - The data type is int32 and the shape is :math:`(m,)`. - - **end** (Tensor) - The end atom index of each residue. - The data type is int32 and the shape is :math:`(m,)`. - - **crd** (Tensor) - The coordinate of each atom. - The data type is float32 and the shape is :math:`(n, 3)`. - - **atom_mass** (Tensor) - The mass of each atom and the atom number is n. - The data type is float32 and the shape is :math:`(n,)`. - - **residue_mass_inverse** (Tensor) - The inverse of mass of each residue. - The data type is float32 and the shape is :math:`(m,)`. - - Outputs: - - **center_of_mass** (Tensor) - The coordinate of centroid of each residue. - The data type is float32 and the shape is :math:`(m, 3)`. 
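A mass-weighted centroid per residue, sketched directly from the documented inputs (NumPy is used as a stand-in for the GPU kernel):

    import numpy as np

    def get_center_of_mass(start, end, crd, atom_mass, residue_mass_inverse):
        com = np.empty((start.shape[0], 3), dtype=np.float32)
        for k in range(start.shape[0]):
            s, e = start[k], end[k]
            # sum of m_i * x_i over the residue, divided by the residue mass
            com[k] = (atom_mass[s:e, None] * crd[s:e]).sum(axis=0) * residue_mass_inverse[k]
        return com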
- - Supported Platforms: - ``GPU`` - """ - - @prim_attr_register - def __init__(self, residue_numbers): - """Initialize GetCenterOfMass""" - validator.check_value_type('residue_numbers', residue_numbers, int, self.name) - self.residue_numbers = residue_numbers - self.add_prim_attr('residue_numbers', self.residue_numbers) - self.init_prim_io_names( - inputs=['start', 'end', 'crd', 'atom_mass', 'residue_mass_inverse'], - outputs=['center_of_mass']) - - def infer_shape(self, start, end, crd, atom_mass, residue_mass_inverse): - n = crd[0] - m = self.residue_numbers - validator.check_int(len(start), 1, Rel.EQ, "start_dim", self.name) - validator.check_int(len(end), 1, Rel.EQ, "end_dim", self.name) - validator.check_int(len(crd), 2, Rel.EQ, "crd_dim", self.name) - validator.check_int(len(atom_mass), 1, Rel.EQ, "atom_mass_dim", self.name) - validator.check_int(len(residue_mass_inverse), 1, Rel.EQ, "residue_mass_inverse_dim", self.name) - validator.check_int(start[0], m, Rel.EQ, "start_shape", self.name) - validator.check_int(end[0], m, Rel.EQ, "end_shape", self.name) - validator.check_int(crd[0], n, Rel.EQ, "crd_shape[0]", self.name) - validator.check_int(crd[1], 3, Rel.EQ, "crd_shape[1]", self.name) - validator.check_int(atom_mass[0], n, Rel.EQ, "atom_mass_shape[0]", self.name) - validator.check_int(residue_mass_inverse[0], m, Rel.EQ, "residue_mass_inverse_shape", self.name) - return [m, 3] - - def infer_dtype(self, start, end, crd, atom_mass, residue_mass_inverse): - validator.check_tensor_dtype_valid('start', start, [mstype.int32], self.name) - validator.check_tensor_dtype_valid('end', end, [mstype.int32], self.name) - validator.check_tensor_dtype_valid('crd', crd, [mstype.float32], self.name) - validator.check_tensor_dtype_valid('atom_mass', atom_mass, [mstype.float32], self.name) - validator.check_tensor_dtype_valid('residue_mass_inverse', residue_mass_inverse, [mstype.float32], self.name) - return mstype.float32 - - -class MapCenterOfMass(PrimitiveWithInfer): - """ - Map all atoms in the same residue into the same periodic box, scaling if necessary (usually under pressure control). - Assume the system has n atoms. - - .. warning:: - This is an experimental prototype that is subject to change and/or deletion. - - Args: - residue_numbers (int32): the number of residues m. - - Inputs: - - **start** (Tensor) - The start atom index of each residue. - The data type is int32 and the shape is :math:`(m,)`. - - **end** (Tensor) - The end atom index of each residue. - The data type is int32 and the shape is :math:`(m,)`. - - **center_of_mass** (Tensor) - The coordinate of the centroid of each residue. - The data type is float32 and the shape is :math:`(m, 3)`. - - **box_length** (Tensor) - The box length of the simulation box. - The data type is float32 and the shape is :math:`(3,)`. - - **no_wrap_crd** (Tensor) - The coordinate of each atom before wrap. - The data type is float32 and the shape is :math:`(n, 3)`. - - **crd** (Tensor) - The coordinate of each atom after wrap. - The data type is float32 and the shape is :math:`(n, 3)`. - - **scaler** (Tensor) - The scale factor of the system. - The data type is float32 and the shape is :math:`(1,)`. - - Outputs: - - **res** (Tensor) - The return value after updating successfully. - The data type is float32 and the shape is :math:`(1,)`.
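A hedged sketch of the mapping (an assumption; the exact scaling convention is in the kernel): each residue is translated as a rigid unit by the vector that puts its scaled centroid back into the primary box, so residues are never split across the boundary:

    import numpy as np

    def map_center_of_mass(start, end, center_of_mass, box_length,
                           no_wrap_crd, crd, scaler):
        for k in range(start.shape[0]):
            c = scaler * center_of_mass[k]
            # shift that moves the scaled centroid into [0, box_length)
            shift = -np.floor(c / box_length) * box_length
            crd[start[k]:end[k]] = scaler * no_wrap_crd[start[k]:end[k]] + shift
        return crd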
- - Supported Platforms: - ``GPU`` - """ - - @prim_attr_register - def __init__(self, residue_numbers): - """Initialize MapCenterOfMass""" - validator.check_value_type('residue_numbers', residue_numbers, int, self.name) - self.residue_numbers = residue_numbers - self.add_prim_attr('residue_numbers', self.residue_numbers) - self.init_prim_io_names( - inputs=['start', 'end', 'center_of_mass', 'box_length', - 'no_wrap_crd', 'crd', 'scaler'], - outputs=['res']) - - def infer_shape(self, start, end, center_of_mass, box_length, no_wrap_crd, crd, scaler): - m = self.residue_numbers - n = crd[0] - validator.check_int(len(start), 1, Rel.EQ, "start_dim", self.name) - validator.check_int(len(end), 1, Rel.EQ, "end_dim", self.name) - validator.check_int(len(center_of_mass), 2, Rel.EQ, "center_of_mass_dim", self.name) - validator.check_int(len(box_length), 1, Rel.EQ, "box_length_dim", self.name) - validator.check_int(len(no_wrap_crd), 2, Rel.EQ, "no_wrap_crd_dim", self.name) - validator.check_int(len(crd), 2, Rel.EQ, "crd_dim", self.name) - validator.check_int(len(scaler), 1, Rel.EQ, "scaler_dim", self.name) - - validator.check_int(start[0], m, Rel.EQ, "start_shape", self.name) - validator.check_int(end[0], m, Rel.EQ, "end_shape", self.name) - validator.check_int(center_of_mass[0], m, Rel.EQ, "center_of_mass_shape[0]", self.name) - validator.check_int(center_of_mass[1], 3, Rel.EQ, "center_of_mass_shape[1]", self.name) - validator.check_int(box_length[0], 3, Rel.EQ, "box_length_shape", self.name) - validator.check_int(scaler[0], 1, Rel.EQ, "scaler_shape", self.name) - validator.check_int(no_wrap_crd[0], n, Rel.EQ, "no_wrap_crd_shape[0]", self.name) - validator.check_int(no_wrap_crd[1], 3, Rel.EQ, "no_wrap_crd_shape[1]", self.name) - validator.check_int(crd[0], n, Rel.EQ, "crd_shape[0]", self.name) - validator.check_int(crd[1], 3, Rel.EQ, "crd_shape[1]", self.name) - return [1,] - - def infer_dtype(self, start, end, center_of_mass, box_length, no_wrap_crd, crd, scaler): - validator.check_tensor_dtype_valid('start', start, [mstype.int32], self.name) - validator.check_tensor_dtype_valid('end', end, [mstype.int32], self.name) - validator.check_tensor_dtype_valid('crd', crd, [mstype.float32], self.name) - validator.check_tensor_dtype_valid('center_of_mass', center_of_mass, [mstype.float32], self.name) - validator.check_tensor_dtype_valid('box_length', box_length, [mstype.float32], self.name) - validator.check_tensor_dtype_valid('no_wrap_crd', no_wrap_crd, [mstype.float32], self.name) - validator.check_tensor_dtype_valid('scaler', scaler, [mstype.float32], self.name) - return mstype.float32 - - -class NeighborListRefresh(PrimitiveWithInfer): - """ - Update (or construct if first time) the Verlet neighbor list for the - calculation of short-ranged force. - - .. warning:: - This is an experimental prototype that is subject to change and/or deletion. - - Args: - grid_numbers (int32): the total number of grids G. - atom_numbers (int32): the number of atoms n. - not_first_time (int32): whether the neighbor list is being constructed for the first time (0) or updated. - nxy (int32): the number of grids in the xy plane. - excluded_atom_numbers (int32): the total number of atoms in the excluded list E. - cutoff_square (float32): the cutoff square distance for short-range force calculation. - half_skin_square (float32): the square of the maximum distance an atom is allowed to move between two updates. - cutoff_with_skin (float32): cutoff + skin, indicates the radius of the neighbor list for each atom.
- half_cutoff_with_skin (float32): cutoff_with_skin/2. - cutoff_with_skin_square (float32): the square value of cutoff_with_skin. - refresh_interval (int32): the number of iteration steps between two updates of the neighbor list. Default: 20. - cutoff (float32): the cutoff distance for short-range force calculation. Default: 10.0. - skin (float32): the maximum distance an atom is allowed to move. Default: 2.0. - max_atom_in_grid_numbers (int32): the maximum number of atoms in one grid k. Default: 64. - max_neighbor_numbers (int32): The maximum number of neighbors m. Default: 800. - forced_update (int32): the flag that decides whether to force an update. Default: 0. - forced_check (int32): the flag that decides whether to force a check. Default: 0. - - Inputs: - - **atom_numbers_in_grid_bucket** (Tensor) - The number of atoms in each grid bucket. - The data type is int32 and the shape is :math:`(G,)`. - - **bucket** (Tensor) - The atom indices in each grid bucket. - The data type is int32 and the shape is :math:`(G, k)`. - - **crd** (Tensor) - The coordinates of each atom. - The data type is float32 and the shape is :math:`(n, 3)`. - - **box_length** (Tensor) - The box length of the simulation box. - The data type is float32 and the shape is :math:`(3,)`. - - **grid_n** (Tensor) - The number of grids in each of the 3 dimensions of the simulation box. - The data type is int32 and the shape is :math:`(3,)`. - - **grid_length_inverse** (Tensor) - The inverse value of grid length. - The data type is float32 and the shape is :math:`(3,)`. - - **atom_in_grid_serial** (Tensor) - The grid index for each atom. - The data type is int32 and the shape is :math:`(n,)`. - - **old_crd** (Tensor) - The coordinates of each atom before the update. - The data type is float32 and the shape is :math:`(n, 3)`. - - **crd_to_uint_crd_cof** (Tensor) - The scale factor between the unsigned int coordinate and the real one. - The data type is float32 and the shape is :math:`(3,)`. - - **uint_crd** (Tensor) - The unsigned int coordinate value of each atom. - The data type is unsigned int32 and the shape is :math:`(n, 3)`. - - **gpointer** (Tensor) - The nearest neighbor grids (including self) of each grid. - The data type is int32 and the shape is :math:`(G, 125)`. - - **nl_atom_numbers** (Tensor) - The number of atoms in the neighbor list of each atom. - The data type is int32 and the shape is :math:`(n,)`. - - **nl_atom_serial** (Tensor) - The indices of atoms in the neighbor list of each atom. - The data type is int32 and the shape is :math:`(n, m)`. - - **uint_dr_to_dr_cof** (Tensor) - The scale factor. - The data type is float32 and the shape is :math:`(3,)`. - - **excluded_list_start** (Tensor) - The start index in the excluded list for each atom. - The data type is int32 and the shape is :math:`(n,)`. - - **excluded_list** (Tensor) - The contiguous concatenation of the excluded list of each atom. - The data type is int32 and the shape is :math:`(E,)`. - - **excluded_numbers** (Tensor) - The number of excluded atoms for each atom. - The data type is int32 and the shape is :math:`(n,)`. - - **need_refresh_flag** (Tensor) - Whether the neighbor list of each atom needs updating or not. - The data type is int32 and the shape is :math:`(1,)`. - - **refresh_count** (Union[Tensor, Scalar]) - The number of iteration steps since the last update. - The data type is int32 and the shape is :math:`(1,)` or :math:`()`. - - Outputs: - - **res** (Tensor) - The return value after updating successfully.
- The data type is float32 and the shape is :math:`(1,)`. - - Supported Platforms: - ``GPU`` - """ - - @prim_attr_register - def __init__(self, grid_numbers, atom_numbers, not_first_time, nxy, excluded_atom_numbers, - cutoff_square, half_skin_square, cutoff_with_skin, half_cutoff_with_skin, cutoff_with_skin_square, - refresh_interval=20, cutoff=10.0, skin=2.0, max_atom_in_grid_numbers=64, max_neighbor_numbers=800, - forced_update=0, forced_check=0): - """Initialize NeighborListRefresh""" - self.grid_numbers = grid_numbers - self.atom_numbers = atom_numbers - self.refresh_interval = refresh_interval - self.not_first_time = not_first_time - self.cutoff = cutoff - self.skin = skin - self.max_atom_in_grid_numbers = max_atom_in_grid_numbers - self.nxy = nxy - self.excluded_atom_numbers = excluded_atom_numbers - self.cutoff_square = cutoff_square - self.half_skin_square = half_skin_square - self.cutoff_with_skin = cutoff_with_skin - self.half_cutoff_with_skin = half_cutoff_with_skin - self.cutoff_with_skin_square = cutoff_with_skin_square - self.max_neighbor_numbers = max_neighbor_numbers - self.forced_update = forced_update - self.forced_check = forced_check - validator.check_value_type('grid_numbers', grid_numbers, int, self.name) - validator.check_value_type('atom_numbers', atom_numbers, int, self.name) - validator.check_value_type('refresh_interval', refresh_interval, int, self.name) - validator.check_value_type('not_first_time', not_first_time, int, self.name) - validator.check_value_type('cutoff', cutoff, float, self.name) - validator.check_value_type('skin', skin, float, self.name) - validator.check_value_type('max_atom_in_grid_numbers', max_atom_in_grid_numbers, int, self.name) - validator.check_value_type('nxy', nxy, int, self.name) - validator.check_value_type('excluded_atom_numbers', excluded_atom_numbers, int, self.name) - validator.check_value_type('cutoff_square', cutoff_square, float, self.name) - validator.check_value_type('half_skin_square', half_skin_square, float, self.name) - validator.check_value_type('cutoff_with_skin', cutoff_with_skin, float, self.name) - validator.check_value_type('half_cutoff_with_skin', half_cutoff_with_skin, float, self.name) - validator.check_value_type('cutoff_with_skin_square', cutoff_with_skin_square, float, self.name) - validator.check_value_type('max_neighbor_numbers', max_neighbor_numbers, int, self.name) - validator.check_value_type('forced_update', forced_update, int, self.name) - validator.check_value_type('forced_check', forced_check, int, self.name) - self.init_prim_io_names( - inputs=['atom_numbers_in_grid_bucket', 'bucket', 'crd', 'box_length', 'grid_n', 'grid_length_inverse', - 'atom_in_grid_serial', 'old_crd', 'crd_to_uint_crd_cof', 'uint_crd', 'gpointer', 'nl_atom_numbers', - 'nl_atom_serial', 'uint_dr_to_dr_cof', 'excluded_list_start', 'excluded_list', 'excluded_numbers', - 'need_refresh_flag', 'refresh_count'], outputs=['res']) - - self.add_prim_attr('grid_numbers', self.grid_numbers) - self.add_prim_attr('atom_numbers', self.atom_numbers) - self.add_prim_attr('refresh_interval', self.refresh_interval) - self.add_prim_attr('not_first_time', self.not_first_time) - self.add_prim_attr('cutoff', self.cutoff) - self.add_prim_attr('skin', self.skin) - self.add_prim_attr('max_atom_in_grid_numbers', self.max_atom_in_grid_numbers) - self.add_prim_attr('nxy', self.nxy) - self.add_prim_attr('excluded_atom_numbers', self.excluded_atom_numbers) - self.add_prim_attr('cutoff_square', self.cutoff_square) - self.add_prim_attr('half_skin_square', 
self.half_skin_square) - self.add_prim_attr('cutoff_with_skin', self.cutoff_with_skin) - self.add_prim_attr('half_cutoff_with_skin', self.half_cutoff_with_skin) - self.add_prim_attr('cutoff_with_skin_square', self.cutoff_with_skin_square) - self.add_prim_attr('forced_update', self.forced_update) - self.add_prim_attr('forced_check', self.forced_check) - self.add_prim_attr('side_effect_mem', True) - - def infer_shape(self, atom_numbers_in_grid_bucket_shape, bucket_shape, crd_shape, box_length_shape, grid_n_shape, - grid_length_inverse_shape, atom_in_grid_serial_shape, old_crd_shape, crd_to_uint_crd_cof_shape, - uint_crd_shape, gpointer_shape, nl_atom_numbers_shape, nl_atom_serial_shape, - uint_dr_to_dr_cof_shape, excluded_list_start_shape, excluded_list_shape, excluded_numbers_shape, - need_refresh_flag_shape, refresh_count_shape): - validator.check_int(len(atom_numbers_in_grid_bucket_shape), 1, Rel.EQ, - "atom_numbers_in_grid_bucket_dim", self.name) - validator.check_int(len(bucket_shape), 2, Rel.EQ, "bucket_dim", self.name) - validator.check_int(len(crd_shape), 2, Rel.EQ, "crd_dim", self.name) - validator.check_int(len(box_length_shape), 1, Rel.EQ, "box_length_dim", self.name) - validator.check_int(len(grid_n_shape), 1, Rel.EQ, "grid_n_dim", self.name) - validator.check_int(len(grid_length_inverse_shape), 1, Rel.EQ, "grid_length_inverse_dim", self.name) - validator.check_int(len(atom_in_grid_serial_shape), 1, Rel.EQ, "atom_in_grid_serial_dim", self.name) - validator.check_int(len(old_crd_shape), 2, Rel.EQ, "old_crd_dim", self.name) - validator.check_int(len(crd_to_uint_crd_cof_shape), 1, Rel.EQ, "crd_to_uint_crd_cof_dim", self.name) - validator.check_int(len(uint_crd_shape), 2, Rel.EQ, "uint_crd_dim", self.name) - validator.check_int(len(gpointer_shape), 2, Rel.EQ, "gpointer_dim", self.name) - validator.check_int(len(nl_atom_numbers_shape), 1, Rel.EQ, "nl_atom_numbers_dim", self.name) - validator.check_int(len(nl_atom_serial_shape), 2, Rel.EQ, "nl_atom_serial_dim", self.name) - validator.check_int(len(uint_dr_to_dr_cof_shape), 1, Rel.EQ, "uint_dr_to_dr_cof_dim", self.name) - validator.check_int(len(excluded_list_start_shape), 1, Rel.EQ, "excluded_list_start_dim", self.name) - validator.check_int(len(excluded_list_shape), 1, Rel.EQ, "excluded_list_dim", self.name) - validator.check_int(len(excluded_numbers_shape), 1, Rel.EQ, "excluded_numbers_dim", self.name) - validator.check_int(len(need_refresh_flag_shape), 1, Rel.EQ, "need_refresh_flag_dim", self.name) - validator.check_int(len(refresh_count_shape), 1, Rel.LE, "refresh_count_dim", self.name) - validator.check_int(atom_numbers_in_grid_bucket_shape[0], self.grid_numbers, Rel.EQ, - "atom_numbers_in_grid_bucket", self.name) - validator.check_int(bucket_shape[0], self.grid_numbers, Rel.EQ, "bucket", self.name) - validator.check_int(bucket_shape[1], self.max_atom_in_grid_numbers, Rel.EQ, "bucket", self.name) - validator.check_int(crd_shape[0], self.atom_numbers, Rel.EQ, "crd", self.name) - validator.check_int(crd_shape[1], 3, Rel.EQ, "crd", self.name) - validator.check_int(box_length_shape[0], 3, Rel.EQ, "box_length", self.name) - validator.check_int(grid_n_shape[0], 3, Rel.EQ, "grid_n", self.name) - validator.check_int(grid_length_inverse_shape[0], 3, Rel.EQ, "grid_length_inverse", self.name) - validator.check_int(atom_in_grid_serial_shape[0], self.atom_numbers, Rel.EQ, "atom_in_grid_serial", - self.name) - validator.check_int(old_crd_shape[0], self.atom_numbers, Rel.EQ, "old_crd", self.name) - validator.check_int(old_crd_shape[1], 3, 
Rel.EQ, "old_crd", self.name) - validator.check_int(crd_to_uint_crd_cof_shape[0], 3, Rel.EQ, "crd_to_uint_crd_cof", self.name) - validator.check_int(uint_crd_shape[0], self.atom_numbers, Rel.EQ, "uint_crd", self.name) - validator.check_int(uint_crd_shape[1], 3, Rel.EQ, "uint_crd", self.name) - validator.check_int(gpointer_shape[0], self.grid_numbers, Rel.EQ, "gpointer", self.name) - validator.check_int(gpointer_shape[1], 125, Rel.EQ, "gpointer", self.name) - validator.check_int(nl_atom_numbers_shape[0], self.atom_numbers, Rel.EQ, "nl_atom_numbers", self.name) - validator.check_int(nl_atom_serial_shape[0], self.atom_numbers, Rel.EQ, "nl_atom_serial", self.name) - validator.check_int(nl_atom_serial_shape[1], self.max_neighbor_numbers, Rel.EQ, "nl_atom_serial", - self.name) - validator.check_int(uint_dr_to_dr_cof_shape[0], 3, Rel.EQ, "uint_dr_to_dr_cof", self.name) - validator.check_int(excluded_list_start_shape[0], self.atom_numbers, Rel.EQ, "excluded_list_start", - self.name) - validator.check_int(excluded_list_shape[0], self.excluded_atom_numbers, Rel.EQ, "excluded_list", - self.name) - validator.check_int(excluded_numbers_shape[0], self.atom_numbers, Rel.EQ, "excluded_numbers", self.name) - validator.check_int(need_refresh_flag_shape[0], 1, Rel.EQ, "need_refresh_flag", self.name) - if refresh_count_shape: - validator.check_int(refresh_count_shape[0], 1, Rel.EQ, "refresh_count_shape", self.name) - return [1,] - - def infer_dtype(self, atom_numbers_in_grid_bucket_dtype, bucket_dtype, crd_dtype, box_length_dtype, grid_n_dtype, - grid_length_inverse_dtype, atom_in_grid_serial_dtype, old_crd_dtype, crd_to_uint_crd_cof_dtype, - uint_crd_dtype, gpointer_dtype, nl_atom_numbers_dtype, nl_atom_serial_dtype, - uint_dr_to_dr_cof_dtype, excluded_list_start_dtype, excluded_list_dtype, excluded_numbers_dtype, - need_refresh_flag_dtype, refresh_count_dtype): - validator.check_tensor_dtype_valid('atom_numbers_in_grid_bucket', atom_numbers_in_grid_bucket_dtype, - [mstype.int32], self.name) - validator.check_tensor_dtype_valid('bucket', bucket_dtype, [mstype.int32], self.name) - validator.check_tensor_dtype_valid('crd', crd_dtype, [mstype.float32], self.name) - validator.check_tensor_dtype_valid('box_length', box_length_dtype, [mstype.float32], self.name) - validator.check_tensor_dtype_valid('grid_n', grid_n_dtype, [mstype.int32], self.name) - validator.check_tensor_dtype_valid('grid_length_inverse', grid_length_inverse_dtype, [mstype.float32], - self.name) - validator.check_tensor_dtype_valid('atom_in_grid_serial', atom_in_grid_serial_dtype, [mstype.int32], - self.name) - validator.check_tensor_dtype_valid('old_crd', old_crd_dtype, [mstype.float32], self.name) - validator.check_tensor_dtype_valid('crd_to_uint_crd_cof', crd_to_uint_crd_cof_dtype, [mstype.float32], - self.name) - validator.check_tensor_dtype_valid('uint_crd', uint_crd_dtype, [mstype.uint32], self.name) - validator.check_tensor_dtype_valid('gpointer', gpointer_dtype, [mstype.int32], self.name) - validator.check_tensor_dtype_valid('nl_atom_numbers', nl_atom_numbers_dtype, [mstype.int32], self.name) - validator.check_tensor_dtype_valid('nl_atom_serial', nl_atom_serial_dtype, [mstype.int32], self.name) - validator.check_tensor_dtype_valid('uint_dr_to_dr_cof', uint_dr_to_dr_cof_dtype, [mstype.float32], - self.name) - validator.check_tensor_dtype_valid('excluded_list_start', excluded_list_start_dtype, [mstype.int32], - self.name) - validator.check_tensor_dtype_valid('excluded_list', excluded_list_dtype, [mstype.int32], self.name) - 
validator.check_tensor_dtype_valid('excluded_numbers', excluded_numbers_dtype, [mstype.int32], self.name) - validator.check_tensor_dtype_valid('need_refresh_flag', need_refresh_flag_dtype, [mstype.int32], - self.name) - validator.check_tensor_dtype_valid('refresh_count', refresh_count_dtype, [mstype.int32], - self.name) - - return mstype.float32 - - -class MDIterationLeapFrog(PrimitiveWithInfer): - """ - One step of classical leap frog algorithm to solve the finite difference - Hamiltonian equations of motion for certain system. - - .. warning:: - This is an experimental prototype that is subject to change and/or deletion. - - Args: - atom_numbers (int32): the number of atoms n. - dt (float32): the simulation time step. - - Inputs: - - **sqrt_mass_inverse** (Tensor) - The square root of the inverse value of the mass of each atom. - The data type is float32 and the shape is :math:`(n,)`. - - **vel** (Tensor) - The velocity of each atom. - The data type is float32 and the shape is :math:`(n, 3)`. - - **crd** (Tensor) - The coordinate of each atom. - The data type is float32 and the shape is :math:`(n, 3)`. - - **frc** (Tensor) - The force felt by each atom. - The data type is float32 and the shape is :math:`(n, 3)`. - - **acc** (Tensor) - The acceleration of each atom. - The data type is float32 and the shape is :math:`(n, 3)`. - - **inverse_mass** (Tensor) - The inverse value of mass of each atom. - The data type is float32 and the shape is :math:`(n,)`. - - Outputs: - - **res** (Tensor) - The return value after updating successfully. - The data type is float32 and the shape is :math:`(1,)`. - - Supported Platforms: - ``GPU`` - """ - - @prim_attr_register - def __init__(self, atom_numbers, dt): - """Initialize MDIterationLeapFrog""" - validator.check_value_type('atom_numbers', atom_numbers, int, self.name) - validator.check_value_type('dt', dt, float, self.name) - self.atom_numbers = atom_numbers - self.dt = dt - self.add_prim_attr('atom_numbers', self.atom_numbers) - self.add_prim_attr('dt', self.dt) - self.init_prim_io_names( - inputs=['sqrt_mass_inverse', 'vel', 'crd', 'frc', 'acc', 'inverse_mass'], - outputs=['res']) - self.add_prim_attr('side_effect_mem', True) - - def infer_shape(self, vel, crd, frc, acc, inverse_mass): - n = self.atom_numbers - validator.check_int(len(vel), 2, Rel.EQ, "vel_dim", self.name) - validator.check_int(len(crd), 2, Rel.EQ, "crd_dim", self.name) - validator.check_int(len(frc), 2, Rel.EQ, "frc_dim", self.name) - validator.check_int(len(acc), 2, Rel.EQ, "acc_dim", self.name) - validator.check_int(len(inverse_mass), 1, Rel.EQ, "inverse_mass_dim", self.name) - validator.check_int(vel[0], n, Rel.EQ, "vel_shape[0]", self.name) - validator.check_int(vel[1], 3, Rel.EQ, "vel_shape[1]", self.name) - validator.check_int(crd[0], n, Rel.EQ, "crd_shape[0]", self.name) - validator.check_int(crd[1], 3, Rel.EQ, "crd_shape[1]", self.name) - validator.check_int(frc[0], n, Rel.EQ, "frc_shape[0]", self.name) - validator.check_int(frc[1], 3, Rel.EQ, "frc_shape[1]", self.name) - validator.check_int(acc[0], n, Rel.EQ, "acc_shape[0]", self.name) - validator.check_int(acc[1], 3, Rel.EQ, "acc_shape[1]", self.name) - validator.check_int(inverse_mass[0], n, Rel.EQ, "inverse_mass_shape", self.name) - return [1,] - - def infer_dtype(self, vel, crd, frc, acc, inverse_mass): - validator.check_tensor_dtype_valid('inverse_mass', inverse_mass, [mstype.float32], self.name) - validator.check_tensor_dtype_valid('vel', vel, [mstype.float32], self.name) - validator.check_tensor_dtype_valid('crd', 
crd, [mstype.float32], self.name) - validator.check_tensor_dtype_valid('frc', frc, [mstype.float32], self.name) - validator.check_tensor_dtype_valid('acc', acc, [mstype.float32], self.name) - return mstype.float32 - - -class MDIterationLeapFrogWithMaxVel(PrimitiveWithInfer): - """ - Leap frog algorithm to solve the Hamiltonian equations of motion with a maximum velocity limit. - - .. warning:: - This is an experimental prototype that is subject to change and/or deletion. - - Args: - atom_numbers (int32): the number of atoms n. - dt (float32): the simulation time step. - max_velocity (float32): the maximum velocity limit. - - Inputs: - - **vel** (Tensor) - The velocity of each atom. - The data type is float32 and the shape is :math:`(n, 3)`. - - **crd** (Tensor) - The coordinate of each atom. - The data type is float32 and the shape is :math:`(n, 3)`. - - **frc** (Tensor) - The force felt by each atom. - The data type is float32 and the shape is :math:`(n, 3)`. - - **acc** (Tensor) - The acceleration of each atom. - The data type is float32 and the shape is :math:`(n, 3)`. - - **inverse_mass** (Tensor) - The inverse value of mass of each atom. - The data type is float32 and the shape is :math:`(n,)`. - - Outputs: - - **res** (Tensor) - The return value after updating successfully. - The data type is float32 and the shape is :math:`(1,)`. - - - Supported Platforms: - ``GPU`` - """ - - @prim_attr_register - def __init__(self, atom_numbers, dt, max_velocity): - """Initialize MDIterationLeapFrogWithMaxVel""" - validator.check_value_type('atom_numbers', atom_numbers, int, self.name) - validator.check_value_type('dt', dt, float, self.name) - validator.check_value_type('max_velocity', max_velocity, float, self.name) - self.atom_numbers = atom_numbers - self.dt = dt - self.max_velocity = max_velocity - - self.add_prim_attr('atom_numbers', self.atom_numbers) - self.add_prim_attr('dt', self.dt) - self.add_prim_attr('max_velocity', self.max_velocity) - self.init_prim_io_names( - inputs=['vel', 'crd', 'frc', 'acc', 'inverse_mass'], - outputs=['res']) - self.add_prim_attr('side_effect_mem', True) - - def infer_shape(self, vel, crd, frc, acc, inverse_mass): - n = self.atom_numbers - validator.check_int(len(vel), 2, Rel.EQ, "vel_dim", self.name) - validator.check_int(len(crd), 2, Rel.EQ, "crd_dim", self.name) - validator.check_int(len(frc), 2, Rel.EQ, "frc_dim", self.name) - validator.check_int(len(acc), 2, Rel.EQ, "acc_dim", self.name) - validator.check_int(len(inverse_mass), 1, Rel.EQ, "inverse_mass_dim", self.name) - validator.check_int(inverse_mass[0], n, Rel.EQ, "inverse_mass_shape", self.name) - validator.check_int(vel[0], n, Rel.EQ, "vel_shape[0]", self.name) - validator.check_int(vel[1], 3, Rel.EQ, "vel_shape[1]", self.name) - validator.check_int(crd[0], n, Rel.EQ, "crd_shape[0]", self.name) - validator.check_int(crd[1], 3, Rel.EQ, "crd_shape[1]", self.name) - validator.check_int(frc[0], n, Rel.EQ, "frc_shape[0]", self.name) - validator.check_int(frc[1], 3, Rel.EQ, "frc_shape[1]", self.name) - validator.check_int(acc[0], n, Rel.EQ, "acc_shape[0]", self.name) - validator.check_int(acc[1], 3, Rel.EQ, "acc_shape[1]", self.name) - return [1,] - - def infer_dtype(self, vel, crd, frc, acc, inverse_mass): - validator.check_tensor_dtype_valid('inverse_mass', inverse_mass, [mstype.float32], self.name) - validator.check_tensor_dtype_valid('vel', vel, [mstype.float32], self.name) - validator.check_tensor_dtype_valid('crd', crd, [mstype.float32], self.name) - validator.check_tensor_dtype_valid('frc', frc, 
[mstype.float32], self.name) - validator.check_tensor_dtype_valid('acc', acc, [mstype.float32], self.name) - return mstype.float32 - - -class MDIterationGradientDescent(PrimitiveWithInfer): - """ - Update the coordinate of each atom along the force (the negative gradient of the potential) for energy minimization. - - .. warning:: - This is an experimental prototype that is subject to change and/or deletion. - - Args: - atom_numbers (int32): the number of atoms n. - learning_rate (float32): the update step length. - - Inputs: - - **crd** (Tensor) - The coordinate of each atom. - The data type is float32 and the shape is :math:`(n, 3)`. - - **frc** (Tensor) - The force felt by each atom. - The data type is float32 and the shape is :math:`(n, 3)`. - - Outputs: - - **res** (Tensor) - The return value after updating successfully. - The data type is float32 and the shape is :math:`(1,)`. - - Supported Platforms: - ``GPU`` - """ - - @prim_attr_register - def __init__(self, atom_numbers, learning_rate): - """Initialize MDIterationGradientDescent""" - validator.check_value_type('atom_numbers', atom_numbers, int, self.name) - validator.check_value_type('learning_rate', learning_rate, float, self.name) - self.atom_numbers = atom_numbers - self.learning_rate = learning_rate - self.add_prim_attr('atom_numbers', self.atom_numbers) - self.add_prim_attr('learning_rate', self.learning_rate) - self.init_prim_io_names( - inputs=['crd', 'frc'], - outputs=['res']) - self.add_prim_attr('side_effect_mem', True) - - def infer_shape(self, crd, frc): - n = self.atom_numbers - validator.check_int(len(crd), 2, Rel.EQ, "crd_dim", self.name) - validator.check_int(len(frc), 2, Rel.EQ, "frc_dim", self.name) - validator.check_int(crd[0], n, Rel.EQ, "crd_shape[0]", self.name) - validator.check_int(crd[1], 3, Rel.EQ, "crd_shape[1]", self.name) - validator.check_int(frc[0], n, Rel.EQ, "frc_shape[0]", self.name) - validator.check_int(frc[1], 3, Rel.EQ, "frc_shape[1]", self.name) - return [1,] - - def infer_dtype(self, crd, frc): - validator.check_tensor_dtype_valid('crd', crd, [mstype.float32], self.name) - validator.check_tensor_dtype_valid('frc', frc, [mstype.float32], self.name) - return mstype.float32 - - -class BondForceWithAtomEnergyAndVirial(PrimitiveWithInfer): - """ - Calculate the bond force, harmonic potential energy and atom virial together. - - The calculation formula is the same as in operators BondForce() and BondEnergy(). - - Because there is a large number of inputs and each of them is related, - there is no way to construct `Examples` using random methods. For details, refer to the webpage `SPONGE in MindSpore - `_. - - .. warning:: - This is an experimental prototype that is subject to change and/or deletion. - - Args: - atom_numbers (int32): the number of atoms n. - bond_numbers (int32): the number of harmonic bonds m. - - Inputs: - - **uint_crd_f** (Tensor) - The unsigned int coordinate value of each atom. - The data type is uint32 and the shape is :math:`(n, 3)`. - - **scaler_f** (Tensor) - The 3-D scale factor (x, y, z). - The data type is float32 and the shape is :math:`(3,)`. - - **atom_a** (Tensor) - The first atom index of each bond. - The data type is int32 and the shape is :math:`(m,)`. - - **atom_b** (Tensor) - The second atom index of each bond. - The data type is int32 and the shape is :math:`(m,)`. - - **bond_k** (Tensor) - The force constant of each bond. - The data type is float32 and the shape is :math:`(m,)`. - - **bond_r0** (Tensor) - The equilibrium length of each bond. - The data type is float32 and the shape is :math:`(m,)`.
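The three integrator primitives around this point share a simple core; a NumPy sketch of one leap-frog step (with the optional velocity clamp of the WithMaxVel variant) and one gradient-descent step, assuming the conventional a = f/m update; names are illustrative, not the removed kernels:

```python
import numpy as np

def leap_frog_step(vel, crd, frc, inverse_mass, dt, max_velocity=None):
    """v += (f/m)*dt, optionally clamped to max_velocity, then x += v*dt."""
    vel = vel + frc * inverse_mass[:, None] * dt
    if max_velocity is not None:  # MDIterationLeapFrogWithMaxVel behaviour (assumed)
        speed = np.linalg.norm(vel, axis=1, keepdims=True)
        vel = np.where(speed > max_velocity, vel * (max_velocity / speed), vel)
    return vel, crd + vel * dt

def gradient_descent_step(crd, frc, learning_rate):
    """Move each atom along the force (negative potential gradient)."""
    return crd + learning_rate * frc
```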
- - Outputs: - - **frc_f** (Tensor) - The force of each atom. - The data type is float32 and the shape is :math:`(n, 3)`. - - **atom_e** (Tensor) - The energy of each atom. - The data type is float32 and the shape is :math:`(n,)`. - - **atom_virial** (Tensor) - The virial of each atom. - The data type is float32 and the shape is :math:`(n,)`. - - Supported Platforms: - ``GPU`` - """ - - @prim_attr_register - def __init__(self, bond_numbers, atom_numbers): - """Initialize BondForceWithAtomEnergyAndVirial""" - validator.check_value_type('bond_numbers', bond_numbers, int, self.name) - validator.check_value_type('atom_numbers', atom_numbers, int, self.name) - self.bond_numbers = bond_numbers - self.atom_numbers = atom_numbers - self.add_prim_attr('bond_numbers', self.bond_numbers) - self.add_prim_attr('atom_numbers', self.atom_numbers) - self.init_prim_io_names(inputs=['uint_crd_f', 'scaler_f', 'atom_a', 'atom_b', 'bond_k', 'bond_r0'], - outputs=['frc_f', 'atom_e', 'atom_virial']) - - def infer_shape(self, uint_crd_f_shape, scaler_f_shape, atom_a_shape, atom_b_shape, bond_k_shape, bond_r0_shape): - cls_name = self.name - n = self.atom_numbers - m = self.bond_numbers - validator.check_int(len(uint_crd_f_shape), 2, Rel.EQ, "uint_crd_f_dim", cls_name) - validator.check_int(len(scaler_f_shape), 1, Rel.EQ, "scaler_f_dim", cls_name) - validator.check_int(len(atom_a_shape), 1, Rel.EQ, "atom_a_dim", cls_name) - validator.check_int(len(atom_b_shape), 1, Rel.EQ, "atom_b_dim", cls_name) - validator.check_int(len(bond_k_shape), 1, Rel.EQ, "bond_k_dim", cls_name) - validator.check_int(len(bond_r0_shape), 1, Rel.EQ, "bond_r0_dim", cls_name) - validator.check_int(uint_crd_f_shape[0], n, Rel.EQ, "uint_crd_f_shape[0]", cls_name) - validator.check_int(uint_crd_f_shape[1], 3, Rel.EQ, "uint_crd_f_shape[1]", cls_name) - validator.check_int(scaler_f_shape[0], 3, Rel.EQ, "scaler_f_shape", cls_name) - validator.check_int(atom_a_shape[0], m, Rel.EQ, "atom_a_shape", cls_name) - validator.check_int(atom_b_shape[0], m, Rel.EQ, "atom_b_shape", cls_name) - validator.check_int(bond_k_shape[0], m, Rel.EQ, "bond_k_shape", cls_name) - validator.check_int(bond_r0_shape[0], m, Rel.EQ, "bond_r0_shape", cls_name) - - return uint_crd_f_shape, [n,], [n,] - - def infer_dtype(self, uint_crd_f_dtype, scaler_f_type, atom_a_type, atom_b_type, bond_k_type, bond_r0_type): - validator.check_tensor_dtype_valid('uint_crd_f', uint_crd_f_dtype, [mstype.uint32], self.name) - validator.check_tensor_dtype_valid('scaler_f', scaler_f_type, [mstype.float32], self.name) - - validator.check_tensor_dtype_valid('atom_a', atom_a_type, [mstype.int32], self.name) - validator.check_tensor_dtype_valid('atom_b', atom_b_type, [mstype.int32], self.name) - - validator.check_tensor_dtype_valid('bond_k', bond_k_type, [mstype.float32], self.name) - validator.check_tensor_dtype_valid('bond_r0', bond_r0_type, [mstype.float32], self.name) - return mstype.float32, mstype.float32, mstype.float32 - - -class LJForceWithVirialEnergy(PrimitiveWithInfer): - """ - Calculate the Lennard-Jones force, virial and atom energy together. - - The calculation formula of Lennard-Jones part is the same as operator - LJForce(), and the PME direct part is within PME method. - - Because there is a large amount of inputs and each of them are related, - there is no way to construct `Examples` using random methods. For details, refer the webpage `SPONGE in MindSpore - `_. - - .. warning:: - This is an experimental prototype that is subject to change and/or deletion. 
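A sketch of the per-bond quantities the bond operator above accumulates, assuming the harmonic convention E = k(|dr| - r0)^2 from the BondEnergy() description; the sign conventions are illustrative:

```python
import numpy as np

def bond_terms(crd, atom_a, atom_b, bond_k, bond_r0):
    """Per-bond harmonic energy, force on atom_a and pair virial (real-space sketch)."""
    dr = crd[atom_a] - crd[atom_b]                              # (m, 3) bond vectors
    r = np.linalg.norm(dr, axis=1)                              # (m,) bond lengths
    energy = bond_k * (r - bond_r0) ** 2                        # (m,) harmonic energy
    frc_a = (-2.0 * bond_k * (r - bond_r0) / r)[:, None] * dr   # atom_b feels -frc_a
    virial = -np.sum(dr * frc_a, axis=1)                        # (m,) per-bond virial
    return energy, frc_a, virial
```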
- - Args: - atom_numbers (int32): the number of atoms, n. - cutoff (float32): the square value of cutoff. - pme_beta (float32): PME beta parameter, same as operator PMEReciprocalForce(). - max_neighbor_numbers (int32): the max neighbor numbers, default 800. - - Inputs: - - **uint_crd** (Tensor) - The unsigned int coordinate value of each atom. - The data type is uint32 and the shape is :math:`(n, 3)`. - - **LJtype** (Tensor) - The Lennard-Jones type of each atom. - The data type is int32 and the shape is :math:`(n,)`. - - **charge** (Tensor) - The charge carried by each atom. - The data type is float32 and the shape is :math:`(n,)`. - - **scaler** (Tensor) - The scale factor between the real-space coordinate and its unsigned int value. - The data type is float32 and the shape is :math:`(3,)`. - - **nl_numbers** (Tensor) - The number of atoms in the neighbor list of each atom. - The data type is int32 and the shape is :math:`(n,)`. - - **nl_serial** (Tensor) - The neighbor list of each atom, the max number is 800. - The data type is int32 and the shape is :math:`(n, 800)`. - - **d_LJ_A** (Tensor) - The Lennard-Jones A coefficient of each kind of atom pair. - The number of atom pairs is q. The data type is float32 and the shape is :math:`(q,)`. - - **d_LJ_B** (Tensor) - The Lennard-Jones B coefficient of each kind of atom pair. - The number of atom pairs is q. The data type is float32 and the shape is :math:`(q,)`. - - Outputs: - - **frc** (Tensor) - The force felt by each atom. - The data type is float32 and the shape is :math:`(n, 3)`. - - **virial** (Tensor) - The virial felt by each atom. - The data type is float32 and the shape is :math:`(n,)`. - - **atom_energy** (Tensor) - The atom energy felt by each atom. - The data type is float32 and the shape is :math:`(n,)`. - - Supported Platforms: - ``GPU`` - """ - - @prim_attr_register - def __init__(self, atom_numbers, cutoff, pme_beta, max_neighbor_numbers=800): - """Initialize LJForceWithVirialEnergy""" - validator.check_value_type('atom_numbers', atom_numbers, int, self.name) - validator.check_value_type('cutoff', cutoff, float, self.name) - validator.check_value_type('pme_beta', pme_beta, float, self.name) - validator.check_value_type('max_neighbor_numbers', max_neighbor_numbers, int, self.name) - self.atom_numbers = atom_numbers - self.cutoff = cutoff - self.pme_beta = pme_beta - self.max_neighbor_numbers = max_neighbor_numbers - self.init_prim_io_names( - inputs=['uint_crd', 'LJtype', 'charge', 'scaler', 'nl_numbers', 'nl_serial', 'd_LJ_A', 'd_LJ_B'], - outputs=['frc', 'virial', 'atom_energy']) - self.add_prim_attr('atom_numbers', self.atom_numbers) - self.add_prim_attr('cutoff', self.cutoff) - self.add_prim_attr('pme_beta', self.pme_beta) - self.add_prim_attr('max_neighbor_numbers', self.max_neighbor_numbers) - - def infer_shape(self, uint_crd, ljtype, charge, scaler, nl_numbers, nl_serial, d_lj_a, d_lj_b): - cls_name = self.name - n = self.atom_numbers - q = d_lj_a[0] - m = self.max_neighbor_numbers - validator.check_int(len(uint_crd), 2, Rel.EQ, "uint_crd_dim", cls_name) - validator.check_int(len(ljtype), 1, Rel.EQ, "LJtype_dim", cls_name) - validator.check_int(len(charge), 1, Rel.EQ, "charge_dim", cls_name) - validator.check_int(len(scaler), 1, Rel.EQ, "scaler_dim", cls_name) - validator.check_int(len(nl_numbers), 1, Rel.EQ, "nl_numbers_dim", cls_name) - validator.check_int(len(nl_serial), 2, Rel.EQ, "nl_serial_dim", cls_name) - validator.check_int(len(d_lj_a), 1, Rel.EQ, "d_LJ_A_dim", cls_name) - validator.check_int(len(d_lj_b), 1, Rel.EQ, "d_LJ_B_dim", cls_name) - -
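A pair-level sketch of the energy this family of kernels evaluates inside the cutoff, assuming SPONGE's A/12, B/6 Lennard-Jones convention and the erfc-screened Coulomb for the PME direct part; illustrative only:

```python
import numpy as np
from scipy.special import erfc

def lj_pme_direct_pair_energy(r, a_ij, b_ij, qi, qj, pme_beta):
    """One pair inside the cutoff: screened Coulomb direct term plus 12-6 LJ."""
    e_direct = qi * qj * erfc(pme_beta * r) / r               # PME real-space part
    e_lj = a_ij / 12.0 * r ** -12 - b_ij / 6.0 * r ** -6      # assumed A/12, B/6 convention
    return e_direct + e_lj
```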
validator.check_int(uint_crd[0], n, Rel.EQ, "uint_crd_shape[0]", cls_name) - validator.check_int(uint_crd[1], 3, Rel.EQ, "uint_crd_shape[1]", cls_name) - validator.check_int(ljtype[0], n, Rel.EQ, "LJtype_shape", cls_name) - validator.check_int(charge[0], n, Rel.EQ, "charge_shape", cls_name) - validator.check_int(scaler[0], 3, Rel.EQ, "scaler_shape", cls_name) - validator.check_int(nl_numbers[0], n, Rel.EQ, "nl_numbers_shape", cls_name) - validator.check_int(nl_serial[0], n, Rel.EQ, "nl_serial_shape[0]", cls_name) - validator.check_int(nl_serial[1], m, Rel.EQ, "nl_serial_shape[1]", cls_name) - validator.check_int(d_lj_a[0], q, Rel.EQ, "d_LJ_A_shape[0]", cls_name) - validator.check_int(d_lj_b[0], q, Rel.EQ, "d_LJ_B_shape[0]", cls_name) - return [n, 3], [n,], [n,] - - def infer_dtype(self, uint_crd, ljtype, charge, scaler, nl_numbers, nl_serial, d_lj_a, d_lj_b): - validator.check_tensor_dtype_valid('uint_crd', uint_crd, [mstype.uint32], self.name) - validator.check_tensor_dtype_valid('LJtype', ljtype, [mstype.int32], self.name) - validator.check_tensor_dtype_valid('charge', charge, [mstype.float32], self.name) - validator.check_tensor_dtype_valid('scaler', scaler, [mstype.float32], self.name) - validator.check_tensor_dtype_valid('nl_numbers', nl_numbers, [mstype.int32], self.name) - validator.check_tensor_dtype_valid('nl_serial', nl_serial, [mstype.int32], self.name) - validator.check_tensor_dtype_valid('d_LJ_A', d_lj_a, [mstype.float32], self.name) - validator.check_tensor_dtype_valid('d_LJ_B', d_lj_b, [mstype.float32], self.name) - return mstype.float32, mstype.float32, mstype.float32 - - -class LJForceWithPMEDirectForceUpdate(PrimitiveWithInfer): - """ - Calculate the Lennard-Jones force and PME direct force together for pressure. - - The calculation formula of the Lennard-Jones part is the same as in operator - LJForce(), and the PME direct part is within the PME method. - - Because there is a large number of inputs and each of them is related, - there is no way to construct `Examples` using random methods. For details, refer to the webpage `SPONGE in MindSpore - `_. - - .. warning:: - This is an experimental prototype that is subject to change and/or deletion. - - Args: - atom_numbers (int32): the number of atoms, n. - cutoff (float32): the square value of cutoff. - pme_beta (float32): PME beta parameter, same as operator PMEReciprocalForce(). - need_update (int32): if need_update = 1, calculate the pressure, default 0. - - Inputs: - - **uint_crd** (Tensor) - The unsigned int coordinate value of each atom. - The data type is uint32 and the shape is :math:`(n, 3)`. - - **LJtype** (Tensor) - The Lennard-Jones type of each atom. - The data type is int32 and the shape is :math:`(n,)`. - - **charge** (Tensor) - The charge carried by each atom. - The data type is float32 and the shape is :math:`(n,)`. - - **scaler** (Tensor) - The scale factor between the real-space coordinate and its unsigned int value. - The data type is float32 and the shape is :math:`(3,)`. - - **nl_numbers** (Tensor) - The number of atoms in the neighbor list of each atom. - The data type is int32 and the shape is :math:`(n,)`. - - **nl_serial** (Tensor) - The neighbor list of each atom, the max number is 800. - The data type is int32 and the shape is :math:`(n, 800)`. - - **d_LJ_A** (Tensor) - The Lennard-Jones A coefficient of each kind of atom pair. - The number of atom pairs is q. The data type is float32 and the shape is :math:`(q,)`. - - **d_LJ_B** (Tensor) - The Lennard-Jones B coefficient of each kind of atom pair. - The number of atom pairs is q.
The data type is float32 and the shape is :math:`(q,)`. - - **beta** (Tensor) - PME beta parameter. The data type is float32 and the shape is :math:`(1,)`. - - Outputs: - - **frc** (Tensor) - The force felt by each atom. - The data type is float32 and the shape is :math:`(n, 3)`. - - Supported Platforms: - ``GPU`` - """ - - @prim_attr_register - def __init__(self, atom_numbers, cutoff, pme_beta, need_update=0): - """Initialize LJForceWithPMEDirectForce""" - validator.check_value_type('atom_numbers', atom_numbers, int, self.name) - validator.check_value_type('cutoff', cutoff, float, self.name) - validator.check_value_type('pme_beta', pme_beta, float, self.name) - validator.check_value_type('need_update', need_update, int, self.name) - self.atom_numbers = atom_numbers - self.cutoff = cutoff - self.pme_beta = pme_beta - self.need_update = need_update - self.init_prim_io_names( - inputs=['uint_crd', 'LJtype', 'charge', 'scaler', 'nl_numbers', 'nl_serial', 'd_LJ_A', 'd_LJ_B', 'beta'], - outputs=['frc']) - self.add_prim_attr('atom_numbers', self.atom_numbers) - self.add_prim_attr('cutoff', self.cutoff) - self.add_prim_attr('pme_beta', self.pme_beta) - self.add_prim_attr('need_update', self.need_update) - - def infer_shape(self, uint_crd, ljtype, charge, scaler, nl_numbers, nl_serial, d_lj_a, d_lj_b, beta): - cls_name = self.name - n = self.atom_numbers - q = d_lj_a[0] - m = nl_serial[1] - validator.check_int(len(uint_crd), 2, Rel.EQ, "uint_crd_dim", cls_name) - validator.check_int(len(ljtype), 1, Rel.EQ, "LJtype_dim", cls_name) - validator.check_int(len(charge), 1, Rel.EQ, "charge_dim", cls_name) - validator.check_int(len(scaler), 1, Rel.EQ, "scaler_dim", cls_name) - validator.check_int(len(nl_numbers), 1, Rel.EQ, "nl_numbers_dim", cls_name) - validator.check_int(len(nl_serial), 2, Rel.EQ, "nl_serial_dim", cls_name) - validator.check_int(len(d_lj_a), 1, Rel.EQ, "d_LJ_A_dim", cls_name) - validator.check_int(len(d_lj_b), 1, Rel.EQ, "d_LJ_B_dim", cls_name) - validator.check_int(len(beta), 1, Rel.EQ, "beta_dim", cls_name) - - validator.check_int(uint_crd[0], n, Rel.EQ, "uint_crd_shape[0]", cls_name) - validator.check_int(uint_crd[1], 3, Rel.EQ, "uint_crd_shape[1]", cls_name) - validator.check_int(ljtype[0], n, Rel.EQ, "LJtype_shape", cls_name) - validator.check_int(charge[0], n, Rel.EQ, "charge_shape", cls_name) - validator.check_int(scaler[0], 3, Rel.EQ, "scaler_shape", cls_name) - validator.check_int(nl_numbers[0], n, Rel.EQ, "nl_numbers_shape", cls_name) - validator.check_int(nl_serial[0], n, Rel.EQ, "nl_serial_shape[0]", cls_name) - validator.check_int(nl_serial[1], m, Rel.EQ, "nl_serial_shape[1]", cls_name) - validator.check_int(d_lj_a[0], q, Rel.EQ, "d_LJ_A_shape[0]", cls_name) - validator.check_int(d_lj_b[0], q, Rel.EQ, "d_LJ_B_shape[0]", cls_name) - validator.check_int(beta[0], 1, Rel.EQ, "beta_shape", cls_name) - return [n, 3] - - def infer_dtype(self, uint_crd, ljtype, charge, scaler, nl_numbers, nl_serial, d_lj_a, d_lj_b, beta): - validator.check_tensor_dtype_valid('uint_crd', uint_crd, [mstype.uint32], self.name) - validator.check_tensor_dtype_valid('LJtype', ljtype, [mstype.int32], self.name) - validator.check_tensor_dtype_valid('charge', charge, [mstype.float32], self.name) - validator.check_tensor_dtype_valid('scaler', scaler, [mstype.float32], self.name) - validator.check_tensor_dtype_valid('nl_numbers', nl_numbers, [mstype.int32], self.name) - validator.check_tensor_dtype_valid('nl_serial', nl_serial, [mstype.int32], self.name) - validator.check_tensor_dtype_valid('d_LJ_A', d_lj_a, 
[mstype.float32], self.name) - validator.check_tensor_dtype_valid('d_LJ_B', d_lj_b, [mstype.float32], self.name) - validator.check_tensor_dtype_valid('beta', beta, [mstype.float32], self.name) - return mstype.float32 - - -class PMEReciprocalForceUpdate(PrimitiveWithInfer): - """ - Calculate the reciprocal part of the long-range Coulomb force using the - PME (Particle Mesh Ewald) method for pressure. Assume the number of atoms is n. - - The detailed calculation formula of the PME (Particle Mesh Ewald) method - can be found in this paper: A Smooth Particle Mesh Ewald Method. DOI: - 10.1063/1.470117. - - Because there is a large number of inputs and each of them is related, - there is no way to construct `Examples` using random methods. For details, refer to the webpage `SPONGE in MindSpore - `_. - - .. warning:: - This is an experimental prototype that is subject to change and/or deletion. - - Args: - atom_numbers (int32): the number of atoms, n. - beta (float32): the PME beta parameter, determined by the - non-bond cutoff value and simulation precision tolerance. - fftx (int32): the number of points for Fourier transform in dimension X. - ffty (int32): the number of points for Fourier transform in dimension Y. - fftz (int32): the number of points for Fourier transform in dimension Z. - box_length_0 (float32): the value of boxlength idx 0. - box_length_1 (float32): the value of boxlength idx 1. - box_length_2 (float32): the value of boxlength idx 2. - need_update (int32): if need_update = 1, calculate the pressure, default 0. - - Inputs: - - **uint_crd** (Tensor) - The unsigned int coordinate value of each atom. - The data type is uint32 and the shape is :math:`(n, 3)`. - - **charge** (Tensor) - The charge carried by each atom. - The data type is float32 and the shape is :math:`(n,)` - - **beta** (Tensor) - The PME beta parameter to be updated in pressure calculation. - The data type is float32 and the shape is :math:`(1,)` - - Outputs: - - **force** (Tensor) - The force felt by each atom.
- The data type is float32 and the shape is :math:`(n, 3)` - - Supported Platforms: - ``GPU`` - """ - - @prim_attr_register - def __init__(self, atom_numbers, beta, fftx, ffty, fftz, - box_length_0, box_length_1, box_length_2, need_update=0): - """Initialize PMEReciprocalForce""" - validator.check_value_type('atom_numbers', atom_numbers, int, self.name) - validator.check_value_type('beta', beta, float, self.name) - validator.check_value_type('fftx', fftx, int, self.name) - validator.check_value_type('ffty', ffty, int, self.name) - validator.check_value_type('fftz', fftz, int, self.name) - validator.check_value_type('box_length_0', box_length_0, float, self.name) - validator.check_value_type('box_length_1', box_length_1, float, self.name) - validator.check_value_type('box_length_2', box_length_2, float, self.name) - validator.check_value_type('need_update', need_update, int, self.name) - self.atom_numbers = atom_numbers - self.beta = beta - self.fftx = fftx - self.ffty = ffty - self.fftz = fftz - self.box_length_0 = box_length_0 - self.box_length_1 = box_length_1 - self.box_length_2 = box_length_2 - self.need_update = need_update - - self.init_prim_io_names(inputs=['uint_crd', 'charge', 'beta'], - outputs=['force']) - self.add_prim_attr('atom_numbers', self.atom_numbers) - self.add_prim_attr('beta', self.beta) - self.add_prim_attr('fftx', self.fftx) - self.add_prim_attr('ffty', self.ffty) - self.add_prim_attr('fftz', self.fftz) - self.add_prim_attr('box_length_0', self.box_length_0) - self.add_prim_attr('box_length_1', self.box_length_1) - self.add_prim_attr('box_length_2', self.box_length_2) - self.add_prim_attr('need_update', self.need_update) - - def infer_shape(self, uint_crd_shape, charge_shape, beta): - cls_name = self.name - n = self.atom_numbers - validator.check_int(len(uint_crd_shape), 2, Rel.EQ, "uint_crd_dim", cls_name) - validator.check_int(len(charge_shape), 1, Rel.EQ, "charge_dim", cls_name) - validator.check_int(len(beta), 1, Rel.EQ, "beta_dim", cls_name) - validator.check_int(uint_crd_shape[0], n, Rel.EQ, "uint_crd_shape[0]", cls_name) - validator.check_int(uint_crd_shape[1], 3, Rel.EQ, "uint_crd_shape[1]", cls_name) - validator.check_int(charge_shape[0], n, Rel.EQ, "charge_shape", cls_name) - validator.check_int(beta[0], 1, Rel.EQ, "beta_shape", cls_name) - return uint_crd_shape - - def infer_dtype(self, uint_crd_type, charge_type, beta): - validator.check_tensor_dtype_valid('uint_crd', uint_crd_type, [mstype.uint32], self.name) - validator.check_tensor_dtype_valid('charge', charge_type, [mstype.float32], self.name) - validator.check_tensor_dtype_valid('beta', beta, [mstype.float32], self.name) - return charge_type - - -class PMEExcludedForceUpdate(PrimitiveWithInfer): - """ - Calculate the excluded part of the long-range Coulomb force using the - PME (Particle Mesh Ewald) method for pressure. Assume the number of atoms is - n, and the length of the excluded list is E. - - Because there is a large number of inputs and each of them is related, - there is no way to construct `Examples` using random methods. For details, refer to the webpage `SPONGE in MindSpore - `_. - - .. warning:: - This is an experimental prototype that is subject to change and/or deletion. - - Args: - atom_numbers (int32): the number of atoms, n. - excluded_numbers (int32): the length of the excluded list, E. - beta (float32): the PME beta parameter, determined by the - non-bond cutoff value and simulation precision tolerance. - need_update (int32): if need_update = 1, calculate the pressure, default 0.
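The excluded-list inputs described below follow a CSR-like flattened layout (start offsets, flat indices, per-row counts); a small sketch with hypothetical data to show how a per-atom exclusion set is recovered:

```python
import numpy as np

# Hypothetical exclusions for 3 atoms: atom 0 excludes {1, 2}, atoms 1 and 2 exclude {0}.
excluded_list = np.array([1, 2, 0, 0], dtype=np.int32)        # flat join, shape (E,)
excluded_list_start = np.array([0, 2, 3], dtype=np.int32)     # start offsets, shape (n,)
excluded_atom_numbers = np.array([2, 1, 1], dtype=np.int32)   # per-atom counts, shape (n,)

def excluded_partners(i):
    """Exclusion partners of atom i, recovered from the flattened layout."""
    s = excluded_list_start[i]
    return excluded_list[s:s + excluded_atom_numbers[i]]

print(excluded_partners(0))  # -> [1 2]
```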
- - Inputs: - - **uint_crd** (Tensor) - The unsigned int coordinates value of each atom. - The data type is uint32 and the shape is :math:`(n, 3)` - - **scaler** (Tensor) - The scale factor between real space - coordinates and its unsigned int value. The data type is float32 and the shape is :math:`(3,)` - - **charge** (Tensor) - The charge carried by each atom. - The data type is float32 and the shape is :math:`(n,)` - - **excluded_list_start** (Tensor) - The start excluded index - in excluded list for each atom. The data type is int32 and the shape is :math:`(n,)` - - **excluded_list** (Tensor) - The contiguous join of excluded - list of each atom. E is the number of excluded atoms. The data type is int32 and the shape is :math:`(E,)` - - **excluded_atom_numbers** (Tensor) - The number of atom excluded - in excluded list for each atom. The data type is int32 and the shape is :math:`(n,)` - - **beta** (Tensor) - The PME beta parameter to be updated in pressure calculation. - The data type is float32 and the shape is :math:`(1,)` - - Outputs: - - **force** (Tensor) - The force felt by each atom. - The data type is float32 and the shape is :math:`(n, 3)` - - Supported Platforms: - ``GPU`` - """ - - @prim_attr_register - def __init__(self, atom_numbers, excluded_numbers, beta, need_update=0): - """Initialize PMEExcludedForce""" - validator.check_value_type('atom_numbers', atom_numbers, int, self.name) - validator.check_value_type('excluded_numbers', excluded_numbers, int, self.name) - validator.check_value_type('beta', beta, float, self.name) - validator.check_value_type('need_update', need_update, int, self.name) - self.atom_numbers = atom_numbers - self.excluded_numbers = excluded_numbers - self.beta = beta - self.need_update = need_update - self.init_prim_io_names( - inputs=['uint_crd', 'scaler', 'charge', 'excluded_list_start', 'excluded_list', - 'excluded_atom_numbers', 'beta'], - outputs=['force']) - self.add_prim_attr('atom_numbers', self.atom_numbers) - self.add_prim_attr('excluded_numbers', self.excluded_numbers) - self.add_prim_attr('beta', self.beta) - self.add_prim_attr('need_update', self.need_update) - - def infer_shape(self, uint_crd_shape, scaler_shape, charge_shape, excluded_list_start_shape, excluded_list_shape, - excluded_atom_numbers_shape, beta): - cls_name = self.name - n = self.atom_numbers - e = self.excluded_numbers - validator.check_int(len(uint_crd_shape), 2, Rel.EQ, "uint_crd_dim", cls_name) - validator.check_int(len(scaler_shape), 1, Rel.EQ, "scaler_dim", cls_name) - validator.check_int(len(charge_shape), 1, Rel.EQ, "charge_dim", cls_name) - validator.check_int(len(excluded_list_start_shape), 1, Rel.EQ, "excluded_list_start_dim", cls_name) - validator.check_int(len(excluded_atom_numbers_shape), 1, Rel.EQ, "excluded_atom_numbers_dim", cls_name) - validator.check_int(len(excluded_list_shape), 1, Rel.EQ, "excluded_list_dim", cls_name) - validator.check_int(len(beta), 1, Rel.EQ, "beta_dim", cls_name) - validator.check_int(uint_crd_shape[0], n, Rel.EQ, "uint_crd_shape[0]", cls_name) - validator.check_int(uint_crd_shape[1], 3, Rel.EQ, "uint_crd_shape[1]", cls_name) - validator.check_int(scaler_shape[0], 3, Rel.EQ, "scaler_shape", cls_name) - validator.check_int(charge_shape[0], n, Rel.EQ, "charge_shape", cls_name) - validator.check_int(excluded_list_start_shape[0], n, Rel.EQ, "excluded_list_start_shape", cls_name) - validator.check_int(excluded_atom_numbers_shape[0], n, Rel.EQ, "excluded_atom_numbers_shape", cls_name) - validator.check_int(excluded_list_shape[0], e, 
Rel.EQ, "excluded_list_shape", cls_name) - validator.check_int(beta[0], 1, Rel.EQ, "beta_shape", cls_name) - return [n, 3] - - def infer_dtype(self, uint_crd_type, scaler_type, charge_type, excluded_list_start_type, excluded_list_type, - excluded_atom_numbers_type, beta): - validator.check_tensor_dtype_valid('scaler', scaler_type, [mstype.float32], self.name) - validator.check_tensor_dtype_valid('uint_crd', uint_crd_type, [mstype.uint32], self.name) - validator.check_tensor_dtype_valid('charge', charge_type, [mstype.float32], self.name) - validator.check_tensor_dtype_valid('excluded_list_start', excluded_list_start_type, [mstype.int32], - self.name) - validator.check_tensor_dtype_valid('excluded_list', excluded_list_type, [mstype.int32], - self.name) - validator.check_tensor_dtype_valid('excluded_atom_numbers', excluded_atom_numbers_type, [mstype.int32], - self.name) - validator.check_tensor_dtype_valid('beta', beta, [mstype.float32], self.name) - return mstype.float32 - - -class LJForceWithVirialEnergyUpdate(PrimitiveWithInfer): - """ - Calculate the Lennard-Jones force and PME direct force together for pressure. - - The calculation formula of Lennard-Jones part is the same as operator - LJForce(), and the PME direct part is within PME method. - - Because there is a large amount of inputs and each of them are related, - there is no way to construct `Examples` using random methods. For details, refer the webpage `SPONGE in MindSpore - `_. - - .. warning:: - This is an experimental prototype that is subject to change and/or deletion. - - Args: - atom_numbers (int32): the number of atoms, n. - cutoff (float32): the square value of cutoff. - pme_beta (float32): PME beta parameter, same as operator PMEReciprocalForce(). - max_neighbor_numbers (int32): the max neighbor numbers, default 800. - need_update (int32): if need_update = 1, calculate the pressure, default 0. - - Inputs: - - **uint_crd** (Tensor) - The unsigned int coordinate value of each atom. - The data type is uint32 and the shape is :math:`(n, 3)`. - - **LJtype** (Tensor) - The Lennard-Jones type of each atom. - The data type is int32 and the shape is :math:`(n,)`. - - **charge** (Tensor) - The charge carried by each atom. - The data type is float32 and the shape is :math:`(n,)`. - - **scaler** (Tensor) - The scale factor. - The data type is float32 and the shape is :math:`(3,)`. - - **nl_numbers** (Tensor) - The each atom. - The data type is int32 and the shape is :math:`(n,)`. - - **nl_serial** (Tensor) - The neighbor list of each atom, the max number is 800. - The data type is int32 and the shape is :math:`(n, 800)`. - - **d_LJ_A** (Tensor) - The Lennard-Jones A coefficient of each kind of atom pair. - The number of atom pair is q. The data type is float32 and the shape is :math:`(q,)`. - - **d_LJ_B** (Tensor) - The Lennard-Jones B coefficient of each kind of atom pair. - The number of atom pair is q. The data type is float32 and the shape is :math:`(q,)`. - - **beta** (Tensor) - The PME beta parameter to be updated in pressure calculation. - The data type is float32 and the shape is :math:`(1,)` - - Outputs: - - **frc** (Tensor) - The force felt by each atom. - The data type is float32 and the shape is :math:`(n, 3)`. - - **virial** (Tensor) - The accumulated potential virial for each atom. - The data type is float32 and the shape is :math:`(n, )`. - - **atom_energy** (Tensor) - The accumulated potential energy for each atom. - The data type is float32 and the shape is :math:`(n, )`. 
- - Supported Platforms: - ``GPU`` - """ - - @prim_attr_register - def __init__(self, atom_numbers, cutoff, pme_beta, max_neighbor_numbers=800, need_update=0): - """Initialize LJForceWithPMEDirectForce""" - validator.check_value_type('atom_numbers', atom_numbers, int, self.name) - validator.check_value_type('cutoff', cutoff, float, self.name) - validator.check_value_type('pme_beta', pme_beta, float, self.name) - validator.check_value_type('max_neighbor_numbers', max_neighbor_numbers, int, self.name) - validator.check_value_type('need_update', need_update, int, self.name) - self.atom_numbers = atom_numbers - self.cutoff = cutoff - self.pme_beta = pme_beta - self.max_neighbor_numbers = max_neighbor_numbers - self.need_update = need_update - self.init_prim_io_names( - inputs=['uint_crd', 'LJtype', 'charge', 'scaler', 'nl_numbers', 'nl_serial', 'd_LJ_A', 'd_LJ_B', 'beta'], - outputs=['frc', 'virial', 'atom_energy']) - self.add_prim_attr('atom_numbers', self.atom_numbers) - self.add_prim_attr('cutoff', self.cutoff) - self.add_prim_attr('pme_beta', self.pme_beta) - self.add_prim_attr('max_neighbor_numbers', self.max_neighbor_numbers) - self.add_prim_attr('need_update', self.need_update) - - def infer_shape(self, uint_crd, ljtype, charge, scaler, nl_numbers, nl_serial, d_lj_a, d_lj_b, beta): - cls_name = self.name - n = self.atom_numbers - q = d_lj_a[0] - m = self.max_neighbor_numbers - validator.check_int(len(uint_crd), 2, Rel.EQ, "uint_crd_dim", cls_name) - validator.check_int(len(ljtype), 1, Rel.EQ, "LJtype_dim", cls_name) - validator.check_int(len(charge), 1, Rel.EQ, "charge_dim", cls_name) - validator.check_int(len(scaler), 1, Rel.EQ, "scaler_dim", cls_name) - validator.check_int(len(nl_numbers), 1, Rel.EQ, "nl_numbers_dim", cls_name) - validator.check_int(len(nl_serial), 2, Rel.EQ, "nl_serial_dim", cls_name) - validator.check_int(len(d_lj_a), 1, Rel.EQ, "d_LJ_A_dim", cls_name) - validator.check_int(len(d_lj_b), 1, Rel.EQ, "d_LJ_B_dim", cls_name) - validator.check_int(len(beta), 1, Rel.EQ, "beta_dim", cls_name) - validator.check_int(uint_crd[0], n, Rel.EQ, "uint_crd_shape[0]", cls_name) - validator.check_int(uint_crd[1], 3, Rel.EQ, "uint_crd_shape[1]", cls_name) - validator.check_int(ljtype[0], n, Rel.EQ, "LJtype_shape", cls_name) - validator.check_int(charge[0], n, Rel.EQ, "charge_shape", cls_name) - validator.check_int(scaler[0], 3, Rel.EQ, "scaler_shape", cls_name) - validator.check_int(nl_numbers[0], n, Rel.EQ, "nl_numbers_shape", cls_name) - validator.check_int(nl_serial[0], n, Rel.EQ, "nl_serial_shape[0]", cls_name) - validator.check_int(nl_serial[1], m, Rel.EQ, "nl_serial_shape[1]", cls_name) - validator.check_int(d_lj_a[0], q, Rel.EQ, "d_LJ_A_shape[0]", cls_name) - validator.check_int(d_lj_b[0], q, Rel.EQ, "d_LJ_B_shape[0]", cls_name) - validator.check_int(beta[0], 1, Rel.EQ, "beta_shape[0]", cls_name) - return [n, 3], [n,], [n,] - - def infer_dtype(self, uint_crd, ljtype, charge, scaler, nl_numbers, nl_serial, d_lj_a, d_lj_b, beta): - validator.check_tensor_dtype_valid('uint_crd', uint_crd, [mstype.uint32], self.name) - validator.check_tensor_dtype_valid('LJtype', ljtype, [mstype.int32], self.name) - validator.check_tensor_dtype_valid('charge', charge, [mstype.float32], self.name) - validator.check_tensor_dtype_valid('scaler', scaler, [mstype.float32], self.name) - validator.check_tensor_dtype_valid('nl_numbers', nl_numbers, [mstype.int32], self.name) - validator.check_tensor_dtype_valid('nl_serial', nl_serial, [mstype.int32], self.name) - 
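For reference, the nl_numbers/nl_serial pair shared by these kernels encodes a padded per-atom neighbor list; a sketch of the pair loop it implies (entries beyond nl_numbers[i] are padding and are ignored):

```python
def neighbor_pairs(nl_numbers, nl_serial):
    """Yield (i, j) pairs from a padded neighbor list of shape (n, m)."""
    n = len(nl_numbers)
    for i in range(n):
        for k in range(int(nl_numbers[i])):  # only the first nl_numbers[i] slots are valid
            yield i, int(nl_serial[i][k])
```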
validator.check_tensor_dtype_valid('d_LJ_A', d_lj_a, [mstype.float32], self.name) - validator.check_tensor_dtype_valid('d_LJ_B', d_lj_b, [mstype.float32], self.name) - validator.check_tensor_dtype_valid('beta', beta, [mstype.float32], self.name) - return mstype.float32, mstype.float32, mstype.float32 - - -class Dihedral14ForceWithAtomEnergyVirial(PrimitiveWithInfer): - """ - Calculate the Lennard-Jones and Coulomb energy correction and force correction - for each necessary dihedral 1,4 term together and add them to the total force - and potential energy for each atom. - - The calculation formula of the force correction is the same as in operator - :class:`Dihedral14LJForceWithDirectCF`, and the energy correction part is the same - as in operators :class:`Dihedral14LJEnergy` and :class:`Dihedral14CFEnergy`. - - .. warning:: - This is an experimental prototype that is subject to change and/or deletion. - - Args: - nb14_numbers (int32): the number of necessary dihedral 1,4 terms m. - atom_numbers (int32): the number of atoms n. - - Inputs: - - **uint_crd_f** (Tensor) - The unsigned int coordinate value of each atom. - The data type is uint32 and the shape is :math:`(n, 3)`. - - **LJtype** (Tensor) - The Lennard-Jones type of each atom. - The data type is int32 and the shape is :math:`(n,)`. - - **charge** (Tensor) - The charge carried by each atom. - The data type is float32 and the shape is :math:`(n,)`. - - **boxlength** (Tensor) - The length of the molecular simulation box in 3 dimensions. - The data type is float32 and the shape is :math:`(3,)`. - - **a_14** (Tensor) - The first atom index of each dihedral 1,4 term. - The data type is int32 and the shape is :math:`(m,)`. - - **b_14** (Tensor) - The second atom index of each dihedral 1,4 term. - The data type is int32 and the shape is :math:`(m,)`. - - **lj_scale_factor** (Tensor) - The scale factor for the - Lennard-Jones part of the force correction of each dihedral 1,4 term. - The data type is float32 and the shape is :math:`(m,)`. - - **cf_scale_factor** (Tensor) - The scale factor for the Coulomb force. - The data type is float32 and the shape is :math:`(m,)`. - - **LJ_type_A** (Tensor) - The A parameter in the Lennard-Jones scheme of each atom pair type. - The number of atom pairs is q. The data type is float32 and the shape is :math:`(q,)`. - - **LJ_type_B** (Tensor) - The B parameter in the Lennard-Jones scheme of each atom pair type. - The number of atom pairs is q. The data type is float32 and the shape is :math:`(q,)`. - - Outputs: - - **frc** (Tensor) - The force felt by each atom. - The data type is float32 and the shape is :math:`(n, 3)`. - - **atom_energy** (Tensor) - The accumulated potential energy for each atom. - The data type is float32 and the shape is :math:`(n, )`. - - **atom_virial** (Tensor) - The accumulated potential virial for each atom. - The data type is float32 and the shape is :math:`(n, )`.
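A sketch of the scaled 1-4 correction applied per dihedral 1,4 pair, with the Lennard-Jones and Coulomb parts scaled independently; the A/12, B/6 convention is assumed as in the earlier LJ sketch:

```python
def nb14_pair_energy(r, a_ij, b_ij, qi, qj, lj_scale, cf_scale):
    """Scaled 1-4 energy: lj_scale * E_LJ + cf_scale * E_Coulomb."""
    e_lj = a_ij / 12.0 * r ** -12 - b_ij / 6.0 * r ** -6
    e_cf = qi * qj / r
    return lj_scale * e_lj + cf_scale * e_cf
```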
- - Supported Platforms: - ``GPU`` - """ - - @prim_attr_register - def __init__(self, nb14_numbers, atom_numbers): - """Initialize Dihedral14LJCFForceWithAtomEnergy""" - validator.check_value_type('nb14_numbers', nb14_numbers, int, self.name) - validator.check_value_type('atom_numbers', atom_numbers, int, self.name) - self.dihedral_14_numbers = nb14_numbers - self.atom_numbers = atom_numbers - - self.init_prim_io_names( - inputs=['uint_crd_f', 'LJtype', 'charge', 'boxlength', 'a_14', 'b_14', 'lj_scale_factor', - 'cf_scale_factor', 'LJ_type_A', 'LJ_type_B'], - outputs=['frc', 'atom_energy', 'atom_virial']) - self.add_prim_attr('dihedral_14_numbers', self.dihedral_14_numbers) - self.add_prim_attr('atom_numbers', self.atom_numbers) - - def infer_shape(self, uint_crd_f_shape, ljtype_shape, charge_shape, boxlength_f_shape, a_14_shape, b_14_shape, - lj_scale_factor_shape, cf_scale_factor_shape, lj_type_a_shape, lj_type_b_shape): - cls_name = self.name - n = self.atom_numbers - m = self.dihedral_14_numbers - q = lj_type_a_shape[0] - validator.check_int(len(uint_crd_f_shape), 2, Rel.EQ, "uint_crd_f_dim", cls_name) - validator.check_int(len(ljtype_shape), 1, Rel.EQ, "LJtype_dim", cls_name) - validator.check_int(len(charge_shape), 1, Rel.EQ, "charge_dim", cls_name) - validator.check_int(len(boxlength_f_shape), 1, Rel.EQ, "boxlength_f_dim", cls_name) - validator.check_int(len(a_14_shape), 1, Rel.EQ, "a_14_dim", cls_name) - validator.check_int(len(b_14_shape), 1, Rel.EQ, "b_14_dim", cls_name) - validator.check_int(len(lj_scale_factor_shape), 1, Rel.EQ, "lj_scale_factor_dim", cls_name) - validator.check_int(len(cf_scale_factor_shape), 1, Rel.EQ, "cf_scale_factor_dim", cls_name) - validator.check_int(len(lj_type_a_shape), 1, Rel.EQ, "LJ_type_A_dim", cls_name) - validator.check_int(len(lj_type_b_shape), 1, Rel.EQ, "LJ_type_B_dim", cls_name) - - validator.check_int(uint_crd_f_shape[0], n, Rel.EQ, "uint_crd_f_shape[0]", cls_name) - validator.check_int(uint_crd_f_shape[1], 3, Rel.EQ, "uint_crd_f_shape[1]", cls_name) - validator.check_int(ljtype_shape[0], n, Rel.EQ, "LJtype_shape", cls_name) - validator.check_int(charge_shape[0], n, Rel.EQ, "charge_shape", cls_name) - validator.check_int(boxlength_f_shape[0], 3, Rel.EQ, "boxlength_f_shape", cls_name) - validator.check_int(lj_type_a_shape[0], q, Rel.EQ, "LJ_type_A_shape", cls_name) - validator.check_int(lj_type_b_shape[0], q, Rel.EQ, "LJ_type_B_shape", cls_name) - validator.check_int(a_14_shape[0], m, Rel.EQ, "a_14_shape", cls_name) - validator.check_int(b_14_shape[0], m, Rel.EQ, "b_14_shape", cls_name) - validator.check_int(lj_scale_factor_shape[0], m, Rel.EQ, "lj_scale_factor_shape", cls_name) - validator.check_int(cf_scale_factor_shape[0], m, Rel.EQ, "cf_scale_factor_shape", cls_name) - return [n, 3], [n,], [n,] - - def infer_dtype(self, uint_crd_f_dtype, ljtype_dtype, charge_dtype, boxlength_f_type, a_14_type, b_14_type, - lj_scale_factor_type, cf_scale_factor_type, lj_type_a_type, lj_type_b_type): - validator.check_tensor_dtype_valid('uint_crd_f', uint_crd_f_dtype, [mstype.uint32], self.name) - validator.check_tensor_dtype_valid('LJtype', ljtype_dtype, [mstype.int32], self.name) - validator.check_tensor_dtype_valid('charge', charge_dtype, [mstype.float32], self.name) - validator.check_tensor_dtype_valid('boxlength_f', boxlength_f_type, [mstype.float32], self.name) - validator.check_tensor_dtype_valid('a_14', a_14_type, [mstype.int32], self.name) - validator.check_tensor_dtype_valid('b_14', b_14_type, [mstype.int32], self.name) - 
validator.check_tensor_dtype_valid('lj_scale_factor', lj_scale_factor_type, [mstype.float32], self.name) - validator.check_tensor_dtype_valid('cf_scale_factor', cf_scale_factor_type, [mstype.float32], self.name) - validator.check_tensor_dtype_valid('LJ_type_A', lj_type_a_type, [mstype.float32], self.name) - validator.check_tensor_dtype_valid('LJ_type_B', lj_type_b_type, [mstype.float32], self.name) - - return mstype.float32, mstype.float32, mstype.float32 - - -class PMEEnergyUpdate(PrimitiveWithInfer): - """ - Calculate the Coulomb energy of the system using the PME method for pressure. - - Because there is a large number of inputs and each of them is related, - there is no way to construct `Examples` using random methods. For details, refer to the webpage `SPONGE in MindSpore - `_. - - .. warning:: - This is an experimental prototype that is subject to change and/or deletion. - - Args: - atom_numbers (int32): the number of atoms, n. - excluded_numbers (int32): the length of the excluded list, E. - beta (float32): the PME beta parameter, determined by the - non-bond cutoff value and simulation precision tolerance. - fftx (int32): the number of points for Fourier transform in dimension X. - ffty (int32): the number of points for Fourier transform in dimension Y. - fftz (int32): the number of points for Fourier transform in dimension Z. - box_length_0 (float32): the value of boxlength idx 0. - box_length_1 (float32): the value of boxlength idx 1. - box_length_2 (float32): the value of boxlength idx 2. - max_neighbor_numbers (int32): the max neighbor numbers, m, default 800. - need_update (int32): if need_update = 1, calculate the pressure, default 0. - - Inputs: - - **uint_crd** (Tensor) - The unsigned int coordinates value of each atom. - The data type is uint32 and the shape is :math:`(n, 3)` - - **charge** (Tensor) - The charge carried by each atom. - The data type is float32 and the shape is :math:`(n,)` - - **nl_numbers** (Tensor) - The number of atoms in the neighbor list of each atom. - The data type is int32 and the shape is :math:`(n,)` - - **nl_serial** (Tensor) - The neighbor list of each atom, the max number is 800. - The data type is int32 and the shape is :math:`(n, m)` - - **scaler** (Tensor) - The scale factor between real space - coordinates and their unsigned int values. The data type is float32 and the shape is :math:`(3,)` - - **excluded_list_start** (Tensor) - The start excluded index - in the excluded list for each atom. The data type is int32 and the shape is :math:`(n,)` - - **excluded_list** (Tensor) - The contiguous join of the excluded - lists of all atoms. E is the number of excluded atoms. The data type is int32 and the shape is :math:`(E,)` - - **excluded_atom_numbers** (Tensor) - The number of atoms excluded - in the excluded list for each atom. The data type is int32 and the shape is :math:`(n,)` - - **factor** (Tensor) - The factor parameter to be updated in pressure calculation. - The data type is float32 and the shape is :math:`(1,)` - - **beta** (Tensor) - The PME beta parameter to be updated in pressure calculation. - The data type is float32 and the shape is :math:`(1,)` - - Outputs: - - **reciprocal_ene** (Tensor) - The reciprocal term of PME energy. - The data type is float32 and the shape is :math:`(1,)`. - - **self_ene** (Tensor) - The self term of PME energy. - The data type is float32 and the shape is :math:`(1,)`. - - **direct_ene** (Tensor) - The direct term of PME energy. - The data type is float32 and the shape is :math:`(1,)`.
-
-
-class PMEEnergyUpdate(PrimitiveWithInfer):
-    """
-    Calculate the Coulomb energy of the system using the PME method, for pressure calculation.
-
-    Because there is a large number of inputs and they are all related,
-    there is no way to construct `Examples` from random values. For details, refer to the webpage `SPONGE in MindSpore
-    `_.
-
-    .. warning::
-        This is an experimental prototype that is subject to change and/or deletion.
-
-    Args:
-        atom_numbers (int32): the number of atoms, n.
-        excluded_numbers (int32): the length of the excluded list, E.
-        beta (float32): the PME beta parameter, determined by the
-          non-bond cutoff value and simulation precision tolerance.
-        fftx (int32): the number of points for Fourier transform in dimension X.
-        ffty (int32): the number of points for Fourier transform in dimension Y.
-        fftz (int32): the number of points for Fourier transform in dimension Z.
-        box_length_0 (float32): the value of boxlength idx 0.
-        box_length_1 (float32): the value of boxlength idx 1.
-        box_length_2 (float32): the value of boxlength idx 2.
-        max_neighbor_numbers (int32): the maximum number of neighbors, m, default 800.
-        need_update (int32): if need_update = 1, calculate the pressure, default 0.
-
-    Inputs:
-        - **uint_crd** (Tensor) - The unsigned int coordinates value of each atom.
-          The data type is uint32 and the shape is :math:`(n, 3)`
-        - **charge** (Tensor) - The charge carried by each atom.
-          The data type is float32 and the shape is :math:`(n,)`
-        - **nl_numbers** (Tensor) - The number of neighbors of each atom.
-          The data type is int32 and the shape is :math:`(n,)`
-        - **nl_serial** (Tensor) - The neighbor list of each atom, the max number is 800.
-          The data type is int32 and the shape is :math:`(n, m)`
-        - **scaler** (Tensor) - The scale factor between real space
-          coordinates and its unsigned int value. The data type is float32 and the shape is :math:`(3,)`
-        - **excluded_list_start** (Tensor) - The start excluded index
-          in excluded list for each atom. The data type is int32 and the shape is :math:`(n,)`
-        - **excluded_list** (Tensor) - The contiguous join of excluded
-          list of each atom. E is the number of excluded atoms. The data type is int32 and the shape is :math:`(E,)`
-        - **excluded_atom_numbers** (Tensor) - The number of atoms excluded
-          in the excluded list for each atom. The data type is int32 and the shape is :math:`(n,)`
-        - **factor** (Tensor) - The factor parameter to be updated in pressure calculation.
-          The data type is float32 and the shape is :math:`(1,)`
-        - **beta** (Tensor) - The PME beta parameter to be updated in pressure calculation.
-          The data type is float32 and the shape is :math:`(1,)`
-
-    Outputs:
-        - **reciprocal_ene** (Tensor) - The reciprocal term of PME energy.
-          The data type is float32 and the shape is :math:`(1,)`.
-        - **self_ene** (Tensor) - The self term of PME energy.
-          The data type is float32 and the shape is :math:`(1,)`.
-        - **direct_ene** (Tensor) - The direct term of PME energy.
-          The data type is float32 and the shape is :math:`(1,)`.
-        - **correction_ene** (Tensor) - The correction term of PME energy.
-          The data type is float32 and the shape is :math:`(1,)`.
-
-    Supported Platforms:
-        ``GPU``
-    """
-
-    @prim_attr_register
-    def __init__(self, atom_numbers, excluded_numbers, beta, fftx, ffty, fftz, box_length_0, box_length_1,
-                 box_length_2, max_neighbor_numbers=800, need_update=0):
-        """Initialize PMEEnergyUpdate"""
-        validator.check_value_type('atom_numbers', atom_numbers, int, self.name)
-        validator.check_value_type('excluded_numbers', excluded_numbers, int, self.name)
-        validator.check_value_type('beta', beta, float, self.name)
-        validator.check_value_type('fftx', fftx, int, self.name)
-        validator.check_value_type('ffty', ffty, int, self.name)
-        validator.check_value_type('fftz', fftz, int, self.name)
-        validator.check_value_type('box_length_0', box_length_0, float, self.name)
-        validator.check_value_type('box_length_1', box_length_1, float, self.name)
-        validator.check_value_type('box_length_2', box_length_2, float, self.name)
-        validator.check_value_type('max_neighbor_numbers', max_neighbor_numbers, int, self.name)
-        validator.check_value_type('need_update', need_update, int, self.name)
-        self.atom_numbers = atom_numbers
-        self.excluded_numbers = excluded_numbers
-        self.beta = beta
-        self.fftx = fftx
-        self.ffty = ffty
-        self.fftz = fftz
-        self.box_length_0 = box_length_0
-        self.box_length_1 = box_length_1
-        self.box_length_2 = box_length_2
-        self.max_neighbor_numbers = max_neighbor_numbers
-        self.need_update = need_update
-        self.init_prim_io_names(
-            inputs=['uint_crd', 'charge', 'nl_numbers', 'nl_serial', 'scaler', 'excluded_list_start',
-                    'excluded_list', 'excluded_atom_numbers', 'factor', 'beta'],
-            outputs=['reciprocal_ene', 'self_ene', 'direct_ene', 'correction_ene'])
-        self.add_prim_attr('atom_numbers', self.atom_numbers)
-        self.add_prim_attr('excluded_numbers', self.excluded_numbers)
-        self.add_prim_attr('beta', self.beta)
-        self.add_prim_attr('fftx', self.fftx)
-        self.add_prim_attr('ffty', self.ffty)
-        self.add_prim_attr('fftz', self.fftz)
-        self.add_prim_attr('box_length_0', self.box_length_0)
-        self.add_prim_attr('box_length_1', self.box_length_1)
-        self.add_prim_attr('box_length_2', self.box_length_2)
-        self.add_prim_attr('max_neighbor_numbers', self.max_neighbor_numbers)
-        self.add_prim_attr('need_update', self.need_update)
-
-    def infer_shape(self, uint_crd, charge, nl_numbers, nl_serial, scaler, excluded_list_start,
-                    excluded_list, excluded_atom_numbers, factor, beta):
-        cls_name = self.name
-        n = self.atom_numbers
-        m = self.max_neighbor_numbers
-        e = self.excluded_numbers
-        validator.check_int(len(uint_crd), 2, Rel.EQ, "uint_crd_dim", cls_name)
-        validator.check_int(len(charge), 1, Rel.EQ, "charge_dim", cls_name)
-        validator.check_int(len(nl_numbers), 1, Rel.EQ, "nl_numbers_dim", cls_name)
-        validator.check_int(len(nl_serial), 2, Rel.EQ, "nl_serial_dim", cls_name)
-        validator.check_int(len(excluded_list_start), 1, Rel.EQ, "excluded_list_start_dim", cls_name)
-        validator.check_int(len(excluded_atom_numbers), 1, Rel.EQ, "excluded_atom_numbers_dim", cls_name)
-        validator.check_int(len(excluded_list), 1, Rel.EQ, "excluded_list_dim", cls_name)
-        validator.check_int(len(scaler), 1, Rel.EQ, "scaler_dim", cls_name)
-        validator.check_int(len(factor), 1, Rel.EQ, "factor_dim", cls_name)
-        validator.check_int(len(beta), 1, Rel.EQ, "beta_dim", cls_name)
-        validator.check_int(uint_crd[0], n, Rel.EQ, "uint_crd_shape[0]", cls_name)
-        validator.check_int(uint_crd[1], 3, Rel.EQ, "uint_crd_shape[1]", cls_name)
-        validator.check_int(charge[0], n, Rel.EQ, "charge_shape", cls_name)
-        validator.check_int(nl_numbers[0], n, Rel.EQ, "nl_numbers_shape[0]", cls_name)
-        validator.check_int(nl_serial[0], n, Rel.EQ, "nl_serial_shape[0]", cls_name)
-        validator.check_int(nl_serial[1], m, Rel.EQ, "nl_serial_shape[1]", cls_name)
-        validator.check_int(excluded_list_start[0], n, Rel.EQ, "excluded_list_start_shape", cls_name)
-        validator.check_int(excluded_atom_numbers[0], n, Rel.EQ, "excluded_atom_numbers_shape", cls_name)
-        validator.check_int(excluded_list[0], e, Rel.EQ, "excluded_list_shape", cls_name)
-        validator.check_int(factor[0], 1, Rel.EQ, "factor_shape", cls_name)
-        validator.check_int(beta[0], 1, Rel.EQ, "beta_shape", cls_name)
-        validator.check_int(scaler[0], 3, Rel.EQ, "scaler_shape", cls_name)
-        return [1,], [1,], [1,], [1,]
-
-    def infer_dtype(self, uint_crd, charge, nl_numbers, nl_serial, scaler, excluded_list_start,
-                    excluded_list, excluded_atom_numbers, factor, beta):
-        validator.check_tensor_dtype_valid('uint_crd', uint_crd, [mstype.uint32], self.name)
-        validator.check_tensor_dtype_valid('charge', charge, [mstype.float32], self.name)
-        validator.check_tensor_dtype_valid('nl_numbers', nl_numbers, [mstype.int32], self.name)
-        validator.check_tensor_dtype_valid('nl_serial', nl_serial, [mstype.int32], self.name)
-        validator.check_tensor_dtype_valid('scaler', scaler, [mstype.float32], self.name)
-        validator.check_tensor_dtype_valid('excluded_list_start', excluded_list_start, [mstype.int32],
-                                           self.name)
-        validator.check_tensor_dtype_valid('excluded_list', excluded_list, [mstype.int32],
-                                           self.name)
-        validator.check_tensor_dtype_valid('excluded_atom_numbers', excluded_atom_numbers, [mstype.int32],
-                                           self.name)
-        validator.check_tensor_dtype_valid('factor', factor, [mstype.float32], self.name)
-        validator.check_tensor_dtype_valid('beta', beta, [mstype.float32], self.name)
-        return charge, charge, charge, charge
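
The four outputs above decompose the total PME Coulomb energy, i.e. E_total = E_reciprocal + E_self + E_direct + E_correction. A minimal call sketch follows, under the same assumptions as the previous example (pre-removal MindSpore GPU build; placeholder tensors sized only to pass the validators):

    import numpy as np
    from mindspore import Tensor, context
    from mindspore.ops import operations as P

    context.set_context(mode=context.PYNATIVE_MODE, device_target="GPU")

    n, m, e = 8, 800, 4  # atoms, max neighbors, excluded-list length (placeholders)
    op = P.PMEEnergyUpdate(atom_numbers=n, excluded_numbers=e, beta=0.3,
                           fftx=4, ffty=4, fftz=4, box_length_0=20.0,
                           box_length_1=20.0, box_length_2=20.0,
                           max_neighbor_numbers=m, need_update=0)
    reciprocal_ene, self_ene, direct_ene, correction_ene = op(
        Tensor(np.zeros((n, 3), np.uint32)),   # uint_crd
        Tensor(np.zeros(n, np.float32)),       # charge
        Tensor(np.zeros(n, np.int32)),         # nl_numbers
        Tensor(np.zeros((n, m), np.int32)),    # nl_serial
        Tensor(np.ones(3, np.float32)),        # scaler
        Tensor(np.zeros(n, np.int32)),         # excluded_list_start
        Tensor(np.zeros(e, np.int32)),         # excluded_list
        Tensor(np.zeros(n, np.int32)),         # excluded_atom_numbers
        Tensor(np.ones(1, np.float32)),        # factor
        Tensor(np.full(1, 0.3, np.float32)))   # beta
    # total Coulomb energy is the sum of the four (1,)-shaped terms
    total_ene = reciprocal_ene + self_ene + direct_ene + correction_ene
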
-
-
-class ConstrainForceCycle(PrimitiveWithInfer):
-    """
-    Calculate the constraint force in each iteration.
-
-    .. warning::
-        This is an experimental prototype that is subject to change and/or deletion.
-
-    Args:
-        atom_numbers (int32): the number of atoms, n.
-        constrain_pair_numbers (int32): the number of constrained pairs, m.
-
-    Inputs:
-        - **uint_crd** (Tensor) - The unsigned int coordinate value of each atom.
-          The data type is uint32 and the shape is :math:`(n, 3)`.
-        - **scaler** (Tensor) - The 3-D scale factor (x, y, z).
-          The data type is float32 and the shape is :math:`(3,)`.
-        - **pair_dr** (Tensor) - The displacement vector of each constrained atom pair.
-          The data type is float32 and the shape is :math:`(m, 3)`.
-        - **atom_i_serials** (Tensor) - The first atom index of each constrained atom pair.
-          The data type is int32 and the shape is :math:`(m,)`.
-        - **atom_j_serials** (Tensor) - The second atom index of each constrained atom pair.
-          The data type is int32 and the shape is :math:`(m,)`.
-        - **constant_rs** (Tensor) - The constrained distance of each constrained atom pair.
-          The data type is float32 and the shape is :math:`(m,)`.
-        - **constrain_ks** (Tensor) - The coefficient of each constrained atom pair.
-          The data type is float32 and the shape is :math:`(m,)`.
-
-    Outputs:
-        - **test_frc** (Tensor) - The constraint force.
-          The data type is float32 and the shape is :math:`(n, 3)`.
-
-    Supported Platforms:
-        ``GPU``
-    """
-
-    @prim_attr_register
-    def __init__(self, atom_numbers, constrain_pair_numbers):
-        validator.check_value_type('atom_numbers', atom_numbers, int, self.name)
-        validator.check_value_type('constrain_pair_numbers', constrain_pair_numbers, int, self.name)
-        self.atom_numbers = atom_numbers
-        self.constrain_pair_numbers = constrain_pair_numbers
-        self.add_prim_attr('atom_numbers', self.atom_numbers)
-        self.add_prim_attr('constrain_pair_numbers', self.constrain_pair_numbers)
-        self.init_prim_io_names(
-            inputs=['uint_crd', 'scaler', 'pair_dr', 'atom_i_serials', 'atom_j_serials',
-                    'constant_rs', 'constrain_ks'],
-            outputs=['test_frc'])
-
-    def infer_shape(self, uint_crd_shape, scaler_shape, pair_dr_shape, atom_i_serials_shape,
-                    atom_j_serials_shape, constant_rs_shape, constrain_ks_shape):
-        cls_name = self.name
-        n = self.atom_numbers
-        m = self.constrain_pair_numbers
-        validator.check_int(len(uint_crd_shape), 2, Rel.EQ, "uint_crd_dim", cls_name)
-        validator.check_int(len(scaler_shape), 1, Rel.EQ, "scaler_dim", cls_name)
-        validator.check_int(len(pair_dr_shape), 2, Rel.EQ, "pair_dr_dim", cls_name)
-        validator.check_int(len(atom_i_serials_shape), 1, Rel.EQ, "atom_i_serials_dim", cls_name)
-        validator.check_int(len(atom_j_serials_shape), 1, Rel.EQ, "atom_j_serials_dim", cls_name)
-        validator.check_int(len(constant_rs_shape), 1, Rel.EQ, "constant_rs_dim", cls_name)
-        validator.check_int(len(constrain_ks_shape), 1, Rel.EQ, "constrain_ks_dim", cls_name)
-
-        validator.check_int(uint_crd_shape[0], n, Rel.EQ, "uint_crd_shape[0]", cls_name)
-        validator.check_int(uint_crd_shape[1], 3, Rel.EQ, "uint_crd_shape[1]", cls_name)
-        validator.check_int(scaler_shape[0], 3, Rel.EQ, "scaler_shape", cls_name)
-        validator.check_int(pair_dr_shape[0], m, Rel.EQ, "pair_dr_shape[0]", cls_name)
-        validator.check_int(pair_dr_shape[1], 3, Rel.EQ, "pair_dr_shape[1]", cls_name)
-        validator.check_int(atom_i_serials_shape[0], m, Rel.EQ, "atom_i_serials_shape[0]", cls_name)
-        validator.check_int(atom_j_serials_shape[0], m, Rel.EQ, "atom_j_serials_shape[0]", cls_name)
-        validator.check_int(constant_rs_shape[0], m, Rel.EQ, "constant_rs_shape[0]", cls_name)
-        validator.check_int(constrain_ks_shape[0], m, Rel.EQ, "constrain_ks_shape[0]", cls_name)
-        return [n, 3]
-
-    def infer_dtype(self, uint_crd_dtype, scaler_dtype, pair_dr_dtype, atom_i_serials_dtype,
-                    atom_j_serials_dtype, constant_rs_dtype, constrain_ks_dtype):
-        validator.check_tensor_dtype_valid('uint_crd', uint_crd_dtype, [mstype.uint32], self.name)
-        validator.check_tensor_dtype_valid('scaler', scaler_dtype, [mstype.float32], self.name)
-        validator.check_tensor_dtype_valid('pair_dr', pair_dr_dtype, [mstype.float32], self.name)
-        validator.check_tensor_dtype_valid('atom_i_serials', atom_i_serials_dtype, [mstype.int32], self.name)
-        validator.check_tensor_dtype_valid('atom_j_serials', atom_j_serials_dtype, [mstype.int32], self.name)
-        validator.check_tensor_dtype_valid('constant_rs', constant_rs_dtype, [mstype.float32], self.name)
-        validator.check_tensor_dtype_valid('constrain_ks', constrain_ks_dtype, [mstype.float32], self.name)
-        return mstype.float32
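
A minimal sketch of calling the primitive above, under the same assumptions as the earlier examples (pre-removal MindSpore GPU build; placeholder values):

    import numpy as np
    from mindspore import Tensor, context
    from mindspore.ops import operations as P

    context.set_context(mode=context.PYNATIVE_MODE, device_target="GPU")

    n, m = 6, 3  # atoms, constrained pairs (placeholders)
    op = P.ConstrainForceCycle(atom_numbers=n, constrain_pair_numbers=m)
    test_frc = op(Tensor(np.zeros((n, 3), np.uint32)),    # uint_crd
                  Tensor(np.ones(3, np.float32)),         # scaler
                  Tensor(np.zeros((m, 3), np.float32)),   # pair_dr
                  Tensor(np.array([0, 1, 2], np.int32)),  # atom_i_serials
                  Tensor(np.array([3, 4, 5], np.int32)),  # atom_j_serials
                  Tensor(np.ones(m, np.float32)),         # constant_rs
                  Tensor(np.ones(m, np.float32)))         # constrain_ks
    # test_frc: (n, 3) float32, the constraint force from this iteration
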
-
-
-class ConstrainForceVirial(PrimitiveWithInfer):
-    """
-    Calculate the constraint force and virial within one step, using the given number of iterations.
-
-    .. warning::
-        This is an experimental prototype that is subject to change and/or deletion.
-
-    Args:
-        atom_numbers (int32): the number of atoms, n.
-        constrain_pair_numbers (int32): the number of constrained pairs, m.
-        iteration_numbers (int32): the number of iterations, p.
-        half_exp_gamma_plus_half (float32): half exp_gamma plus half, i.e. (exp_gamma + 1) / 2, denoted q.
-
-    Inputs:
-        - **crd** (Tensor) - The coordinate of each atom.
-          The data type is float32 and the shape is :math:`(n, 3)`.
-        - **quarter_cof** (Tensor) - The 3-D scale factor.
-          The data type is float32 and the shape is :math:`(3,)`.
-        - **mass_inverse** (Tensor) - The inverse value of the mass of each atom.
-          The data type is float32 and the shape is :math:`(n,)`.
-        - **scaler** (Tensor) - The 3-D scale factor (x, y, z).
-          The data type is float32 and the shape is :math:`(3,)`.
-        - **pair_dr** (Tensor) - The displacement vector of each constrained atom pair.
-          The data type is float32 and the shape is :math:`(m, 3)`.
-        - **atom_i_serials** (Tensor) - The first atom index of each constrained atom pair.
-          The data type is int32 and the shape is :math:`(m,)`.
-        - **atom_j_serials** (Tensor) - The second atom index of each constrained atom pair.
-          The data type is int32 and the shape is :math:`(m,)`.
-        - **constant_rs** (Tensor) - The constrained distance of each constrained atom pair.
-          The data type is float32 and the shape is :math:`(m,)`.
-        - **constrain_ks** (Tensor) - The coefficient of each constrained atom pair.
-          The data type is float32 and the shape is :math:`(m,)`.
-
-    Outputs:
-        - **uint_crd** (Tensor) - The unsigned int coordinate value of each atom.
-          The data type is uint32 and the shape is :math:`(n, 3)`.
-        - **frc** (Tensor) - The force felt by each atom.
-          The data type is float32 and the shape is :math:`(n, 3)`.
-        - **virial** (Tensor) - The constraint virial on each atom.
-          The data type is float32 and the shape is :math:`(m,)`.
-
-    Supported Platforms:
-        ``GPU``
-    """
-
-    @prim_attr_register
-    def __init__(self, atom_numbers, constrain_pair_numbers, iteration_numbers, half_exp_gamma_plus_half):
-        validator.check_value_type('atom_numbers', atom_numbers, int, self.name)
-        validator.check_value_type('constrain_pair_numbers', constrain_pair_numbers, int, self.name)
-        validator.check_value_type('iteration_numbers', iteration_numbers, int, self.name)
-        validator.check_value_type('half_exp_gamma_plus_half', half_exp_gamma_plus_half, float, self.name)
-        self.atom_numbers = atom_numbers
-        self.constrain_pair_numbers = constrain_pair_numbers
-        self.iteration_numbers = iteration_numbers
-        self.half_exp_gamma_plus_half = half_exp_gamma_plus_half
-        self.add_prim_attr('atom_numbers', self.atom_numbers)
-        self.add_prim_attr('constrain_pair_numbers', self.constrain_pair_numbers)
-        self.add_prim_attr('iteration_numbers', self.iteration_numbers)
-        self.add_prim_attr('half_exp_gamma_plus_half', self.half_exp_gamma_plus_half)
-
-        self.init_prim_io_names(
-            inputs=['crd', 'quarter_cof', 'mass_inverse',
-                    'scaler', 'pair_dr', 'atom_i_serials', 'atom_j_serials',
-                    'constant_rs', 'constrain_ks'],
-            outputs=['uint_crd', 'frc', 'virial'])
-
-    def infer_shape(self, crd, quarter_cof, mass_inverse, scaler_shape, pair_dr_shape, atom_i_serials_shape,
-                    atom_j_serials_shape, constant_rs_shape, constrain_ks_shape):
-        cls_name = self.name
-        n = self.atom_numbers
-        m = self.constrain_pair_numbers
-        validator.check_int(len(crd), 2, Rel.EQ, "crd_dim", cls_name)
-        validator.check_int(len(quarter_cof), 1, Rel.EQ, "quarter_cof_dim", cls_name)
-        validator.check_int(len(mass_inverse), 1, Rel.EQ, "mass_inverse_dim", cls_name)
-        validator.check_int(len(scaler_shape), 1, Rel.EQ, "scaler_dim", cls_name)
-        validator.check_int(len(pair_dr_shape), 2, Rel.EQ, "pair_dr_dim", cls_name)
-        validator.check_int(len(atom_i_serials_shape), 1, Rel.EQ, "atom_i_serials_dim", cls_name)
-        validator.check_int(len(atom_j_serials_shape), 1, Rel.EQ, "atom_j_serials_dim", cls_name)
-        validator.check_int(len(constant_rs_shape), 1, Rel.EQ, "constant_rs_dim", cls_name)
-        validator.check_int(len(constrain_ks_shape), 1, Rel.EQ, "constrain_ks_dim", cls_name)
-        validator.check_int(crd[0], n, Rel.EQ, "crd_shape[0]", cls_name)
-        validator.check_int(crd[1], 3, Rel.EQ, "crd_shape[1]", cls_name)
-        validator.check_int(quarter_cof[0], 3, Rel.EQ, "quarter_cof_shape", cls_name)
-        validator.check_int(mass_inverse[0], n, Rel.EQ, "mass_inverse_shape", cls_name)
-        validator.check_int(scaler_shape[0], 3, Rel.EQ, "scaler_shape", cls_name)
-        validator.check_int(pair_dr_shape[0], m, Rel.EQ, "pair_dr_shape[0]", cls_name)
-        validator.check_int(pair_dr_shape[1], 3, Rel.EQ, "pair_dr_shape[1]", cls_name)
-        validator.check_int(atom_i_serials_shape[0], m, Rel.EQ, "atom_i_serials_shape[0]", cls_name)
-        validator.check_int(atom_j_serials_shape[0], m, Rel.EQ, "atom_j_serials_shape[0]", cls_name)
-        validator.check_int(constant_rs_shape[0], m, Rel.EQ, "constant_rs_shape[0]", cls_name)
-        validator.check_int(constrain_ks_shape[0], m, Rel.EQ, "constrain_ks_shape[0]", cls_name)
-        return [n, 3], [n, 3], [m,]
-
-    def infer_dtype(self, crd, quarter_cof, mass_inverse, scaler_dtype, pair_dr_dtype, atom_i_serials_dtype,
-                    atom_j_serials_dtype, constant_rs_dtype, constrain_ks_dtype):
-        validator.check_tensor_dtype_valid('crd', crd, [mstype.float32], self.name)
-        validator.check_tensor_dtype_valid('quarter_cof', quarter_cof, [mstype.float32], self.name)
-        validator.check_tensor_dtype_valid('mass_inverse', mass_inverse, [mstype.float32], self.name)
-        validator.check_tensor_dtype_valid('scaler', scaler_dtype, [mstype.float32], self.name)
-        validator.check_tensor_dtype_valid('pair_dr', pair_dr_dtype, [mstype.float32], self.name)
-        validator.check_tensor_dtype_valid('atom_i_serials', atom_i_serials_dtype, [mstype.int32], self.name)
-        validator.check_tensor_dtype_valid('atom_j_serials', atom_j_serials_dtype, [mstype.int32], self.name)
-        validator.check_tensor_dtype_valid('constant_rs', constant_rs_dtype, [mstype.float32], self.name)
-        validator.check_tensor_dtype_valid('constrain_ks', constrain_ks_dtype, [mstype.float32], self.name)
-        return mstype.uint32, mstype.float32, mstype.float32
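
A corresponding sketch for ConstrainForceVirial, again under the assumption of a pre-removal MindSpore GPU build and with placeholder tensors shaped only to satisfy the validators above:

    import numpy as np
    from mindspore import Tensor, context
    from mindspore.ops import operations as P

    context.set_context(mode=context.PYNATIVE_MODE, device_target="GPU")

    n, m = 6, 3  # atoms, constrained pairs (placeholders)
    crd = Tensor(np.zeros((n, 3), np.float32))
    quarter_cof = Tensor(np.ones(3, np.float32))
    mass_inverse = Tensor(np.ones(n, np.float32))
    scaler = Tensor(np.ones(3, np.float32))
    pair_dr = Tensor(np.zeros((m, 3), np.float32))
    atom_i = Tensor(np.array([0, 1, 2], np.int32))
    atom_j = Tensor(np.array([3, 4, 5], np.int32))
    constant_rs = Tensor(np.ones(m, np.float32))
    constrain_ks = Tensor(np.ones(m, np.float32))

    op = P.ConstrainForceVirial(atom_numbers=n, constrain_pair_numbers=m,
                                iteration_numbers=25, half_exp_gamma_plus_half=1.0)
    uint_crd, frc, virial = op(crd, quarter_cof, mass_inverse, scaler, pair_dr,
                               atom_i, atom_j, constant_rs, constrain_ks)
    # uint_crd: (n, 3) uint32; frc: (n, 3) float32; virial: (m,) float32
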
-
-
-class ConstrainForce(PrimitiveWithInfer):
-    """
-    Calculate the constraint force within one step, using the given number of iterations.
-
-    .. warning::
-        This is an experimental prototype that is subject to change and/or deletion.
-
-    Args:
-        atom_numbers (int32): the number of atoms, n.
-        constrain_pair_numbers (int32): the number of constrained pairs, m.
-        iteration_numbers (int32): the number of iterations, p.
-        half_exp_gamma_plus_half (float32): half exp_gamma plus half, i.e. (exp_gamma + 1) / 2, denoted q.
-
-    Inputs:
-        - **crd** (Tensor) - The coordinate of each atom.
-          The data type is float32 and the shape is :math:`(n, 3)`.
-        - **quarter_cof** (Tensor) - The 3-D scale factor.
-          The data type is float32 and the shape is :math:`(3,)`.
-        - **mass_inverse** (Tensor) - The inverse value of the mass of each atom.
-          The data type is float32 and the shape is :math:`(n,)`.
-        - **scaler** (Tensor) - The 3-D scale factor (x, y, z).
-          The data type is float32 and the shape is :math:`(3,)`.
-        - **pair_dr** (Tensor) - The displacement vector of each constrained atom pair.
-          The data type is float32 and the shape is :math:`(m, 3)`.
-        - **atom_i_serials** (Tensor) - The first atom index of each constrained atom pair.
-          The data type is int32 and the shape is :math:`(m,)`.
-        - **atom_j_serials** (Tensor) - The second atom index of each constrained atom pair.
-          The data type is int32 and the shape is :math:`(m,)`.
-        - **constant_rs** (Tensor) - The constrained distance of each constrained atom pair.
-          The data type is float32 and the shape is :math:`(m,)`.
-        - **constrain_ks** (Tensor) - The coefficient of each constrained atom pair.
-          The data type is float32 and the shape is :math:`(m,)`.
-
-    Outputs:
-        - **uint_crd** (Tensor) - The unsigned int coordinate value of each atom.
-          The data type is uint32 and the shape is :math:`(n, 3)`.
-        - **frc** (Tensor) - The constraint force on each atom.
-          The data type is float32 and the shape is :math:`(n, 3)`.
-        - **virial** (Tensor) - The constraint virial on each atom; it is always zero for this operator.
-          The data type is float32 and the shape is :math:`(m,)`.
-
-    Supported Platforms:
-        ``GPU``
-    """
-
-    @prim_attr_register
-    def __init__(self, atom_numbers, constrain_pair_numbers, iteration_numbers, half_exp_gamma_plus_half):
-        validator.check_value_type('atom_numbers', atom_numbers, int, self.name)
-        validator.check_value_type('constrain_pair_numbers', constrain_pair_numbers, int, self.name)
-        validator.check_value_type('iteration_numbers', iteration_numbers, int, self.name)
-        validator.check_value_type('half_exp_gamma_plus_half', half_exp_gamma_plus_half, float, self.name)
-        self.atom_numbers = atom_numbers
-        self.constrain_pair_numbers = constrain_pair_numbers
-        self.iteration_numbers = iteration_numbers
-        self.half_exp_gamma_plus_half = half_exp_gamma_plus_half
-        self.add_prim_attr('atom_numbers', self.atom_numbers)
-        self.add_prim_attr('constrain_pair_numbers', self.constrain_pair_numbers)
-        self.add_prim_attr('iteration_numbers', self.iteration_numbers)
-        self.add_prim_attr('half_exp_gamma_plus_half', self.half_exp_gamma_plus_half)
-
-        self.init_prim_io_names(
-            inputs=['crd', 'quarter_cof', 'mass_inverse',
-                    'scaler', 'pair_dr', 'atom_i_serials', 'atom_j_serials', 'constant_rs', 'constrain_ks'],
-            outputs=['uint_crd', 'frc', 'virial'])
-
-    def infer_shape(self, crd, quarter_cof, mass_inverse, scaler_shape, pair_dr_shape, atom_i_serials_shape,
-                    atom_j_serials_shape, constant_rs_shape, constrain_ks_shape):
-        cls_name = self.name
-        n = self.atom_numbers
-        m = self.constrain_pair_numbers
-        validator.check_int(len(crd), 2, Rel.EQ, "crd_dim", cls_name)
-        validator.check_int(len(quarter_cof), 1, Rel.EQ, "quarter_cof_dim", cls_name)
-        validator.check_int(len(mass_inverse), 1, Rel.EQ, "mass_inverse_dim", cls_name)
-        validator.check_int(len(scaler_shape), 1, Rel.EQ, "scaler_dim", cls_name)
-        validator.check_int(len(pair_dr_shape), 2, Rel.EQ, "pair_dr_dim", cls_name)
-        validator.check_int(len(atom_i_serials_shape), 1, Rel.EQ, "atom_i_serials_dim", cls_name)
-        validator.check_int(len(atom_j_serials_shape), 1, Rel.EQ, "atom_j_serials_dim", cls_name)
-        validator.check_int(len(constant_rs_shape), 1, Rel.EQ, "constant_rs_dim", cls_name)
-        validator.check_int(len(constrain_ks_shape), 1, Rel.EQ, "constrain_ks_dim", cls_name)
-        validator.check_int(crd[0], n, Rel.EQ, "crd_shape[0]", cls_name)
-        validator.check_int(crd[1], 3, Rel.EQ, "crd_shape[1]", cls_name)
-        validator.check_int(quarter_cof[0], 3, Rel.EQ, "quarter_cof_shape", cls_name)
-        validator.check_int(mass_inverse[0], n, Rel.EQ, "mass_inverse_shape", cls_name)
-        validator.check_int(scaler_shape[0], 3, Rel.EQ, "scaler_shape", cls_name)
-        validator.check_int(pair_dr_shape[0], m, Rel.EQ, "pair_dr_shape[0]", cls_name)
-        validator.check_int(pair_dr_shape[1], 3, Rel.EQ, "pair_dr_shape[1]", cls_name)
-        validator.check_int(atom_i_serials_shape[0], m, Rel.EQ, "atom_i_serials_shape[0]", cls_name)
-        validator.check_int(atom_j_serials_shape[0], m, Rel.EQ, "atom_j_serials_shape[0]", cls_name)
-        validator.check_int(constant_rs_shape[0], m, Rel.EQ, "constant_rs_shape[0]", cls_name)
-        validator.check_int(constrain_ks_shape[0], m, Rel.EQ, "constrain_ks_shape[0]", cls_name)
-        return [n, 3], [n, 3], [m,]
-
-    def infer_dtype(self, crd, quarter_cof, mass_inverse, scaler_dtype, pair_dr_dtype, atom_i_serials_dtype,
-                    atom_j_serials_dtype, constant_rs_dtype, constrain_ks_dtype):
-        validator.check_tensor_dtype_valid('crd', crd, [mstype.float32], self.name)
-        validator.check_tensor_dtype_valid('quarter_cof', quarter_cof, [mstype.float32], self.name)
-        validator.check_tensor_dtype_valid('mass_inverse', mass_inverse, [mstype.float32], self.name)
-        validator.check_tensor_dtype_valid('scaler', scaler_dtype, [mstype.float32], self.name)
-        validator.check_tensor_dtype_valid('pair_dr', pair_dr_dtype, [mstype.float32], self.name)
-        validator.check_tensor_dtype_valid('atom_i_serials', atom_i_serials_dtype, [mstype.int32], self.name)
-        validator.check_tensor_dtype_valid('atom_j_serials', atom_j_serials_dtype, [mstype.int32], self.name)
-        validator.check_tensor_dtype_valid('constant_rs', constant_rs_dtype, [mstype.float32], self.name)
-        validator.check_tensor_dtype_valid('constrain_ks', constrain_ks_dtype, [mstype.float32], self.name)
-        return mstype.uint32, mstype.float32, mstype.float32
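
ConstrainForce above takes exactly the same attributes and inputs as ConstrainForceVirial; the documented difference is that its virial output is always zero. The Constrain variant defined next additionally takes a need_pressure tensor to switch the virial computation on or off at run time. Reusing the placeholder tensors and imports from the previous sketch:

    op = P.ConstrainForce(atom_numbers=n, constrain_pair_numbers=m,
                          iteration_numbers=25, half_exp_gamma_plus_half=1.0)
    uint_crd, frc, virial = op(crd, quarter_cof, mass_inverse, scaler, pair_dr,
                               atom_i, atom_j, constant_rs, constrain_ks)
    # virial is (m,) float32 and, per the docstring above, always zero here

    need_pressure = Tensor(np.ones(1, np.int32))  # 1: also fill the virial output
    op_p = P.Constrain(atom_numbers=n, constrain_pair_numbers=m, iteration_numbers=25,
                       half_exp_gamma_plus_half=1.0, update_interval=10)
    uint_crd, frc, virial = op_p(crd, quarter_cof, mass_inverse, scaler, pair_dr,
                                 atom_i, atom_j, constant_rs, constrain_ks, need_pressure)
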
-
-
-class Constrain(PrimitiveWithInfer):
-    """
-    Calculate the constraint force and, when pressure calculation is required, the virial.
-
-    .. warning::
-        This is an experimental prototype that is subject to change and/or deletion.
-
-    Args:
-        atom_numbers (int32): the number of atoms, n.
-        constrain_pair_numbers (int32): the number of constrained pairs, m.
-        iteration_numbers (int32): the number of iterations, p.
-        half_exp_gamma_plus_half (float32): half exp_gamma plus half, i.e. (exp_gamma + 1) / 2, denoted q.
-        update_interval (int32): the update interval, default 10.
-
-    Inputs:
-        - **crd** (Tensor) - The coordinate of each atom.
-          The data type is float32 and the shape is :math:`(n, 3)`.
-        - **quarter_cof** (Tensor) - The 3-D scale factor.
-          The data type is float32 and the shape is :math:`(3,)`.
-        - **mass_inverse** (Tensor) - The inverse value of the mass of each atom.
-          The data type is float32 and the shape is :math:`(n,)`.
-        - **scaler** (Tensor) - The 3-D scale factor (x, y, z).
-          The data type is float32 and the shape is :math:`(3,)`.
-        - **pair_dr** (Tensor) - The displacement vector of each constrained atom pair.
-          The data type is float32 and the shape is :math:`(m, 3)`.
-        - **atom_i_serials** (Tensor) - The first atom index of each constrained atom pair.
-          The data type is int32 and the shape is :math:`(m,)`.
-        - **atom_j_serials** (Tensor) - The second atom index of each constrained atom pair.
-          The data type is int32 and the shape is :math:`(m,)`.
-        - **constant_rs** (Tensor) - The constrained distance of each constrained atom pair.
-          The data type is float32 and the shape is :math:`(m,)`.
-        - **constrain_ks** (Tensor) - The coefficient of each constrained atom pair.
-          The data type is float32 and the shape is :math:`(m,)`.
-        - **need_pressure** (Tensor) - 1 if pressure is needed, 0 otherwise.
-          The data type is int32 and the shape is :math:`(1,)` or :math:`()`.
-
-    Outputs:
-        - **uint_crd** (Tensor) - The unsigned int coordinate value of each atom.
-          The data type is uint32 and the shape is :math:`(n, 3)`.
-        - **frc** (Tensor) - The constraint force on each atom.
-          The data type is float32 and the shape is :math:`(n, 3)`.
-        - **virial** (Tensor) - The constraint virial on each atom.
-          The data type is float32 and the shape is :math:`(m,)`.
-
-    Supported Platforms:
-        ``GPU``
-    """
-
-    @prim_attr_register
-    def __init__(self, atom_numbers, constrain_pair_numbers, iteration_numbers, half_exp_gamma_plus_half,
-                 update_interval=10):
-        validator.check_value_type('atom_numbers', atom_numbers, int, self.name)
-        validator.check_value_type('constrain_pair_numbers', constrain_pair_numbers, int, self.name)
-        validator.check_value_type('iteration_numbers', iteration_numbers, int, self.name)
-        validator.check_value_type('half_exp_gamma_plus_half', half_exp_gamma_plus_half, float, self.name)
-        validator.check_value_type('update_interval', update_interval, int, self.name)
-        self.atom_numbers = atom_numbers
-        self.constrain_pair_numbers = constrain_pair_numbers
-        self.iteration_numbers = iteration_numbers
-        self.half_exp_gamma_plus_half = half_exp_gamma_plus_half
-        self.update_interval = update_interval
-        self.add_prim_attr('atom_numbers', self.atom_numbers)
-        self.add_prim_attr('constrain_pair_numbers', self.constrain_pair_numbers)
-        self.add_prim_attr('iteration_numbers', self.iteration_numbers)
-        self.add_prim_attr('half_exp_gamma_plus_half', self.half_exp_gamma_plus_half)
-        self.add_prim_attr('update_interval', self.update_interval)
-
-        self.init_prim_io_names(
-            inputs=['crd', 'quarter_cof', 'mass_inverse',
-                    'scaler', 'pair_dr', 'atom_i_serials', 'atom_j_serials',
-                    'constant_rs', 'constrain_ks', 'need_pressure'],
-            outputs=['uint_crd', 'frc', 'virial'])
-
-    def infer_shape(self, crd, quarter_cof, mass_inverse, scaler_shape, pair_dr_shape, atom_i_serials_shape,
-                    atom_j_serials_shape, constant_rs_shape, constrain_ks_shape, need_pressure):
-        cls_name = self.name
-        n = self.atom_numbers
-        m = self.constrain_pair_numbers
-        validator.check_int(len(crd), 2, Rel.EQ, "crd_dim", cls_name)
-        validator.check_int(len(quarter_cof), 1, Rel.EQ, "quarter_cof_dim", cls_name)
-        validator.check_int(len(mass_inverse), 1, Rel.EQ, "mass_inverse_dim", cls_name)
-        validator.check_int(len(scaler_shape), 1, Rel.EQ, "scaler_dim", cls_name)
-        validator.check_int(len(pair_dr_shape), 2, Rel.EQ, "pair_dr_dim", cls_name)
-        validator.check_int(len(atom_i_serials_shape), 1, Rel.EQ, "atom_i_serials_dim", cls_name)
-        validator.check_int(len(atom_j_serials_shape), 1, Rel.EQ, "atom_j_serials_dim", cls_name)
-        validator.check_int(len(constant_rs_shape), 1, Rel.EQ, "constant_rs_dim", cls_name)
-        validator.check_int(len(constrain_ks_shape), 1, Rel.EQ, "constrain_ks_dim", cls_name)
-        validator.check_int(len(need_pressure), 1, Rel.LE, "need_pressure_dim", cls_name)
-        validator.check_int(crd[0], n, Rel.EQ, "crd_shape[0]", cls_name)
-        validator.check_int(crd[1], 3, Rel.EQ, "crd_shape[1]", cls_name)
-        validator.check_int(quarter_cof[0], 3, Rel.EQ, "quarter_cof_shape", cls_name)
-        validator.check_int(mass_inverse[0], n, Rel.EQ, "mass_inverse_shape", cls_name)
-        validator.check_int(scaler_shape[0], 3, Rel.EQ, "scaler_shape", cls_name)
-        validator.check_int(pair_dr_shape[0], m, Rel.EQ, "pair_dr_shape[0]", cls_name)
-        validator.check_int(pair_dr_shape[1], 3, Rel.EQ, "pair_dr_shape[1]", cls_name)
-        validator.check_int(atom_i_serials_shape[0], m, Rel.EQ, "atom_i_serials_shape[0]", cls_name)
-
validator.check_int(atom_j_serials_shape[0], m, Rel.EQ, "atom_j_serials_shape[0]", cls_name) - validator.check_int(constant_rs_shape[0], m, Rel.EQ, "constant_rs_shape[0]", cls_name) - validator.check_int(constrain_ks_shape[0], m, Rel.EQ, "constrain_ks_shape[0]", cls_name) - if need_pressure: - validator.check_int(need_pressure[0], 1, Rel.EQ, "need_pressure_shape", self.name) - return [n, 3], [n, 3], [m,] - - def infer_dtype(self, crd, quarter_cof, mass_inverse, scaler_dtype, pair_dr_dtype, atom_i_serials_dtype, - atom_j_serials_dtype, constant_rs_dtype, constrain_ks_dtype, need_pressure): - validator.check_tensor_dtype_valid('crd', crd, [mstype.float32], self.name) - validator.check_tensor_dtype_valid('quarter_cof', quarter_cof, [mstype.float32], self.name) - validator.check_tensor_dtype_valid('mass_inverse', mass_inverse, [mstype.float32], self.name) - validator.check_tensor_dtype_valid('scaler', scaler_dtype, [mstype.float32], self.name) - validator.check_tensor_dtype_valid('pair_dr', pair_dr_dtype, [mstype.float32], self.name) - validator.check_tensor_dtype_valid('atom_i_serials', atom_i_serials_dtype, [mstype.int32], self.name) - validator.check_tensor_dtype_valid('atom_j_serials', atom_j_serials_dtype, [mstype.int32], self.name) - validator.check_tensor_dtype_valid('constant_rs', constant_rs_dtype, [mstype.float32], self.name) - validator.check_tensor_dtype_valid('constrain_ks', constrain_ks_dtype, [mstype.float32], self.name) - validator.check_tensor_dtype_valid('need_pressure', need_pressure, [mstype.int32], self.name) - return mstype.uint32, mstype.float32, mstype.float32 diff --git a/tests/st/sponge/__init__.py b/tests/st/sponge/__init__.py deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/tests/st/sponge/data/NVT_290_10ns.in b/tests/st/sponge/data/NVT_290_10ns.in deleted file mode 100644 index 54f9ab22f84..00000000000 --- a/tests/st/sponge/data/NVT_290_10ns.in +++ /dev/null @@ -1,15 +0,0 @@ -290K 200ns - - mode = 1, - dt= 0.001, - step_limit = 1000, - langevin_gamma=1.0, - target_temperature=290, - thermostat=1, - write_information_interval=100, - amber_irest=0, - cut=10.0, - - - - diff --git a/tests/st/sponge/data/WATER_ALA.parm7 b/tests/st/sponge/data/WATER_ALA.parm7 deleted file mode 100644 index 46842e27559..00000000000 --- a/tests/st/sponge/data/WATER_ALA.parm7 +++ /dev/null @@ -1,4764 +0,0 @@ -%VERSION VERSION_STAMP = V0001.000 DATE = 04/15/21 15:06:14 -%FLAG TITLE -%FORMAT(20a4) -ALA -%FLAG POINTERS -%FORMAT(10I8) - 2298 9 2283 14 32 18 57 44 0 0 - 3169 759 14 18 44 10 14 20 10 1 - 0 0 0 0 0 0 0 1 10 0 - 0 -%FLAG ATOM_NAME -%FORMAT(20a4) -N H CA HA CB HB1 HB2 HB3 C O N H CA HA CB HB1 HB2 HB3 C O -N H CA HA CB HB1 HB2 HB3 C O O H1 H2 O H1 H2 O H1 H2 O -H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 -O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 -H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O -H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 -O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 -H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O -H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 -O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 -H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O -H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 -O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 -H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O -H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 -O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 -H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O -H1 
H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 -O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 -H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O -H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 -O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 -H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O -H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 -O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 -H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O -H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 -O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 -H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O -H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 -O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 -H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O -H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 -O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 -H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O -H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 -O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 -H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O -H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 -O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 -H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O -H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 -O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 -H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O -H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 -O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 -H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O -H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 -O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 -H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O -H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 -O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 -H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O -H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 -O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 -H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O -H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 -O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 -H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O -H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 -O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 -H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O -H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 -O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 -H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O -H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 -O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 -H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O -H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 -O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 -H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O -H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 -O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 -H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O -H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 -O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 -H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O -H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 -O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 -H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O -H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 -O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 -H2 O H1 H2 O H1 H2 O H1 
H2 O H1 H2 O H1 H2 O H1 H2 O -H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 -O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 -H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O -H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 -O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 -H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O -H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 -O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 -H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O -H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 -O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 -H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O -H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 -O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 -H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O -H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 -O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 -H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O -H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 -O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 -H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O -H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 -O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 -H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O -H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 -O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 -H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O -H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 -O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 -H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O -H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 -O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 O H1 H2 -%FLAG CHARGE -%FORMAT(5E16.8) - -7.57501011E+00 4.95464337E+00 6.14091510E-01 1.49969529E+00 -3.32556975E+00 - 1.09880469E+00 1.09880469E+00 1.09880469E+00 1.08841798E+01 -1.03484442E+01 - -7.57501011E+00 4.95464337E+00 6.14091510E-01 1.49969529E+00 -3.32556975E+00 - 1.09880469E+00 1.09880469E+00 1.09880469E+00 1.08841798E+01 -1.03484442E+01 - -7.57501011E+00 4.95464337E+00 6.14091510E-01 1.49969529E+00 -3.32556975E+00 - 1.09880469E+00 1.09880469E+00 1.09880469E+00 1.08841798E+01 -1.03484442E+01 - -1.51973982E+01 7.59869910E+00 7.59869910E+00 -1.51973982E+01 7.59869910E+00 - 7.59869910E+00 -1.51973982E+01 7.59869910E+00 7.59869910E+00 -1.51973982E+01 - 7.59869910E+00 7.59869910E+00 -1.51973982E+01 7.59869910E+00 7.59869910E+00 - -1.51973982E+01 7.59869910E+00 7.59869910E+00 -1.51973982E+01 7.59869910E+00 - 7.59869910E+00 -1.51973982E+01 7.59869910E+00 7.59869910E+00 -1.51973982E+01 - 7.59869910E+00 7.59869910E+00 -1.51973982E+01 7.59869910E+00 7.59869910E+00 - -1.51973982E+01 7.59869910E+00 7.59869910E+00 -1.51973982E+01 7.59869910E+00 - 7.59869910E+00 -1.51973982E+01 7.59869910E+00 7.59869910E+00 -1.51973982E+01 - 7.59869910E+00 7.59869910E+00 -1.51973982E+01 7.59869910E+00 7.59869910E+00 - -1.51973982E+01 7.59869910E+00 7.59869910E+00 -1.51973982E+01 7.59869910E+00 - 7.59869910E+00 -1.51973982E+01 7.59869910E+00 7.59869910E+00 -1.51973982E+01 - 7.59869910E+00 7.59869910E+00 -1.51973982E+01 7.59869910E+00 7.59869910E+00 - -1.51973982E+01 7.59869910E+00 7.59869910E+00 -1.51973982E+01 7.59869910E+00 - 7.59869910E+00 -1.51973982E+01 7.59869910E+00 7.59869910E+00 -1.51973982E+01 - 7.59869910E+00 7.59869910E+00 -1.51973982E+01 7.59869910E+00 7.59869910E+00 - -1.51973982E+01 7.59869910E+00 7.59869910E+00 -1.51973982E+01 7.59869910E+00 - 7.59869910E+00 
-1.51973982E+01 7.59869910E+00 7.59869910E+00 -1.51973982E+01 - 7.59869910E+00 7.59869910E+00 -1.51973982E+01 7.59869910E+00 7.59869910E+00 - -1.51973982E+01 7.59869910E+00 7.59869910E+00 -1.51973982E+01 7.59869910E+00 - 7.59869910E+00 -1.51973982E+01 7.59869910E+00 7.59869910E+00 -1.51973982E+01 - 7.59869910E+00 7.59869910E+00 -1.51973982E+01 7.59869910E+00 7.59869910E+00 - -1.51973982E+01 7.59869910E+00 7.59869910E+00 -1.51973982E+01 7.59869910E+00 - 7.59869910E+00 -1.51973982E+01 7.59869910E+00 7.59869910E+00 -1.51973982E+01 - 7.59869910E+00 7.59869910E+00 -1.51973982E+01 7.59869910E+00 7.59869910E+00 - -1.51973982E+01 7.59869910E+00 7.59869910E+00 -1.51973982E+01 7.59869910E+00 - 7.59869910E+00 -1.51973982E+01 7.59869910E+00 7.59869910E+00 -1.51973982E+01 - 7.59869910E+00 7.59869910E+00 -1.51973982E+01 7.59869910E+00 7.59869910E+00 - -1.51973982E+01 7.59869910E+00 7.59869910E+00 -1.51973982E+01 7.59869910E+00 - 7.59869910E+00 -1.51973982E+01 7.59869910E+00 7.59869910E+00 -1.51973982E+01 - 7.59869910E+00 7.59869910E+00 -1.51973982E+01 7.59869910E+00 7.59869910E+00 - -1.51973982E+01 7.59869910E+00 7.59869910E+00 -1.51973982E+01 7.59869910E+00 - 7.59869910E+00 -1.51973982E+01 7.59869910E+00 7.59869910E+00 -1.51973982E+01 - 7.59869910E+00 7.59869910E+00 -1.51973982E+01 7.59869910E+00 7.59869910E+00 - -1.51973982E+01 7.59869910E+00 7.59869910E+00 -1.51973982E+01 7.59869910E+00 - 7.59869910E+00 -1.51973982E+01 7.59869910E+00 7.59869910E+00 -1.51973982E+01 - 7.59869910E+00 7.59869910E+00 -1.51973982E+01 7.59869910E+00 7.59869910E+00 - -1.51973982E+01 7.59869910E+00 7.59869910E+00 -1.51973982E+01 7.59869910E+00 - 7.59869910E+00 -1.51973982E+01 7.59869910E+00 7.59869910E+00 -1.51973982E+01 - 7.59869910E+00 7.59869910E+00 -1.51973982E+01 7.59869910E+00 7.59869910E+00 - -1.51973982E+01 7.59869910E+00 7.59869910E+00 -1.51973982E+01 7.59869910E+00 - 7.59869910E+00 -1.51973982E+01 7.59869910E+00 7.59869910E+00 -1.51973982E+01 - 7.59869910E+00 7.59869910E+00 -1.51973982E+01 7.59869910E+00 7.59869910E+00 - -1.51973982E+01 7.59869910E+00 7.59869910E+00 -1.51973982E+01 7.59869910E+00 - 7.59869910E+00 -1.51973982E+01 7.59869910E+00 7.59869910E+00 -1.51973982E+01 - 7.59869910E+00 7.59869910E+00 -1.51973982E+01 7.59869910E+00 7.59869910E+00 - -1.51973982E+01 7.59869910E+00 7.59869910E+00 -1.51973982E+01 7.59869910E+00 - 7.59869910E+00 -1.51973982E+01 7.59869910E+00 7.59869910E+00 -1.51973982E+01 - 7.59869910E+00 7.59869910E+00 -1.51973982E+01 7.59869910E+00 7.59869910E+00 - -1.51973982E+01 7.59869910E+00 7.59869910E+00 -1.51973982E+01 7.59869910E+00 - 7.59869910E+00 -1.51973982E+01 7.59869910E+00 7.59869910E+00 -1.51973982E+01 - 7.59869910E+00 7.59869910E+00 -1.51973982E+01 7.59869910E+00 7.59869910E+00 - -1.51973982E+01 7.59869910E+00 7.59869910E+00 -1.51973982E+01 7.59869910E+00 - 7.59869910E+00 -1.51973982E+01 7.59869910E+00 7.59869910E+00 -1.51973982E+01 - 7.59869910E+00 7.59869910E+00 -1.51973982E+01 7.59869910E+00 7.59869910E+00 - -1.51973982E+01 7.59869910E+00 7.59869910E+00 -1.51973982E+01 7.59869910E+00 - 7.59869910E+00 -1.51973982E+01 7.59869910E+00 7.59869910E+00 -1.51973982E+01 - 7.59869910E+00 7.59869910E+00 -1.51973982E+01 7.59869910E+00 7.59869910E+00 - -1.51973982E+01 7.59869910E+00 7.59869910E+00 -1.51973982E+01 7.59869910E+00 - 7.59869910E+00 -1.51973982E+01 7.59869910E+00 7.59869910E+00 -1.51973982E+01 - 7.59869910E+00 7.59869910E+00 -1.51973982E+01 7.59869910E+00 7.59869910E+00 - -1.51973982E+01 7.59869910E+00 7.59869910E+00 -1.51973982E+01 7.59869910E+00 - 7.59869910E+00 
-1.51973982E+01 7.59869910E+00 7.59869910E+00 -1.51973982E+01 - 7.59869910E+00 7.59869910E+00 -1.51973982E+01 7.59869910E+00 7.59869910E+00 - -1.51973982E+01 7.59869910E+00 7.59869910E+00 -1.51973982E+01 7.59869910E+00 - 7.59869910E+00 -1.51973982E+01 7.59869910E+00 7.59869910E+00 -1.51973982E+01 - 7.59869910E+00 7.59869910E+00 -1.51973982E+01 7.59869910E+00 7.59869910E+00 - -1.51973982E+01 7.59869910E+00 7.59869910E+00 -1.51973982E+01 7.59869910E+00 - 7.59869910E+00 -1.51973982E+01 7.59869910E+00 7.59869910E+00 -1.51973982E+01 - 7.59869910E+00 7.59869910E+00 -1.51973982E+01 7.59869910E+00 7.59869910E+00 - -1.51973982E+01 7.59869910E+00 7.59869910E+00 -1.51973982E+01 7.59869910E+00 - 7.59869910E+00 -1.51973982E+01 7.59869910E+00 7.59869910E+00 -1.51973982E+01 - 7.59869910E+00 7.59869910E+00 -1.51973982E+01 7.59869910E+00 7.59869910E+00 - -1.51973982E+01 7.59869910E+00 7.59869910E+00 -1.51973982E+01 7.59869910E+00 - 7.59869910E+00 -1.51973982E+01 7.59869910E+00 7.59869910E+00 -1.51973982E+01 - 7.59869910E+00 7.59869910E+00 -1.51973982E+01 7.59869910E+00 7.59869910E+00 - -1.51973982E+01 7.59869910E+00 7.59869910E+00 -1.51973982E+01 7.59869910E+00 - 7.59869910E+00 -1.51973982E+01 7.59869910E+00 7.59869910E+00 -1.51973982E+01 - 7.59869910E+00 7.59869910E+00 -1.51973982E+01 7.59869910E+00 7.59869910E+00 - -1.51973982E+01 7.59869910E+00 7.59869910E+00 -1.51973982E+01 7.59869910E+00 - 7.59869910E+00 -1.51973982E+01 7.59869910E+00 7.59869910E+00 -1.51973982E+01 - 7.59869910E+00 7.59869910E+00 -1.51973982E+01 7.59869910E+00 7.59869910E+00 - -1.51973982E+01 7.59869910E+00 7.59869910E+00 -1.51973982E+01 7.59869910E+00 - 7.59869910E+00 -1.51973982E+01 7.59869910E+00 7.59869910E+00 -1.51973982E+01 - 7.59869910E+00 7.59869910E+00 -1.51973982E+01 7.59869910E+00 7.59869910E+00 - -1.51973982E+01 7.59869910E+00 7.59869910E+00 -1.51973982E+01 7.59869910E+00 - 7.59869910E+00 -1.51973982E+01 7.59869910E+00 7.59869910E+00 -1.51973982E+01 - 7.59869910E+00 7.59869910E+00 -1.51973982E+01 7.59869910E+00 7.59869910E+00 - -1.51973982E+01 7.59869910E+00 7.59869910E+00 -1.51973982E+01 7.59869910E+00 - 7.59869910E+00 -1.51973982E+01 7.59869910E+00 7.59869910E+00 -1.51973982E+01 - 7.59869910E+00 7.59869910E+00 -1.51973982E+01 7.59869910E+00 7.59869910E+00 - -1.51973982E+01 7.59869910E+00 7.59869910E+00 -1.51973982E+01 7.59869910E+00 - 7.59869910E+00 -1.51973982E+01 7.59869910E+00 7.59869910E+00 -1.51973982E+01 - 7.59869910E+00 7.59869910E+00 -1.51973982E+01 7.59869910E+00 7.59869910E+00 - -1.51973982E+01 7.59869910E+00 7.59869910E+00 -1.51973982E+01 7.59869910E+00 - 7.59869910E+00 -1.51973982E+01 7.59869910E+00 7.59869910E+00 -1.51973982E+01 - 7.59869910E+00 7.59869910E+00 -1.51973982E+01 7.59869910E+00 7.59869910E+00 - -1.51973982E+01 7.59869910E+00 7.59869910E+00 -1.51973982E+01 7.59869910E+00 - 7.59869910E+00 -1.51973982E+01 7.59869910E+00 7.59869910E+00 -1.51973982E+01 - 7.59869910E+00 7.59869910E+00 -1.51973982E+01 7.59869910E+00 7.59869910E+00 - -1.51973982E+01 7.59869910E+00 7.59869910E+00 -1.51973982E+01 7.59869910E+00 - 7.59869910E+00 -1.51973982E+01 7.59869910E+00 7.59869910E+00 -1.51973982E+01 - 7.59869910E+00 7.59869910E+00 -1.51973982E+01 7.59869910E+00 7.59869910E+00 - -1.51973982E+01 7.59869910E+00 7.59869910E+00 -1.51973982E+01 7.59869910E+00 - 7.59869910E+00 -1.51973982E+01 7.59869910E+00 7.59869910E+00 -1.51973982E+01 - 7.59869910E+00 7.59869910E+00 -1.51973982E+01 7.59869910E+00 7.59869910E+00 - -1.51973982E+01 7.59869910E+00 7.59869910E+00 -1.51973982E+01 7.59869910E+00 - 7.59869910E+00 
-1.51973982E+01 7.59869910E+00 7.59869910E+00 -1.51973982E+01 - 7.59869910E+00 7.59869910E+00 -1.51973982E+01 7.59869910E+00 7.59869910E+00 - -1.51973982E+01 7.59869910E+00 7.59869910E+00 -1.51973982E+01 7.59869910E+00 - 7.59869910E+00 -1.51973982E+01 7.59869910E+00 7.59869910E+00 -1.51973982E+01 - 7.59869910E+00 7.59869910E+00 -1.51973982E+01 7.59869910E+00 7.59869910E+00 - -1.51973982E+01 7.59869910E+00 7.59869910E+00 -1.51973982E+01 7.59869910E+00 - 7.59869910E+00 -1.51973982E+01 7.59869910E+00 7.59869910E+00 -1.51973982E+01 - 7.59869910E+00 7.59869910E+00 -1.51973982E+01 7.59869910E+00 7.59869910E+00 - -1.51973982E+01 7.59869910E+00 7.59869910E+00 -1.51973982E+01 7.59869910E+00 - 7.59869910E+00 -1.51973982E+01 7.59869910E+00 7.59869910E+00 -1.51973982E+01 - 7.59869910E+00 7.59869910E+00 -1.51973982E+01 7.59869910E+00 7.59869910E+00 - -1.51973982E+01 7.59869910E+00 7.59869910E+00 -1.51973982E+01 7.59869910E+00 - 7.59869910E+00 -1.51973982E+01 7.59869910E+00 7.59869910E+00 -1.51973982E+01 - 7.59869910E+00 7.59869910E+00 -1.51973982E+01 7.59869910E+00 7.59869910E+00 - -1.51973982E+01 7.59869910E+00 7.59869910E+00 -1.51973982E+01 7.59869910E+00 - 7.59869910E+00 -1.51973982E+01 7.59869910E+00 7.59869910E+00 -1.51973982E+01 - 7.59869910E+00 7.59869910E+00 -1.51973982E+01 7.59869910E+00 7.59869910E+00 - -1.51973982E+01 7.59869910E+00 7.59869910E+00 -1.51973982E+01 7.59869910E+00 - 7.59869910E+00 -1.51973982E+01 7.59869910E+00 7.59869910E+00 -1.51973982E+01 - 7.59869910E+00 7.59869910E+00 -1.51973982E+01 7.59869910E+00 7.59869910E+00 - -1.51973982E+01 7.59869910E+00 7.59869910E+00 -1.51973982E+01 7.59869910E+00 - 7.59869910E+00 -1.51973982E+01 7.59869910E+00 7.59869910E+00 -1.51973982E+01 - 7.59869910E+00 7.59869910E+00 -1.51973982E+01 7.59869910E+00 7.59869910E+00 - -1.51973982E+01 7.59869910E+00 7.59869910E+00 -1.51973982E+01 7.59869910E+00 - 7.59869910E+00 -1.51973982E+01 7.59869910E+00 7.59869910E+00 -1.51973982E+01 - 7.59869910E+00 7.59869910E+00 -1.51973982E+01 7.59869910E+00 7.59869910E+00 - -1.51973982E+01 7.59869910E+00 7.59869910E+00 -1.51973982E+01 7.59869910E+00 - 7.59869910E+00 -1.51973982E+01 7.59869910E+00 7.59869910E+00 -1.51973982E+01 - 7.59869910E+00 7.59869910E+00 -1.51973982E+01 7.59869910E+00 7.59869910E+00 - -1.51973982E+01 7.59869910E+00 7.59869910E+00 -1.51973982E+01 7.59869910E+00 - 7.59869910E+00 -1.51973982E+01 7.59869910E+00 7.59869910E+00 -1.51973982E+01 - 7.59869910E+00 7.59869910E+00 -1.51973982E+01 7.59869910E+00 7.59869910E+00 - -1.51973982E+01 7.59869910E+00 7.59869910E+00 -1.51973982E+01 7.59869910E+00 - 7.59869910E+00 -1.51973982E+01 7.59869910E+00 7.59869910E+00 -1.51973982E+01 - 7.59869910E+00 7.59869910E+00 -1.51973982E+01 7.59869910E+00 7.59869910E+00 - -1.51973982E+01 7.59869910E+00 7.59869910E+00 -1.51973982E+01 7.59869910E+00 - 7.59869910E+00 -1.51973982E+01 7.59869910E+00 7.59869910E+00 -1.51973982E+01 - 7.59869910E+00 7.59869910E+00 -1.51973982E+01 7.59869910E+00 7.59869910E+00 - -1.51973982E+01 7.59869910E+00 7.59869910E+00 -1.51973982E+01 7.59869910E+00 - 7.59869910E+00 -1.51973982E+01 7.59869910E+00 7.59869910E+00 -1.51973982E+01 - 7.59869910E+00 7.59869910E+00 -1.51973982E+01 7.59869910E+00 7.59869910E+00 - -1.51973982E+01 7.59869910E+00 7.59869910E+00 -1.51973982E+01 7.59869910E+00 - 7.59869910E+00 -1.51973982E+01 7.59869910E+00 7.59869910E+00 -1.51973982E+01 - 7.59869910E+00 7.59869910E+00 -1.51973982E+01 7.59869910E+00 7.59869910E+00 - -1.51973982E+01 7.59869910E+00 7.59869910E+00 -1.51973982E+01 7.59869910E+00 - 7.59869910E+00 
-1.51973982E+01 7.59869910E+00 7.59869910E+00 -1.51973982E+01 - 7.59869910E+00 7.59869910E+00 -1.51973982E+01 7.59869910E+00 7.59869910E+00 - -1.51973982E+01 7.59869910E+00 7.59869910E+00 -1.51973982E+01 7.59869910E+00 - 7.59869910E+00 -1.51973982E+01 7.59869910E+00 7.59869910E+00 -1.51973982E+01 - 7.59869910E+00 7.59869910E+00 -1.51973982E+01 7.59869910E+00 7.59869910E+00 - -1.51973982E+01 7.59869910E+00 7.59869910E+00 -1.51973982E+01 7.59869910E+00 - 7.59869910E+00 -1.51973982E+01 7.59869910E+00 7.59869910E+00 -1.51973982E+01 - 7.59869910E+00 7.59869910E+00 -1.51973982E+01 7.59869910E+00 7.59869910E+00 - -1.51973982E+01 7.59869910E+00 7.59869910E+00 -1.51973982E+01 7.59869910E+00 - 7.59869910E+00 -1.51973982E+01 7.59869910E+00 7.59869910E+00 -1.51973982E+01 - 7.59869910E+00 7.59869910E+00 -1.51973982E+01 7.59869910E+00 7.59869910E+00 - -1.51973982E+01 7.59869910E+00 7.59869910E+00 -1.51973982E+01 7.59869910E+00 - 7.59869910E+00 -1.51973982E+01 7.59869910E+00 7.59869910E+00 -1.51973982E+01 - 7.59869910E+00 7.59869910E+00 -1.51973982E+01 7.59869910E+00 7.59869910E+00 - -1.51973982E+01 7.59869910E+00 7.59869910E+00 -1.51973982E+01 7.59869910E+00 - 7.59869910E+00 -1.51973982E+01 7.59869910E+00 7.59869910E+00 -1.51973982E+01 - 7.59869910E+00 7.59869910E+00 -1.51973982E+01 7.59869910E+00 7.59869910E+00 - -1.51973982E+01 7.59869910E+00 7.59869910E+00 -1.51973982E+01 7.59869910E+00 - 7.59869910E+00 -1.51973982E+01 7.59869910E+00 7.59869910E+00 -1.51973982E+01 - 7.59869910E+00 7.59869910E+00 -1.51973982E+01 7.59869910E+00 7.59869910E+00 - -1.51973982E+01 7.59869910E+00 7.59869910E+00 -1.51973982E+01 7.59869910E+00 - 7.59869910E+00 -1.51973982E+01 7.59869910E+00 7.59869910E+00 -1.51973982E+01 - 7.59869910E+00 7.59869910E+00 -1.51973982E+01 7.59869910E+00 7.59869910E+00 - -1.51973982E+01 7.59869910E+00 7.59869910E+00 -1.51973982E+01 7.59869910E+00 - 7.59869910E+00 -1.51973982E+01 7.59869910E+00 7.59869910E+00 -1.51973982E+01 - 7.59869910E+00 7.59869910E+00 -1.51973982E+01 7.59869910E+00 7.59869910E+00 - -1.51973982E+01 7.59869910E+00 7.59869910E+00 -1.51973982E+01 7.59869910E+00 - 7.59869910E+00 -1.51973982E+01 7.59869910E+00 7.59869910E+00 -1.51973982E+01 - 7.59869910E+00 7.59869910E+00 -1.51973982E+01 7.59869910E+00 7.59869910E+00 - -1.51973982E+01 7.59869910E+00 7.59869910E+00 -1.51973982E+01 7.59869910E+00 - 7.59869910E+00 -1.51973982E+01 7.59869910E+00 7.59869910E+00 -1.51973982E+01 - 7.59869910E+00 7.59869910E+00 -1.51973982E+01 7.59869910E+00 7.59869910E+00 - -1.51973982E+01 7.59869910E+00 7.59869910E+00 -1.51973982E+01 7.59869910E+00 - 7.59869910E+00 -1.51973982E+01 7.59869910E+00 7.59869910E+00 -1.51973982E+01 - 7.59869910E+00 7.59869910E+00 -1.51973982E+01 7.59869910E+00 7.59869910E+00 - -1.51973982E+01 7.59869910E+00 7.59869910E+00 -1.51973982E+01 7.59869910E+00 - 7.59869910E+00 -1.51973982E+01 7.59869910E+00 7.59869910E+00 -1.51973982E+01 - 7.59869910E+00 7.59869910E+00 -1.51973982E+01 7.59869910E+00 7.59869910E+00 - -1.51973982E+01 7.59869910E+00 7.59869910E+00 -1.51973982E+01 7.59869910E+00 - 7.59869910E+00 -1.51973982E+01 7.59869910E+00 7.59869910E+00 -1.51973982E+01 - 7.59869910E+00 7.59869910E+00 -1.51973982E+01 7.59869910E+00 7.59869910E+00 - -1.51973982E+01 7.59869910E+00 7.59869910E+00 -1.51973982E+01 7.59869910E+00 - 7.59869910E+00 -1.51973982E+01 7.59869910E+00 7.59869910E+00 -1.51973982E+01 - 7.59869910E+00 7.59869910E+00 -1.51973982E+01 7.59869910E+00 7.59869910E+00 - -1.51973982E+01 7.59869910E+00 7.59869910E+00 -1.51973982E+01 7.59869910E+00 - 7.59869910E+00 
-  -1.51973982E+01  7.59869910E+00  7.59869910E+00 -1.51973982E+01  7.59869910E+00
-   7.59869910E+00 -1.51973982E+01  7.59869910E+00  7.59869910E+00 -1.51973982E+01
-   7.59869910E+00  7.59869910E+00 -1.51973982E+01  7.59869910E+00  7.59869910E+00
[the triplet -1.51973982E+01, 7.59869910E+00, 7.59869910E+00 repeats, five values per line, once per water molecule]
-%FLAG ATOMIC_NUMBER
-%FORMAT(10I8)
-       7       1       6       1       6       1       1       1       6       8
-       7       1       6       1       6       1       1       1       6       8
-       7       1       6       1       6       1       1       1       6       8
-       8       1       1       8       1       1       8       1       1       8
[the triplet 8, 1, 1 repeats, ten values per line, once per water molecule]
[final repeats of the per-water atomic-number triplet 8, 1, 1]
-%FLAG MASS
-%FORMAT(5E16.8)
-   1.40100000E+01  1.00800000E+00  1.20100000E+01  1.00800000E+00  1.20100000E+01
-   1.00800000E+00  1.00800000E+00  1.00800000E+00  1.20100000E+01  1.60000000E+01
-   1.40100000E+01  1.00800000E+00  1.20100000E+01  1.00800000E+00  1.20100000E+01
-   1.00800000E+00  1.00800000E+00  1.00800000E+00  1.20100000E+01  1.60000000E+01
-   1.40100000E+01  1.00800000E+00  1.20100000E+01  1.00800000E+00  1.20100000E+01
-   1.00800000E+00  1.00800000E+00  1.00800000E+00  1.20100000E+01  1.60000000E+01
-   1.60000000E+01  1.00800000E+00  1.00800000E+00  1.60000000E+01  1.00800000E+00
[the mass triplet 1.60000000E+01, 1.00800000E+00, 1.00800000E+00 repeats, five values per line, once per water molecule]
[final repeats of the per-water mass triplet]
-%FLAG ATOM_TYPE_INDEX
-%FORMAT(10I8)
-       1       2       3       4       3       5       5       5       6       7
-       1       2       3       4       3       5       5       5       6       7
-       1       2       3       4       3       5       5       5       6       7
-       8       9       9       8       9       9       8       9       9       8
[the triplet 8, 9, 9 repeats, ten values per line, once per water molecule]
[final repeats of the per-water atom-type triplet 8, 9, 9]
-%FLAG NUMBER_EXCLUDED_ATOMS
-%FORMAT(10I8)
-      10       4      10       7       6       3       2       1       7       3
-      10       4      10       7       6       3       2       1       7       3
-       9       4       7       6       5       3       2       1       1       1
-       2       1       1       2       1       1       2       1       1       2
[the triplet 2, 1, 1 repeats, ten values per line, once per water molecule]
1 2 1 1 2 1 1 2 1 1 - 2 1 1 2 1 1 2 1 1 2 - 1 1 2 1 1 2 1 1 -%FLAG NONBONDED_PARM_INDEX -%FORMAT(10I8) - 1 2 4 7 11 16 22 29 37 2 - 3 5 8 12 17 23 30 38 4 5 - 6 9 13 18 24 31 39 7 8 9 - 10 14 19 25 32 40 11 12 13 14 - 15 20 26 33 41 16 17 18 19 20 - 21 27 34 42 22 23 24 25 26 27 - 28 35 43 29 30 31 32 33 34 35 - 36 -1 37 38 39 40 41 42 43 -1 - 45 -%FLAG RESIDUE_LABEL -%FORMAT(20a4) -ALA ALA ALA WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT -WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT -WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT -WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT -WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT -WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT -WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT -WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT -WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT -WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT -WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT -WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT -WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT -WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT -WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT -WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT -WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT -WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT -WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT -WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT -WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT -WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT -WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT -WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT -WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT -WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT -WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT -WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT -WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT -WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT -WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT -WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT -WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT -WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT -WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT -WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT -WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT -WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT WAT -%FLAG RESIDUE_POINTER -%FORMAT(10I8) - 1 11 21 31 34 37 40 43 46 49 - 52 55 58 61 64 67 70 73 
76 79 - 82 85 88 91 94 97 100 103 106 109 - 112 115 118 121 124 127 130 133 136 139 - 142 145 148 151 154 157 160 163 166 169 - 172 175 178 181 184 187 190 193 196 199 - 202 205 208 211 214 217 220 223 226 229 - 232 235 238 241 244 247 250 253 256 259 - 262 265 268 271 274 277 280 283 286 289 - 292 295 298 301 304 307 310 313 316 319 - 322 325 328 331 334 337 340 343 346 349 - 352 355 358 361 364 367 370 373 376 379 - 382 385 388 391 394 397 400 403 406 409 - 412 415 418 421 424 427 430 433 436 439 - 442 445 448 451 454 457 460 463 466 469 - 472 475 478 481 484 487 490 493 496 499 - 502 505 508 511 514 517 520 523 526 529 - 532 535 538 541 544 547 550 553 556 559 - 562 565 568 571 574 577 580 583 586 589 - 592 595 598 601 604 607 610 613 616 619 - 622 625 628 631 634 637 640 643 646 649 - 652 655 658 661 664 667 670 673 676 679 - 682 685 688 691 694 697 700 703 706 709 - 712 715 718 721 724 727 730 733 736 739 - 742 745 748 751 754 757 760 763 766 769 - 772 775 778 781 784 787 790 793 796 799 - 802 805 808 811 814 817 820 823 826 829 - 832 835 838 841 844 847 850 853 856 859 - 862 865 868 871 874 877 880 883 886 889 - 892 895 898 901 904 907 910 913 916 919 - 922 925 928 931 934 937 940 943 946 949 - 952 955 958 961 964 967 970 973 976 979 - 982 985 988 991 994 997 1000 1003 1006 1009 - 1012 1015 1018 1021 1024 1027 1030 1033 1036 1039 - 1042 1045 1048 1051 1054 1057 1060 1063 1066 1069 - 1072 1075 1078 1081 1084 1087 1090 1093 1096 1099 - 1102 1105 1108 1111 1114 1117 1120 1123 1126 1129 - 1132 1135 1138 1141 1144 1147 1150 1153 1156 1159 - 1162 1165 1168 1171 1174 1177 1180 1183 1186 1189 - 1192 1195 1198 1201 1204 1207 1210 1213 1216 1219 - 1222 1225 1228 1231 1234 1237 1240 1243 1246 1249 - 1252 1255 1258 1261 1264 1267 1270 1273 1276 1279 - 1282 1285 1288 1291 1294 1297 1300 1303 1306 1309 - 1312 1315 1318 1321 1324 1327 1330 1333 1336 1339 - 1342 1345 1348 1351 1354 1357 1360 1363 1366 1369 - 1372 1375 1378 1381 1384 1387 1390 1393 1396 1399 - 1402 1405 1408 1411 1414 1417 1420 1423 1426 1429 - 1432 1435 1438 1441 1444 1447 1450 1453 1456 1459 - 1462 1465 1468 1471 1474 1477 1480 1483 1486 1489 - 1492 1495 1498 1501 1504 1507 1510 1513 1516 1519 - 1522 1525 1528 1531 1534 1537 1540 1543 1546 1549 - 1552 1555 1558 1561 1564 1567 1570 1573 1576 1579 - 1582 1585 1588 1591 1594 1597 1600 1603 1606 1609 - 1612 1615 1618 1621 1624 1627 1630 1633 1636 1639 - 1642 1645 1648 1651 1654 1657 1660 1663 1666 1669 - 1672 1675 1678 1681 1684 1687 1690 1693 1696 1699 - 1702 1705 1708 1711 1714 1717 1720 1723 1726 1729 - 1732 1735 1738 1741 1744 1747 1750 1753 1756 1759 - 1762 1765 1768 1771 1774 1777 1780 1783 1786 1789 - 1792 1795 1798 1801 1804 1807 1810 1813 1816 1819 - 1822 1825 1828 1831 1834 1837 1840 1843 1846 1849 - 1852 1855 1858 1861 1864 1867 1870 1873 1876 1879 - 1882 1885 1888 1891 1894 1897 1900 1903 1906 1909 - 1912 1915 1918 1921 1924 1927 1930 1933 1936 1939 - 1942 1945 1948 1951 1954 1957 1960 1963 1966 1969 - 1972 1975 1978 1981 1984 1987 1990 1993 1996 1999 - 2002 2005 2008 2011 2014 2017 2020 2023 2026 2029 - 2032 2035 2038 2041 2044 2047 2050 2053 2056 2059 - 2062 2065 2068 2071 2074 2077 2080 2083 2086 2089 - 2092 2095 2098 2101 2104 2107 2110 2113 2116 2119 - 2122 2125 2128 2131 2134 2137 2140 2143 2146 2149 - 2152 2155 2158 2161 2164 2167 2170 2173 2176 2179 - 2182 2185 2188 2191 2194 2197 2200 2203 2206 2209 - 2212 2215 2218 2221 2224 2227 2230 2233 2236 2239 - 2242 2245 2248 2251 2254 2257 2260 2263 2266 2269 - 2272 2275 2278 2281 2284 2287 2290 2293 2296 -%FLAG 
BOND_FORCE_CONSTANT -%FORMAT(5E16.8) - 5.70000000E+02 4.90000000E+02 3.40000000E+02 3.40000000E+02 3.10000000E+02 - 3.17000000E+02 4.34000000E+02 3.37000000E+02 5.53000000E+02 5.53000000E+02 -%FLAG BOND_EQUIL_VALUE -%FORMAT(5E16.8) - 1.22900000E+00 1.33500000E+00 1.09000000E+00 1.09000000E+00 1.52600000E+00 - 1.52200000E+00 1.01000000E+00 1.44900000E+00 9.57200000E-01 1.51360000E+00 -%FLAG ANGLE_FORCE_CONSTANT -%FORMAT(5E16.8) - 8.00000000E+01 5.00000000E+01 5.00000000E+01 3.50000000E+01 6.30000000E+01 - 5.00000000E+01 5.00000000E+01 5.00000000E+01 8.00000000E+01 7.00000000E+01 - 5.00000000E+01 5.00000000E+01 8.00000000E+01 6.30000000E+01 -%FLAG ANGLE_EQUIL_VALUE -%FORMAT(5E16.8) - 2.14501057E+00 2.09439600E+00 2.12755727E+00 1.91113635E+00 1.93906163E+00 - 1.91113635E+00 1.91113635E+00 1.91113635E+00 2.10137732E+00 2.03505478E+00 - 2.06018753E+00 1.91113635E+00 1.91462701E+00 1.92160833E+00 -%FLAG DIHEDRAL_FORCE_CONSTANT -%FORMAT(5E16.8) - 2.00000000E+00 2.50000000E+00 0.00000000E+00 1.80000000E+00 8.00000000E-01 - 0.00000000E+00 0.00000000E+00 2.70000000E-01 4.20000000E-01 1.55555556E-01 - 2.00000000E-01 2.00000000E-01 4.00000000E-01 8.00000000E-01 8.00000000E-02 - 4.50000000E-01 1.58000000E+00 5.50000000E-01 1.05000000E+01 1.10000000E+00 -%FLAG DIHEDRAL_PERIODICITY -%FORMAT(5E16.8) - 1.00000000E+00 2.00000000E+00 2.00000000E+00 2.00000000E+00 3.00000000E+00 - 4.00000000E+00 1.00000000E+00 2.00000000E+00 3.00000000E+00 3.00000000E+00 - 1.00000000E+00 2.00000000E+00 3.00000000E+00 1.00000000E+00 3.00000000E+00 - 1.00000000E+00 2.00000000E+00 3.00000000E+00 2.00000000E+00 2.00000000E+00 -%FLAG DIHEDRAL_PHASE -%FORMAT(5E16.8) - 0.00000000E+00 3.14159400E+00 0.00000000E+00 0.00000000E+00 0.00000000E+00 - 0.00000000E+00 0.00000000E+00 0.00000000E+00 0.00000000E+00 0.00000000E+00 - 0.00000000E+00 0.00000000E+00 0.00000000E+00 0.00000000E+00 3.14159400E+00 - 3.14159400E+00 3.14159400E+00 3.14159400E+00 3.14159400E+00 3.14159400E+00 -%FLAG SCEE_SCALE_FACTOR -%FORMAT(5E16.8) - 1.20000000E+00 1.20000000E+00 1.20000000E+00 1.20000000E+00 1.20000000E+00 - 1.20000000E+00 1.20000000E+00 1.20000000E+00 1.20000000E+00 1.20000000E+00 - 1.20000000E+00 1.20000000E+00 1.20000000E+00 1.20000000E+00 1.20000000E+00 - 1.20000000E+00 1.20000000E+00 1.20000000E+00 0.00000000E+00 0.00000000E+00 -%FLAG SCNB_SCALE_FACTOR -%FORMAT(5E16.8) - 2.00000000E+00 2.00000000E+00 2.00000000E+00 2.00000000E+00 2.00000000E+00 - 2.00000000E+00 2.00000000E+00 2.00000000E+00 2.00000000E+00 2.00000000E+00 - 2.00000000E+00 2.00000000E+00 2.00000000E+00 2.00000000E+00 2.00000000E+00 - 2.00000000E+00 2.00000000E+00 2.00000000E+00 0.00000000E+00 0.00000000E+00 -%FLAG SOLTY -%FORMAT(5E16.8) - 0.00000000E+00 0.00000000E+00 0.00000000E+00 0.00000000E+00 0.00000000E+00 - 0.00000000E+00 0.00000000E+00 0.00000000E+00 0.00000000E+00 0.00000000E+00 -%FLAG LENNARD_JONES_ACOEF -%FORMAT(5E16.8) - 9.44293233E+05 2.12601181E+03 1.39982777E-01 9.95480466E+05 2.56678134E+03 - 1.04308023E+06 6.20665997E+04 5.94667300E+01 6.78771368E+04 3.25969625E+03 - 8.96776989E+04 1.07193646E+02 9.71708117E+04 4.98586848E+03 7.51607703E+03 - 8.82619071E+05 2.27577561E+03 9.24822270E+05 6.01816484E+04 8.61541883E+04 - 8.19971662E+05 6.06829342E+05 1.02595236E+03 6.47841731E+05 3.69471530E+04 - 5.44261042E+04 5.74393458E+05 3.79876399E+05 7.42364908E+05 1.52094041E+03 - 7.85890042E+05 4.75728872E+04 6.91773368E+04 6.96790708E+05 4.72934643E+05 - 5.81935564E+05 0.00000000E+00 0.00000000E+00 0.00000000E+00 0.00000000E+00 - 0.00000000E+00 0.00000000E+00 0.00000000E+00 
0.00000000E+00 0.00000000E+00 -%FLAG LENNARD_JONES_BCOEF -%FORMAT(5E16.8) - 8.01323529E+02 2.09604198E+01 9.37598976E-02 7.36907417E+02 2.06278363E+01 - 6.75612247E+02 1.13252061E+02 1.93248820E+00 1.06076943E+02 1.43076527E+01 - 1.36131731E+02 2.59456373E+00 1.26919150E+02 1.76949863E+01 2.17257828E+01 - 6.53361429E+02 1.82891803E+01 5.99015525E+02 9.40505980E+01 1.12529845E+02 - 5.31102864E+02 6.77220874E+02 1.53505284E+01 6.26720080E+02 9.21192136E+01 - 1.11805549E+02 5.55666448E+02 5.64885984E+02 6.90894667E+02 1.72393904E+01 - 6.36687196E+02 9.64152120E+01 1.16264660E+02 5.64503554E+02 5.81361517E+02 - 5.94825035E+02 0.00000000E+00 0.00000000E+00 0.00000000E+00 0.00000000E+00 - 0.00000000E+00 0.00000000E+00 0.00000000E+00 0.00000000E+00 0.00000000E+00 -%FLAG BONDS_INC_HYDROGEN -%FORMAT(10I8) - 12 15 3 12 18 3 12 21 3 6 - 9 4 0 3 7 42 45 3 42 48 - 3 42 51 3 36 39 4 30 33 7 - 72 75 3 72 78 3 72 81 3 66 - 69 4 60 63 7 93 90 9 96 90 - 9 96 93 10 102 99 9 105 99 9 - 105 102 10 111 108 9 114 108 9 114 - 111 10 120 117 9 123 117 9 123 120 - 10 129 126 9 132 126 9 132 129 10 - 138 135 9 141 135 9 141 138 10 147 - 144 9 150 144 9 150 147 10 156 153 - 9 159 153 9 159 156 10 165 162 9 - 168 162 9 168 165 10 174 171 9 177 - 171 9 177 174 10 183 180 9 186 180 - 9 186 183 10 192 189 9 195 189 9 - 195 192 10 201 198 9 204 198 9 204 - 201 10 210 207 9 213 207 9 213 210 - 10 219 216 9 222 216 9 222 219 10 - 228 225 9 231 225 9 231 228 10 237 - 234 9 240 234 9 240 237 10 246 243 - 9 249 243 9 249 246 10 255 252 9 - 258 252 9 258 255 10 264 261 9 267 - 261 9 267 264 10 273 270 9 276 270 - 9 276 273 10 282 279 9 285 279 9 - 285 282 10 291 288 9 294 288 9 294 - 291 10 300 297 9 303 297 9 303 300 - 10 309 306 9 312 306 9 312 309 10 - 318 315 9 321 315 9 321 318 10 327 - 324 9 330 324 9 330 327 10 336 333 - 9 339 333 9 339 336 10 345 342 9 - 348 342 9 348 345 10 354 351 9 357 - 351 9 357 354 10 363 360 9 366 360 - 9 366 363 10 372 369 9 375 369 9 - 375 372 10 381 378 9 384 378 9 384 - 381 10 390 387 9 393 387 9 393 390 - 10 399 396 9 402 396 9 402 399 10 - 408 405 9 411 405 9 411 408 10 417 - 414 9 420 414 9 420 417 10 426 423 - 9 429 423 9 429 426 10 435 432 9 - 438 432 9 438 435 10 444 441 9 447 - 441 9 447 444 10 453 450 9 456 450 - 9 456 453 10 462 459 9 465 459 9 - 465 462 10 471 468 9 474 468 9 474 - 471 10 480 477 9 483 477 9 483 480 - 10 489 486 9 492 486 9 492 489 10 - 498 495 9 501 495 9 501 498 10 507 - 504 9 510 504 9 510 507 10 516 513 - 9 519 513 9 519 516 10 525 522 9 - 528 522 9 528 525 10 534 531 9 537 - 531 9 537 534 10 543 540 9 546 540 - 9 546 543 10 552 549 9 555 549 9 - 555 552 10 561 558 9 564 558 9 564 - 561 10 570 567 9 573 567 9 573 570 - 10 579 576 9 582 576 9 582 579 10 - 588 585 9 591 585 9 591 588 10 597 - 594 9 600 594 9 600 597 10 606 603 - 9 609 603 9 609 606 10 615 612 9 - 618 612 9 618 615 10 624 621 9 627 - 621 9 627 624 10 633 630 9 636 630 - 9 636 633 10 642 639 9 645 639 9 - 645 642 10 651 648 9 654 648 9 654 - 651 10 660 657 9 663 657 9 663 660 - 10 669 666 9 672 666 9 672 669 10 - 678 675 9 681 675 9 681 678 10 687 - 684 9 690 684 9 690 687 10 696 693 - 9 699 693 9 699 696 10 705 702 9 - 708 702 9 708 705 10 714 711 9 717 - 711 9 717 714 10 723 720 9 726 720 - 9 726 723 10 732 729 9 735 729 9 - 735 732 10 741 738 9 744 738 9 744 - 741 10 750 747 9 753 747 9 753 750 - 10 759 756 9 762 756 9 762 759 10 - 768 765 9 771 765 9 771 768 10 777 - 774 9 780 774 9 780 777 10 786 783 - 9 789 783 9 789 786 10 795 792 9 - 798 792 9 798 795 10 804 801 9 807 - 801 9 807 804 10 813 
810 9 816 810 - 9 816 813 10 822 819 9 825 819 9 - 825 822 10 831 828 9 834 828 9 834 - 831 10 840 837 9 843 837 9 843 840 - 10 849 846 9 852 846 9 852 849 10 - 858 855 9 861 855 9 861 858 10 867 - 864 9 870 864 9 870 867 10 876 873 - 9 879 873 9 879 876 10 885 882 9 - 888 882 9 888 885 10 894 891 9 897 - 891 9 897 894 10 903 900 9 906 900 - 9 906 903 10 912 909 9 915 909 9 - 915 912 10 921 918 9 924 918 9 924 - 921 10 930 927 9 933 927 9 933 930 - 10 939 936 9 942 936 9 942 939 10 - 948 945 9 951 945 9 951 948 10 957 - 954 9 960 954 9 960 957 10 966 963 - 9 969 963 9 969 966 10 975 972 9 - 978 972 9 978 975 10 984 981 9 987 - 981 9 987 984 10 993 990 9 996 990 - 9 996 993 10 1002 999 9 1005 999 9 - 1005 1002 10 1011 1008 9 1014 1008 9 1014 - 1011 10 1020 1017 9 1023 1017 9 1023 1020 - 10 1029 1026 9 1032 1026 9 1032 1029 10 - 1038 1035 9 1041 1035 9 1041 1038 10 1047 - 1044 9 1050 1044 9 1050 1047 10 1056 1053 - 9 1059 1053 9 1059 1056 10 1065 1062 9 - 1068 1062 9 1068 1065 10 1074 1071 9 1077 - 1071 9 1077 1074 10 1083 1080 9 1086 1080 - 9 1086 1083 10 1092 1089 9 1095 1089 9 - 1095 1092 10 1101 1098 9 1104 1098 9 1104 - 1101 10 1110 1107 9 1113 1107 9 1113 1110 - 10 1119 1116 9 1122 1116 9 1122 1119 10 - 1128 1125 9 1131 1125 9 1131 1128 10 1137 - 1134 9 1140 1134 9 1140 1137 10 1146 1143 - 9 1149 1143 9 1149 1146 10 1155 1152 9 - 1158 1152 9 1158 1155 10 1164 1161 9 1167 - 1161 9 1167 1164 10 1173 1170 9 1176 1170 - 9 1176 1173 10 1182 1179 9 1185 1179 9 - 1185 1182 10 1191 1188 9 1194 1188 9 1194 - 1191 10 1200 1197 9 1203 1197 9 1203 1200 - 10 1209 1206 9 1212 1206 9 1212 1209 10 - 1218 1215 9 1221 1215 9 1221 1218 10 1227 - 1224 9 1230 1224 9 1230 1227 10 1236 1233 - 9 1239 1233 9 1239 1236 10 1245 1242 9 - 1248 1242 9 1248 1245 10 1254 1251 9 1257 - 1251 9 1257 1254 10 1263 1260 9 1266 1260 - 9 1266 1263 10 1272 1269 9 1275 1269 9 - 1275 1272 10 1281 1278 9 1284 1278 9 1284 - 1281 10 1290 1287 9 1293 1287 9 1293 1290 - 10 1299 1296 9 1302 1296 9 1302 1299 10 - 1308 1305 9 1311 1305 9 1311 1308 10 1317 - 1314 9 1320 1314 9 1320 1317 10 1326 1323 - 9 1329 1323 9 1329 1326 10 1335 1332 9 - 1338 1332 9 1338 1335 10 1344 1341 9 1347 - 1341 9 1347 1344 10 1353 1350 9 1356 1350 - 9 1356 1353 10 1362 1359 9 1365 1359 9 - 1365 1362 10 1371 1368 9 1374 1368 9 1374 - 1371 10 1380 1377 9 1383 1377 9 1383 1380 - 10 1389 1386 9 1392 1386 9 1392 1389 10 - 1398 1395 9 1401 1395 9 1401 1398 10 1407 - 1404 9 1410 1404 9 1410 1407 10 1416 1413 - 9 1419 1413 9 1419 1416 10 1425 1422 9 - 1428 1422 9 1428 1425 10 1434 1431 9 1437 - 1431 9 1437 1434 10 1443 1440 9 1446 1440 - 9 1446 1443 10 1452 1449 9 1455 1449 9 - 1455 1452 10 1461 1458 9 1464 1458 9 1464 - 1461 10 1470 1467 9 1473 1467 9 1473 1470 - 10 1479 1476 9 1482 1476 9 1482 1479 10 - 1488 1485 9 1491 1485 9 1491 1488 10 1497 - 1494 9 1500 1494 9 1500 1497 10 1506 1503 - 9 1509 1503 9 1509 1506 10 1515 1512 9 - 1518 1512 9 1518 1515 10 1524 1521 9 1527 - 1521 9 1527 1524 10 1533 1530 9 1536 1530 - 9 1536 1533 10 1542 1539 9 1545 1539 9 - 1545 1542 10 1551 1548 9 1554 1548 9 1554 - 1551 10 1560 1557 9 1563 1557 9 1563 1560 - 10 1569 1566 9 1572 1566 9 1572 1569 10 - 1578 1575 9 1581 1575 9 1581 1578 10 1587 - 1584 9 1590 1584 9 1590 1587 10 1596 1593 - 9 1599 1593 9 1599 1596 10 1605 1602 9 - 1608 1602 9 1608 1605 10 1614 1611 9 1617 - 1611 9 1617 1614 10 1623 1620 9 1626 1620 - 9 1626 1623 10 1632 1629 9 1635 1629 9 - 1635 1632 10 1641 1638 9 1644 1638 9 1644 - 1641 10 1650 1647 9 1653 1647 9 1653 1650 - 10 1659 1656 9 1662 1656 9 1662 1659 10 - 
1668 1665 9 1671 1665 9 1671 1668 10 1677 - 1674 9 1680 1674 9 1680 1677 10 1686 1683 - 9 1689 1683 9 1689 1686 10 1695 1692 9 - 1698 1692 9 1698 1695 10 1704 1701 9 1707 - 1701 9 1707 1704 10 1713 1710 9 1716 1710 - 9 1716 1713 10 1722 1719 9 1725 1719 9 - 1725 1722 10 1731 1728 9 1734 1728 9 1734 - 1731 10 1740 1737 9 1743 1737 9 1743 1740 - 10 1749 1746 9 1752 1746 9 1752 1749 10 - 1758 1755 9 1761 1755 9 1761 1758 10 1767 - 1764 9 1770 1764 9 1770 1767 10 1776 1773 - 9 1779 1773 9 1779 1776 10 1785 1782 9 - 1788 1782 9 1788 1785 10 1794 1791 9 1797 - 1791 9 1797 1794 10 1803 1800 9 1806 1800 - 9 1806 1803 10 1812 1809 9 1815 1809 9 - 1815 1812 10 1821 1818 9 1824 1818 9 1824 - 1821 10 1830 1827 9 1833 1827 9 1833 1830 - 10 1839 1836 9 1842 1836 9 1842 1839 10 - 1848 1845 9 1851 1845 9 1851 1848 10 1857 - 1854 9 1860 1854 9 1860 1857 10 1866 1863 - 9 1869 1863 9 1869 1866 10 1875 1872 9 - 1878 1872 9 1878 1875 10 1884 1881 9 1887 - 1881 9 1887 1884 10 1893 1890 9 1896 1890 - 9 1896 1893 10 1902 1899 9 1905 1899 9 - 1905 1902 10 1911 1908 9 1914 1908 9 1914 - 1911 10 1920 1917 9 1923 1917 9 1923 1920 - 10 1929 1926 9 1932 1926 9 1932 1929 10 - 1938 1935 9 1941 1935 9 1941 1938 10 1947 - 1944 9 1950 1944 9 1950 1947 10 1956 1953 - 9 1959 1953 9 1959 1956 10 1965 1962 9 - 1968 1962 9 1968 1965 10 1974 1971 9 1977 - 1971 9 1977 1974 10 1983 1980 9 1986 1980 - 9 1986 1983 10 1992 1989 9 1995 1989 9 - 1995 1992 10 2001 1998 9 2004 1998 9 2004 - 2001 10 2010 2007 9 2013 2007 9 2013 2010 - 10 2019 2016 9 2022 2016 9 2022 2019 10 - 2028 2025 9 2031 2025 9 2031 2028 10 2037 - 2034 9 2040 2034 9 2040 2037 10 2046 2043 - 9 2049 2043 9 2049 2046 10 2055 2052 9 - 2058 2052 9 2058 2055 10 2064 2061 9 2067 - 2061 9 2067 2064 10 2073 2070 9 2076 2070 - 9 2076 2073 10 2082 2079 9 2085 2079 9 - 2085 2082 10 2091 2088 9 2094 2088 9 2094 - 2091 10 2100 2097 9 2103 2097 9 2103 2100 - 10 2109 2106 9 2112 2106 9 2112 2109 10 - 2118 2115 9 2121 2115 9 2121 2118 10 2127 - 2124 9 2130 2124 9 2130 2127 10 2136 2133 - 9 2139 2133 9 2139 2136 10 2145 2142 9 - 2148 2142 9 2148 2145 10 2154 2151 9 2157 - 2151 9 2157 2154 10 2163 2160 9 2166 2160 - 9 2166 2163 10 2172 2169 9 2175 2169 9 - 2175 2172 10 2181 2178 9 2184 2178 9 2184 - 2181 10 2190 2187 9 2193 2187 9 2193 2190 - 10 2199 2196 9 2202 2196 9 2202 2199 10 - 2208 2205 9 2211 2205 9 2211 2208 10 2217 - 2214 9 2220 2214 9 2220 2217 10 2226 2223 - 9 2229 2223 9 2229 2226 10 2235 2232 9 - 2238 2232 9 2238 2235 10 2244 2241 9 2247 - 2241 9 2247 2244 10 2253 2250 9 2256 2250 - 9 2256 2253 10 2262 2259 9 2265 2259 9 - 2265 2262 10 2271 2268 9 2274 2268 9 2274 - 2271 10 2280 2277 9 2283 2277 9 2283 2280 - 10 2289 2286 9 2292 2286 9 2292 2289 10 - 2298 2295 9 2301 2295 9 2301 2298 10 2307 - 2304 9 2310 2304 9 2310 2307 10 2316 2313 - 9 2319 2313 9 2319 2316 10 2325 2322 9 - 2328 2322 9 2328 2325 10 2334 2331 9 2337 - 2331 9 2337 2334 10 2343 2340 9 2346 2340 - 9 2346 2343 10 2352 2349 9 2355 2349 9 - 2355 2352 10 2361 2358 9 2364 2358 9 2364 - 2361 10 2370 2367 9 2373 2367 9 2373 2370 - 10 2379 2376 9 2382 2376 9 2382 2379 10 - 2388 2385 9 2391 2385 9 2391 2388 10 2397 - 2394 9 2400 2394 9 2400 2397 10 2406 2403 - 9 2409 2403 9 2409 2406 10 2415 2412 9 - 2418 2412 9 2418 2415 10 2424 2421 9 2427 - 2421 9 2427 2424 10 2433 2430 9 2436 2430 - 9 2436 2433 10 2442 2439 9 2445 2439 9 - 2445 2442 10 2451 2448 9 2454 2448 9 2454 - 2451 10 2460 2457 9 2463 2457 9 2463 2460 - 10 2469 2466 9 2472 2466 9 2472 2469 10 - 2478 2475 9 2481 2475 9 2481 2478 10 2487 - 2484 9 2490 2484 9 
2490 2487 10 2496 2493 - 9 2499 2493 9 2499 2496 10 2505 2502 9 - 2508 2502 9 2508 2505 10 2514 2511 9 2517 - 2511 9 2517 2514 10 2523 2520 9 2526 2520 - 9 2526 2523 10 2532 2529 9 2535 2529 9 - 2535 2532 10 2541 2538 9 2544 2538 9 2544 - 2541 10 2550 2547 9 2553 2547 9 2553 2550 - 10 2559 2556 9 2562 2556 9 2562 2559 10 - 2568 2565 9 2571 2565 9 2571 2568 10 2577 - 2574 9 2580 2574 9 2580 2577 10 2586 2583 - 9 2589 2583 9 2589 2586 10 2595 2592 9 - 2598 2592 9 2598 2595 10 2604 2601 9 2607 - 2601 9 2607 2604 10 2613 2610 9 2616 2610 - 9 2616 2613 10 2622 2619 9 2625 2619 9 - 2625 2622 10 2631 2628 9 2634 2628 9 2634 - 2631 10 2640 2637 9 2643 2637 9 2643 2640 - 10 2649 2646 9 2652 2646 9 2652 2649 10 - 2658 2655 9 2661 2655 9 2661 2658 10 2667 - 2664 9 2670 2664 9 2670 2667 10 2676 2673 - 9 2679 2673 9 2679 2676 10 2685 2682 9 - 2688 2682 9 2688 2685 10 2694 2691 9 2697 - 2691 9 2697 2694 10 2703 2700 9 2706 2700 - 9 2706 2703 10 2712 2709 9 2715 2709 9 - 2715 2712 10 2721 2718 9 2724 2718 9 2724 - 2721 10 2730 2727 9 2733 2727 9 2733 2730 - 10 2739 2736 9 2742 2736 9 2742 2739 10 - 2748 2745 9 2751 2745 9 2751 2748 10 2757 - 2754 9 2760 2754 9 2760 2757 10 2766 2763 - 9 2769 2763 9 2769 2766 10 2775 2772 9 - 2778 2772 9 2778 2775 10 2784 2781 9 2787 - 2781 9 2787 2784 10 2793 2790 9 2796 2790 - 9 2796 2793 10 2802 2799 9 2805 2799 9 - 2805 2802 10 2811 2808 9 2814 2808 9 2814 - 2811 10 2820 2817 9 2823 2817 9 2823 2820 - 10 2829 2826 9 2832 2826 9 2832 2829 10 - 2838 2835 9 2841 2835 9 2841 2838 10 2847 - 2844 9 2850 2844 9 2850 2847 10 2856 2853 - 9 2859 2853 9 2859 2856 10 2865 2862 9 - 2868 2862 9 2868 2865 10 2874 2871 9 2877 - 2871 9 2877 2874 10 2883 2880 9 2886 2880 - 9 2886 2883 10 2892 2889 9 2895 2889 9 - 2895 2892 10 2901 2898 9 2904 2898 9 2904 - 2901 10 2910 2907 9 2913 2907 9 2913 2910 - 10 2919 2916 9 2922 2916 9 2922 2919 10 - 2928 2925 9 2931 2925 9 2931 2928 10 2937 - 2934 9 2940 2934 9 2940 2937 10 2946 2943 - 9 2949 2943 9 2949 2946 10 2955 2952 9 - 2958 2952 9 2958 2955 10 2964 2961 9 2967 - 2961 9 2967 2964 10 2973 2970 9 2976 2970 - 9 2976 2973 10 2982 2979 9 2985 2979 9 - 2985 2982 10 2991 2988 9 2994 2988 9 2994 - 2991 10 3000 2997 9 3003 2997 9 3003 3000 - 10 3009 3006 9 3012 3006 9 3012 3009 10 - 3018 3015 9 3021 3015 9 3021 3018 10 3027 - 3024 9 3030 3024 9 3030 3027 10 3036 3033 - 9 3039 3033 9 3039 3036 10 3045 3042 9 - 3048 3042 9 3048 3045 10 3054 3051 9 3057 - 3051 9 3057 3054 10 3063 3060 9 3066 3060 - 9 3066 3063 10 3072 3069 9 3075 3069 9 - 3075 3072 10 3081 3078 9 3084 3078 9 3084 - 3081 10 3090 3087 9 3093 3087 9 3093 3090 - 10 3099 3096 9 3102 3096 9 3102 3099 10 - 3108 3105 9 3111 3105 9 3111 3108 10 3117 - 3114 9 3120 3114 9 3120 3117 10 3126 3123 - 9 3129 3123 9 3129 3126 10 3135 3132 9 - 3138 3132 9 3138 3135 10 3144 3141 9 3147 - 3141 9 3147 3144 10 3153 3150 9 3156 3150 - 9 3156 3153 10 3162 3159 9 3165 3159 9 - 3165 3162 10 3171 3168 9 3174 3168 9 3174 - 3171 10 3180 3177 9 3183 3177 9 3183 3180 - 10 3189 3186 9 3192 3186 9 3192 3189 10 - 3198 3195 9 3201 3195 9 3201 3198 10 3207 - 3204 9 3210 3204 9 3210 3207 10 3216 3213 - 9 3219 3213 9 3219 3216 10 3225 3222 9 - 3228 3222 9 3228 3225 10 3234 3231 9 3237 - 3231 9 3237 3234 10 3243 3240 9 3246 3240 - 9 3246 3243 10 3252 3249 9 3255 3249 9 - 3255 3252 10 3261 3258 9 3264 3258 9 3264 - 3261 10 3270 3267 9 3273 3267 9 3273 3270 - 10 3279 3276 9 3282 3276 9 3282 3279 10 - 3288 3285 9 3291 3285 9 3291 3288 10 3297 - 3294 9 3300 3294 9 3300 3297 10 3306 3303 - 9 3309 3303 9 3309 3306 10 3315 3312 
9 - 3318 3312 9 3318 3315 10 3324 3321 9 3327 - 3321 9 3327 3324 10 3333 3330 9 3336 3330 - 9 3336 3333 10 3342 3339 9 3345 3339 9 - 3345 3342 10 3351 3348 9 3354 3348 9 3354 - 3351 10 3360 3357 9 3363 3357 9 3363 3360 - 10 3369 3366 9 3372 3366 9 3372 3369 10 - 3378 3375 9 3381 3375 9 3381 3378 10 3387 - 3384 9 3390 3384 9 3390 3387 10 3396 3393 - 9 3399 3393 9 3399 3396 10 3405 3402 9 - 3408 3402 9 3408 3405 10 3414 3411 9 3417 - 3411 9 3417 3414 10 3423 3420 9 3426 3420 - 9 3426 3423 10 3432 3429 9 3435 3429 9 - 3435 3432 10 3441 3438 9 3444 3438 9 3444 - 3441 10 3450 3447 9 3453 3447 9 3453 3450 - 10 3459 3456 9 3462 3456 9 3462 3459 10 - 3468 3465 9 3471 3465 9 3471 3468 10 3477 - 3474 9 3480 3474 9 3480 3477 10 3486 3483 - 9 3489 3483 9 3489 3486 10 3495 3492 9 - 3498 3492 9 3498 3495 10 3504 3501 9 3507 - 3501 9 3507 3504 10 3513 3510 9 3516 3510 - 9 3516 3513 10 3522 3519 9 3525 3519 9 - 3525 3522 10 3531 3528 9 3534 3528 9 3534 - 3531 10 3540 3537 9 3543 3537 9 3543 3540 - 10 3549 3546 9 3552 3546 9 3552 3549 10 - 3558 3555 9 3561 3555 9 3561 3558 10 3567 - 3564 9 3570 3564 9 3570 3567 10 3576 3573 - 9 3579 3573 9 3579 3576 10 3585 3582 9 - 3588 3582 9 3588 3585 10 3594 3591 9 3597 - 3591 9 3597 3594 10 3603 3600 9 3606 3600 - 9 3606 3603 10 3612 3609 9 3615 3609 9 - 3615 3612 10 3621 3618 9 3624 3618 9 3624 - 3621 10 3630 3627 9 3633 3627 9 3633 3630 - 10 3639 3636 9 3642 3636 9 3642 3639 10 - 3648 3645 9 3651 3645 9 3651 3648 10 3657 - 3654 9 3660 3654 9 3660 3657 10 3666 3663 - 9 3669 3663 9 3669 3666 10 3675 3672 9 - 3678 3672 9 3678 3675 10 3684 3681 9 3687 - 3681 9 3687 3684 10 3693 3690 9 3696 3690 - 9 3696 3693 10 3702 3699 9 3705 3699 9 - 3705 3702 10 3711 3708 9 3714 3708 9 3714 - 3711 10 3720 3717 9 3723 3717 9 3723 3720 - 10 3729 3726 9 3732 3726 9 3732 3729 10 - 3738 3735 9 3741 3735 9 3741 3738 10 3747 - 3744 9 3750 3744 9 3750 3747 10 3756 3753 - 9 3759 3753 9 3759 3756 10 3765 3762 9 - 3768 3762 9 3768 3765 10 3774 3771 9 3777 - 3771 9 3777 3774 10 3783 3780 9 3786 3780 - 9 3786 3783 10 3792 3789 9 3795 3789 9 - 3795 3792 10 3801 3798 9 3804 3798 9 3804 - 3801 10 3810 3807 9 3813 3807 9 3813 3810 - 10 3819 3816 9 3822 3816 9 3822 3819 10 - 3828 3825 9 3831 3825 9 3831 3828 10 3837 - 3834 9 3840 3834 9 3840 3837 10 3846 3843 - 9 3849 3843 9 3849 3846 10 3855 3852 9 - 3858 3852 9 3858 3855 10 3864 3861 9 3867 - 3861 9 3867 3864 10 3873 3870 9 3876 3870 - 9 3876 3873 10 3882 3879 9 3885 3879 9 - 3885 3882 10 3891 3888 9 3894 3888 9 3894 - 3891 10 3900 3897 9 3903 3897 9 3903 3900 - 10 3909 3906 9 3912 3906 9 3912 3909 10 - 3918 3915 9 3921 3915 9 3921 3918 10 3927 - 3924 9 3930 3924 9 3930 3927 10 3936 3933 - 9 3939 3933 9 3939 3936 10 3945 3942 9 - 3948 3942 9 3948 3945 10 3954 3951 9 3957 - 3951 9 3957 3954 10 3963 3960 9 3966 3960 - 9 3966 3963 10 3972 3969 9 3975 3969 9 - 3975 3972 10 3981 3978 9 3984 3978 9 3984 - 3981 10 3990 3987 9 3993 3987 9 3993 3990 - 10 3999 3996 9 4002 3996 9 4002 3999 10 - 4008 4005 9 4011 4005 9 4011 4008 10 4017 - 4014 9 4020 4014 9 4020 4017 10 4026 4023 - 9 4029 4023 9 4029 4026 10 4035 4032 9 - 4038 4032 9 4038 4035 10 4044 4041 9 4047 - 4041 9 4047 4044 10 4053 4050 9 4056 4050 - 9 4056 4053 10 4062 4059 9 4065 4059 9 - 4065 4062 10 4071 4068 9 4074 4068 9 4074 - 4071 10 4080 4077 9 4083 4077 9 4083 4080 - 10 4089 4086 9 4092 4086 9 4092 4089 10 - 4098 4095 9 4101 4095 9 4101 4098 10 4107 - 4104 9 4110 4104 9 4110 4107 10 4116 4113 - 9 4119 4113 9 4119 4116 10 4125 4122 9 - 4128 4122 9 4128 4125 10 4134 4131 9 4137 - 4131 9 4137 
4134 10 4143 4140 9 4146 4140 - 9 4146 4143 10 4152 4149 9 4155 4149 9 - 4155 4152 10 4161 4158 9 4164 4158 9 4164 - 4161 10 4170 4167 9 4173 4167 9 4173 4170 - 10 4179 4176 9 4182 4176 9 4182 4179 10 - 4188 4185 9 4191 4185 9 4191 4188 10 4197 - 4194 9 4200 4194 9 4200 4197 10 4206 4203 - 9 4209 4203 9 4209 4206 10 4215 4212 9 - 4218 4212 9 4218 4215 10 4224 4221 9 4227 - 4221 9 4227 4224 10 4233 4230 9 4236 4230 - 9 4236 4233 10 4242 4239 9 4245 4239 9 - 4245 4242 10 4251 4248 9 4254 4248 9 4254 - 4251 10 4260 4257 9 4263 4257 9 4263 4260 - 10 4269 4266 9 4272 4266 9 4272 4269 10 - 4278 4275 9 4281 4275 9 4281 4278 10 4287 - 4284 9 4290 4284 9 4290 4287 10 4296 4293 - 9 4299 4293 9 4299 4296 10 4305 4302 9 - 4308 4302 9 4308 4305 10 4314 4311 9 4317 - 4311 9 4317 4314 10 4323 4320 9 4326 4320 - 9 4326 4323 10 4332 4329 9 4335 4329 9 - 4335 4332 10 4341 4338 9 4344 4338 9 4344 - 4341 10 4350 4347 9 4353 4347 9 4353 4350 - 10 4359 4356 9 4362 4356 9 4362 4359 10 - 4368 4365 9 4371 4365 9 4371 4368 10 4377 - 4374 9 4380 4374 9 4380 4377 10 4386 4383 - 9 4389 4383 9 4389 4386 10 4395 4392 9 - 4398 4392 9 4398 4395 10 4404 4401 9 4407 - 4401 9 4407 4404 10 4413 4410 9 4416 4410 - 9 4416 4413 10 4422 4419 9 4425 4419 9 - 4425 4422 10 4431 4428 9 4434 4428 9 4434 - 4431 10 4440 4437 9 4443 4437 9 4443 4440 - 10 4449 4446 9 4452 4446 9 4452 4449 10 - 4458 4455 9 4461 4455 9 4461 4458 10 4467 - 4464 9 4470 4464 9 4470 4467 10 4476 4473 - 9 4479 4473 9 4479 4476 10 4485 4482 9 - 4488 4482 9 4488 4485 10 4494 4491 9 4497 - 4491 9 4497 4494 10 4503 4500 9 4506 4500 - 9 4506 4503 10 4512 4509 9 4515 4509 9 - 4515 4512 10 4521 4518 9 4524 4518 9 4524 - 4521 10 4530 4527 9 4533 4527 9 4533 4530 - 10 4539 4536 9 4542 4536 9 4542 4539 10 - 4548 4545 9 4551 4545 9 4551 4548 10 4557 - 4554 9 4560 4554 9 4560 4557 10 4566 4563 - 9 4569 4563 9 4569 4566 10 4575 4572 9 - 4578 4572 9 4578 4575 10 4584 4581 9 4587 - 4581 9 4587 4584 10 4593 4590 9 4596 4590 - 9 4596 4593 10 4602 4599 9 4605 4599 9 - 4605 4602 10 4611 4608 9 4614 4608 9 4614 - 4611 10 4620 4617 9 4623 4617 9 4623 4620 - 10 4629 4626 9 4632 4626 9 4632 4629 10 - 4638 4635 9 4641 4635 9 4641 4638 10 4647 - 4644 9 4650 4644 9 4650 4647 10 4656 4653 - 9 4659 4653 9 4659 4656 10 4665 4662 9 - 4668 4662 9 4668 4665 10 4674 4671 9 4677 - 4671 9 4677 4674 10 4683 4680 9 4686 4680 - 9 4686 4683 10 4692 4689 9 4695 4689 9 - 4695 4692 10 4701 4698 9 4704 4698 9 4704 - 4701 10 4710 4707 9 4713 4707 9 4713 4710 - 10 4719 4716 9 4722 4716 9 4722 4719 10 - 4728 4725 9 4731 4725 9 4731 4728 10 4737 - 4734 9 4740 4734 9 4740 4737 10 4746 4743 - 9 4749 4743 9 4749 4746 10 4755 4752 9 - 4758 4752 9 4758 4755 10 4764 4761 9 4767 - 4761 9 4767 4764 10 4773 4770 9 4776 4770 - 9 4776 4773 10 4782 4779 9 4785 4779 9 - 4785 4782 10 4791 4788 9 4794 4788 9 4794 - 4791 10 4800 4797 9 4803 4797 9 4803 4800 - 10 4809 4806 9 4812 4806 9 4812 4809 10 - 4818 4815 9 4821 4815 9 4821 4818 10 4827 - 4824 9 4830 4824 9 4830 4827 10 4836 4833 - 9 4839 4833 9 4839 4836 10 4845 4842 9 - 4848 4842 9 4848 4845 10 4854 4851 9 4857 - 4851 9 4857 4854 10 4863 4860 9 4866 4860 - 9 4866 4863 10 4872 4869 9 4875 4869 9 - 4875 4872 10 4881 4878 9 4884 4878 9 4884 - 4881 10 4890 4887 9 4893 4887 9 4893 4890 - 10 4899 4896 9 4902 4896 9 4902 4899 10 - 4908 4905 9 4911 4905 9 4911 4908 10 4917 - 4914 9 4920 4914 9 4920 4917 10 4926 4923 - 9 4929 4923 9 4929 4926 10 4935 4932 9 - 4938 4932 9 4938 4935 10 4944 4941 9 4947 - 4941 9 4947 4944 10 4953 4950 9 4956 4950 - 9 4956 4953 10 4962 4959 9 
4965 4959 9 - 4965 4962 10 4971 4968 9 4974 4968 9 4974 - 4971 10 4980 4977 9 4983 4977 9 4983 4980 - 10 4989 4986 9 4992 4986 9 4992 4989 10 - 4998 4995 9 5001 4995 9 5001 4998 10 5007 - 5004 9 5010 5004 9 5010 5007 10 5016 5013 - 9 5019 5013 9 5019 5016 10 5025 5022 9 - 5028 5022 9 5028 5025 10 5034 5031 9 5037 - 5031 9 5037 5034 10 5043 5040 9 5046 5040 - 9 5046 5043 10 5052 5049 9 5055 5049 9 - 5055 5052 10 5061 5058 9 5064 5058 9 5064 - 5061 10 5070 5067 9 5073 5067 9 5073 5070 - 10 5079 5076 9 5082 5076 9 5082 5079 10 - 5088 5085 9 5091 5085 9 5091 5088 10 5097 - 5094 9 5100 5094 9 5100 5097 10 5106 5103 - 9 5109 5103 9 5109 5106 10 5115 5112 9 - 5118 5112 9 5118 5115 10 5124 5121 9 5127 - 5121 9 5127 5124 10 5133 5130 9 5136 5130 - 9 5136 5133 10 5142 5139 9 5145 5139 9 - 5145 5142 10 5151 5148 9 5154 5148 9 5154 - 5151 10 5160 5157 9 5163 5157 9 5163 5160 - 10 5169 5166 9 5172 5166 9 5172 5169 10 - 5178 5175 9 5181 5175 9 5181 5178 10 5187 - 5184 9 5190 5184 9 5190 5187 10 5196 5193 - 9 5199 5193 9 5199 5196 10 5205 5202 9 - 5208 5202 9 5208 5205 10 5214 5211 9 5217 - 5211 9 5217 5214 10 5223 5220 9 5226 5220 - 9 5226 5223 10 5232 5229 9 5235 5229 9 - 5235 5232 10 5241 5238 9 5244 5238 9 5244 - 5241 10 5250 5247 9 5253 5247 9 5253 5250 - 10 5259 5256 9 5262 5256 9 5262 5259 10 - 5268 5265 9 5271 5265 9 5271 5268 10 5277 - 5274 9 5280 5274 9 5280 5277 10 5286 5283 - 9 5289 5283 9 5289 5286 10 5295 5292 9 - 5298 5292 9 5298 5295 10 5304 5301 9 5307 - 5301 9 5307 5304 10 5313 5310 9 5316 5310 - 9 5316 5313 10 5322 5319 9 5325 5319 9 - 5325 5322 10 5331 5328 9 5334 5328 9 5334 - 5331 10 5340 5337 9 5343 5337 9 5343 5340 - 10 5349 5346 9 5352 5346 9 5352 5349 10 - 5358 5355 9 5361 5355 9 5361 5358 10 5367 - 5364 9 5370 5364 9 5370 5367 10 5376 5373 - 9 5379 5373 9 5379 5376 10 5385 5382 9 - 5388 5382 9 5388 5385 10 5394 5391 9 5397 - 5391 9 5397 5394 10 5403 5400 9 5406 5400 - 9 5406 5403 10 5412 5409 9 5415 5409 9 - 5415 5412 10 5421 5418 9 5424 5418 9 5424 - 5421 10 5430 5427 9 5433 5427 9 5433 5430 - 10 5439 5436 9 5442 5436 9 5442 5439 10 - 5448 5445 9 5451 5445 9 5451 5448 10 5457 - 5454 9 5460 5454 9 5460 5457 10 5466 5463 - 9 5469 5463 9 5469 5466 10 5475 5472 9 - 5478 5472 9 5478 5475 10 5484 5481 9 5487 - 5481 9 5487 5484 10 5493 5490 9 5496 5490 - 9 5496 5493 10 5502 5499 9 5505 5499 9 - 5505 5502 10 5511 5508 9 5514 5508 9 5514 - 5511 10 5520 5517 9 5523 5517 9 5523 5520 - 10 5529 5526 9 5532 5526 9 5532 5529 10 - 5538 5535 9 5541 5535 9 5541 5538 10 5547 - 5544 9 5550 5544 9 5550 5547 10 5556 5553 - 9 5559 5553 9 5559 5556 10 5565 5562 9 - 5568 5562 9 5568 5565 10 5574 5571 9 5577 - 5571 9 5577 5574 10 5583 5580 9 5586 5580 - 9 5586 5583 10 5592 5589 9 5595 5589 9 - 5595 5592 10 5601 5598 9 5604 5598 9 5604 - 5601 10 5610 5607 9 5613 5607 9 5613 5610 - 10 5619 5616 9 5622 5616 9 5622 5619 10 - 5628 5625 9 5631 5625 9 5631 5628 10 5637 - 5634 9 5640 5634 9 5640 5637 10 5646 5643 - 9 5649 5643 9 5649 5646 10 5655 5652 9 - 5658 5652 9 5658 5655 10 5664 5661 9 5667 - 5661 9 5667 5664 10 5673 5670 9 5676 5670 - 9 5676 5673 10 5682 5679 9 5685 5679 9 - 5685 5682 10 5691 5688 9 5694 5688 9 5694 - 5691 10 5700 5697 9 5703 5697 9 5703 5700 - 10 5709 5706 9 5712 5706 9 5712 5709 10 - 5718 5715 9 5721 5715 9 5721 5718 10 5727 - 5724 9 5730 5724 9 5730 5727 10 5736 5733 - 9 5739 5733 9 5739 5736 10 5745 5742 9 - 5748 5742 9 5748 5745 10 5754 5751 9 5757 - 5751 9 5757 5754 10 5763 5760 9 5766 5760 - 9 5766 5763 10 5772 5769 9 5775 5769 9 - 5775 5772 10 5781 5778 9 5784 5778 9 5784 - 5781 
10 5790 5787 9 5793 5787 9 5793 5790 - 10 5799 5796 9 5802 5796 9 5802 5799 10 - 5808 5805 9 5811 5805 9 5811 5808 10 5817 - 5814 9 5820 5814 9 5820 5817 10 5826 5823 - 9 5829 5823 9 5829 5826 10 5835 5832 9 - 5838 5832 9 5838 5835 10 5844 5841 9 5847 - 5841 9 5847 5844 10 5853 5850 9 5856 5850 - 9 5856 5853 10 5862 5859 9 5865 5859 9 - 5865 5862 10 5871 5868 9 5874 5868 9 5874 - 5871 10 5880 5877 9 5883 5877 9 5883 5880 - 10 5889 5886 9 5892 5886 9 5892 5889 10 - 5898 5895 9 5901 5895 9 5901 5898 10 5907 - 5904 9 5910 5904 9 5910 5907 10 5916 5913 - 9 5919 5913 9 5919 5916 10 5925 5922 9 - 5928 5922 9 5928 5925 10 5934 5931 9 5937 - 5931 9 5937 5934 10 5943 5940 9 5946 5940 - 9 5946 5943 10 5952 5949 9 5955 5949 9 - 5955 5952 10 5961 5958 9 5964 5958 9 5964 - 5961 10 5970 5967 9 5973 5967 9 5973 5970 - 10 5979 5976 9 5982 5976 9 5982 5979 10 - 5988 5985 9 5991 5985 9 5991 5988 10 5997 - 5994 9 6000 5994 9 6000 5997 10 6006 6003 - 9 6009 6003 9 6009 6006 10 6015 6012 9 - 6018 6012 9 6018 6015 10 6024 6021 9 6027 - 6021 9 6027 6024 10 6033 6030 9 6036 6030 - 9 6036 6033 10 6042 6039 9 6045 6039 9 - 6045 6042 10 6051 6048 9 6054 6048 9 6054 - 6051 10 6060 6057 9 6063 6057 9 6063 6060 - 10 6069 6066 9 6072 6066 9 6072 6069 10 - 6078 6075 9 6081 6075 9 6081 6078 10 6087 - 6084 9 6090 6084 9 6090 6087 10 6096 6093 - 9 6099 6093 9 6099 6096 10 6105 6102 9 - 6108 6102 9 6108 6105 10 6114 6111 9 6117 - 6111 9 6117 6114 10 6123 6120 9 6126 6120 - 9 6126 6123 10 6132 6129 9 6135 6129 9 - 6135 6132 10 6141 6138 9 6144 6138 9 6144 - 6141 10 6150 6147 9 6153 6147 9 6153 6150 - 10 6159 6156 9 6162 6156 9 6162 6159 10 - 6168 6165 9 6171 6165 9 6171 6168 10 6177 - 6174 9 6180 6174 9 6180 6177 10 6186 6183 - 9 6189 6183 9 6189 6186 10 6195 6192 9 - 6198 6192 9 6198 6195 10 6204 6201 9 6207 - 6201 9 6207 6204 10 6213 6210 9 6216 6210 - 9 6216 6213 10 6222 6219 9 6225 6219 9 - 6225 6222 10 6231 6228 9 6234 6228 9 6234 - 6231 10 6240 6237 9 6243 6237 9 6243 6240 - 10 6249 6246 9 6252 6246 9 6252 6249 10 - 6258 6255 9 6261 6255 9 6261 6258 10 6267 - 6264 9 6270 6264 9 6270 6267 10 6276 6273 - 9 6279 6273 9 6279 6276 10 6285 6282 9 - 6288 6282 9 6288 6285 10 6294 6291 9 6297 - 6291 9 6297 6294 10 6303 6300 9 6306 6300 - 9 6306 6303 10 6312 6309 9 6315 6309 9 - 6315 6312 10 6321 6318 9 6324 6318 9 6324 - 6321 10 6330 6327 9 6333 6327 9 6333 6330 - 10 6339 6336 9 6342 6336 9 6342 6339 10 - 6348 6345 9 6351 6345 9 6351 6348 10 6357 - 6354 9 6360 6354 9 6360 6357 10 6366 6363 - 9 6369 6363 9 6369 6366 10 6375 6372 9 - 6378 6372 9 6378 6375 10 6384 6381 9 6387 - 6381 9 6387 6384 10 6393 6390 9 6396 6390 - 9 6396 6393 10 6402 6399 9 6405 6399 9 - 6405 6402 10 6411 6408 9 6414 6408 9 6414 - 6411 10 6420 6417 9 6423 6417 9 6423 6420 - 10 6429 6426 9 6432 6426 9 6432 6429 10 - 6438 6435 9 6441 6435 9 6441 6438 10 6447 - 6444 9 6450 6444 9 6450 6447 10 6456 6453 - 9 6459 6453 9 6459 6456 10 6465 6462 9 - 6468 6462 9 6468 6465 10 6474 6471 9 6477 - 6471 9 6477 6474 10 6483 6480 9 6486 6480 - 9 6486 6483 10 6492 6489 9 6495 6489 9 - 6495 6492 10 6501 6498 9 6504 6498 9 6504 - 6501 10 6510 6507 9 6513 6507 9 6513 6510 - 10 6519 6516 9 6522 6516 9 6522 6519 10 - 6528 6525 9 6531 6525 9 6531 6528 10 6537 - 6534 9 6540 6534 9 6540 6537 10 6546 6543 - 9 6549 6543 9 6549 6546 10 6555 6552 9 - 6558 6552 9 6558 6555 10 6564 6561 9 6567 - 6561 9 6567 6564 10 6573 6570 9 6576 6570 - 9 6576 6573 10 6582 6579 9 6585 6579 9 - 6585 6582 10 6591 6588 9 6594 6588 9 6594 - 6591 10 6600 6597 9 6603 6597 9 6603 6600 - 10 6609 6606 9 6612 
6606 9 6612 6609 10 - 6618 6615 9 6621 6615 9 6621 6618 10 6627 - 6624 9 6630 6624 9 6630 6627 10 6636 6633 - 9 6639 6633 9 6639 6636 10 6645 6642 9 - 6648 6642 9 6648 6645 10 6654 6651 9 6657 - 6651 9 6657 6654 10 6663 6660 9 6666 6660 - 9 6666 6663 10 6672 6669 9 6675 6669 9 - 6675 6672 10 6681 6678 9 6684 6678 9 6684 - 6681 10 6690 6687 9 6693 6687 9 6693 6690 - 10 6699 6696 9 6702 6696 9 6702 6699 10 - 6708 6705 9 6711 6705 9 6711 6708 10 6717 - 6714 9 6720 6714 9 6720 6717 10 6726 6723 - 9 6729 6723 9 6729 6726 10 6735 6732 9 - 6738 6732 9 6738 6735 10 6744 6741 9 6747 - 6741 9 6747 6744 10 6753 6750 9 6756 6750 - 9 6756 6753 10 6762 6759 9 6765 6759 9 - 6765 6762 10 6771 6768 9 6774 6768 9 6774 - 6771 10 6780 6777 9 6783 6777 9 6783 6780 - 10 6789 6786 9 6792 6786 9 6792 6789 10 - 6798 6795 9 6801 6795 9 6801 6798 10 6807 - 6804 9 6810 6804 9 6810 6807 10 6816 6813 - 9 6819 6813 9 6819 6816 10 6825 6822 9 - 6828 6822 9 6828 6825 10 6834 6831 9 6837 - 6831 9 6837 6834 10 6843 6840 9 6846 6840 - 9 6846 6843 10 6852 6849 9 6855 6849 9 - 6855 6852 10 6861 6858 9 6864 6858 9 6864 - 6861 10 6870 6867 9 6873 6867 9 6873 6870 - 10 6879 6876 9 6882 6876 9 6882 6879 10 - 6888 6885 9 6891 6885 9 6891 6888 10 -%FLAG BONDS_WITHOUT_HYDROGEN -%FORMAT(10I8) - 24 27 1 24 30 2 6 12 5 6 - 24 6 0 6 8 54 57 1 54 60 - 2 36 42 5 36 54 6 30 36 8 - 84 87 1 66 72 5 66 84 6 60 - 66 8 -%FLAG ANGLES_INC_HYDROGEN -%FORMAT(10I8) - 24 30 33 2 18 12 21 4 15 12 - 18 4 15 12 21 4 9 6 12 6 - 9 6 24 7 6 12 15 8 6 12 - 18 8 6 12 21 8 3 0 6 11 - 0 6 9 12 54 60 63 2 48 42 - 51 4 45 42 48 4 45 42 51 4 - 39 36 42 6 39 36 54 7 36 42 - 45 8 36 42 48 8 36 42 51 8 - 33 30 36 11 30 36 39 12 78 72 - 81 4 75 72 78 4 75 72 81 4 - 69 66 72 6 69 66 84 7 66 72 - 75 8 66 72 78 8 66 72 81 8 - 63 60 66 11 60 66 69 12 -%FLAG ANGLES_WITHOUT_HYDROGEN -%FORMAT(10I8) - 27 24 30 1 24 30 36 3 12 6 - 24 5 6 24 27 9 6 24 30 10 - 0 6 12 13 0 6 24 14 57 54 - 60 1 54 60 66 3 42 36 54 5 - 36 54 57 9 36 54 60 10 30 36 - 42 13 30 36 54 14 72 66 84 5 - 66 84 87 9 60 66 72 13 60 66 - 84 14 -%FLAG DIHEDRALS_INC_HYDROGEN -%FORMAT(10I8) - 27 24 30 33 1 27 24 -30 33 2 - 24 30 36 39 3 21 12 6 24 10 - 18 12 6 24 10 15 12 6 24 10 - 9 6 12 15 10 9 6 12 18 10 - 9 6 12 21 10 9 6 24 27 14 - 9 6 -24 27 3 9 6 -24 27 15 - 9 6 24 30 3 6 24 30 33 2 - 3 0 6 9 3 3 0 6 12 3 - 3 0 6 24 3 0 6 12 15 10 - 0 6 12 18 10 0 6 12 21 10 - 57 54 60 63 1 57 54 -60 63 2 - 54 60 66 69 3 51 42 36 54 10 - 48 42 36 54 10 45 42 36 54 10 - 39 36 42 45 10 39 36 42 48 10 - 39 36 42 51 10 39 36 54 57 14 - 39 36 -54 57 3 39 36 -54 57 15 - 39 36 54 60 3 36 54 60 63 2 - 33 30 36 39 3 33 30 36 42 3 - 33 30 36 54 3 30 36 42 45 10 - 30 36 42 48 10 30 36 42 51 10 - 81 72 66 84 10 78 72 66 84 10 - 75 72 66 84 10 69 66 72 75 10 - 69 66 72 78 10 69 66 72 81 10 - 69 66 84 87 14 69 66 -84 87 3 - 69 66 -84 87 15 63 60 66 69 3 - 63 60 66 72 3 63 60 66 84 3 - 60 66 72 75 10 60 66 72 78 10 - 60 66 72 81 10 24 36 -30 -33 20 - 54 66 -60 -63 20 -%FLAG DIHEDRALS_WITHOUT_HYDROGEN -%FORMAT(10I8) - 27 24 30 36 2 24 30 36 42 1 - 24 30 -36 42 4 24 30 -36 42 5 - 24 30 -36 42 6 24 30 36 54 7 - 24 30 -36 54 8 24 30 -36 54 9 - 24 30 -36 54 6 12 6 24 27 3 - 12 6 24 30 11 12 6 -24 30 12 - 12 6 -24 30 13 12 6 -24 30 6 - 6 24 30 36 2 0 6 24 27 3 - 0 6 24 30 16 0 6 -24 30 17 - 0 6 -24 30 18 0 6 -24 30 6 - 57 54 60 66 2 54 60 66 72 1 - 54 60 -66 72 4 54 60 -66 72 5 - 54 60 -66 72 6 54 60 66 84 7 - 54 60 -66 84 8 54 60 -66 84 9 - 54 60 -66 84 6 42 36 54 57 3 - 42 36 54 60 11 42 36 -54 60 12 - 42 36 -54 60 13 42 36 -54 60 6 - 36 54 
60 66 2 30 36 54 57 3 - 30 36 54 60 16 30 36 -54 60 17 - 30 36 -54 60 18 30 36 -54 60 6 - 72 66 84 87 3 60 66 84 87 3 - 6 30 -24 -27 19 36 60 -54 -57 19 -%FLAG EXCLUDED_ATOMS_LIST -%FORMAT(10I8) - 2 3 4 5 6 7 8 9 10 11 - 3 4 5 9 4 5 6 7 8 9 - 10 11 12 13 5 6 7 8 9 10 - 11 6 7 8 9 10 11 7 8 9 - 8 9 9 10 11 12 13 14 15 19 - 11 12 13 12 13 14 15 16 17 18 - 19 20 21 13 14 15 19 14 15 16 - 17 18 19 20 21 22 23 15 16 17 - 18 19 20 21 16 17 18 19 20 21 - 17 18 19 18 19 19 20 21 22 23 - 24 25 29 21 22 23 22 23 24 25 - 26 27 28 29 30 23 24 25 29 24 - 25 26 27 28 29 30 25 26 27 28 - 29 30 26 27 28 29 30 27 28 29 - 28 29 29 30 0 32 33 33 0 35 - 36 36 0 38 39 39 0 41 42 42 - 0 44 45 45 0 47 48 48 0 50 - 51 51 0 53 54 54 0 56 57 57 - 0 59 60 60 0 62 63 63 0 65 - 66 66 0 68 69 69 0 71 72 72 - 0 74 75 75 0 77 78 78 0 80 - 81 81 0 83 84 84 0 86 87 87 - 0 89 90 90 0 92 93 93 0 95 - 96 96 0 98 99 99 0 101 102 102 - 0 104 105 105 0 107 108 108 0 110 - 111 111 0 113 114 114 0 116 117 117 - 0 119 120 120 0 122 123 123 0 125 - 126 126 0 128 129 129 0 131 132 132 - 0 134 135 135 0 137 138 138 0 140 - 141 141 0 143 144 144 0 146 147 147 - 0 149 150 150 0 152 153 153 0 155 - 156 156 0 158 159 159 0 161 162 162 - 0 164 165 165 0 167 168 168 0 170 - 171 171 0 173 174 174 0 176 177 177 - 0 179 180 180 0 182 183 183 0 185 - 186 186 0 188 189 189 0 191 192 192 - 0 194 195 195 0 197 198 198 0 200 - 201 201 0 203 204 204 0 206 207 207 - 0 209 210 210 0 212 213 213 0 215 - 216 216 0 218 219 219 0 221 222 222 - 0 224 225 225 0 227 228 228 0 230 - 231 231 0 233 234 234 0 236 237 237 - 0 239 240 240 0 242 243 243 0 245 - 246 246 0 248 249 249 0 251 252 252 - 0 254 255 255 0 257 258 258 0 260 - 261 261 0 263 264 264 0 266 267 267 - 0 269 270 270 0 272 273 273 0 275 - 276 276 0 278 279 279 0 281 282 282 - 0 284 285 285 0 287 288 288 0 290 - 291 291 0 293 294 294 0 296 297 297 - 0 299 300 300 0 302 303 303 0 305 - 306 306 0 308 309 309 0 311 312 312 - 0 314 315 315 0 317 318 318 0 320 - 321 321 0 323 324 324 0 326 327 327 - 0 329 330 330 0 332 333 333 0 335 - 336 336 0 338 339 339 0 341 342 342 - 0 344 345 345 0 347 348 348 0 350 - 351 351 0 353 354 354 0 356 357 357 - 0 359 360 360 0 362 363 363 0 365 - 366 366 0 368 369 369 0 371 372 372 - 0 374 375 375 0 377 378 378 0 380 - 381 381 0 383 384 384 0 386 387 387 - 0 389 390 390 0 392 393 393 0 395 - 396 396 0 398 399 399 0 401 402 402 - 0 404 405 405 0 407 408 408 0 410 - 411 411 0 413 414 414 0 416 417 417 - 0 419 420 420 0 422 423 423 0 425 - 426 426 0 428 429 429 0 431 432 432 - 0 434 435 435 0 437 438 438 0 440 - 441 441 0 443 444 444 0 446 447 447 - 0 449 450 450 0 452 453 453 0 455 - 456 456 0 458 459 459 0 461 462 462 - 0 464 465 465 0 467 468 468 0 470 - 471 471 0 473 474 474 0 476 477 477 - 0 479 480 480 0 482 483 483 0 485 - 486 486 0 488 489 489 0 491 492 492 - 0 494 495 495 0 497 498 498 0 500 - 501 501 0 503 504 504 0 506 507 507 - 0 509 510 510 0 512 513 513 0 515 - 516 516 0 518 519 519 0 521 522 522 - 0 524 525 525 0 527 528 528 0 530 - 531 531 0 533 534 534 0 536 537 537 - 0 539 540 540 0 542 543 543 0 545 - 546 546 0 548 549 549 0 551 552 552 - 0 554 555 555 0 557 558 558 0 560 - 561 561 0 563 564 564 0 566 567 567 - 0 569 570 570 0 572 573 573 0 575 - 576 576 0 578 579 579 0 581 582 582 - 0 584 585 585 0 587 588 588 0 590 - 591 591 0 593 594 594 0 596 597 597 - 0 599 600 600 0 602 603 603 0 605 - 606 606 0 608 609 609 0 611 612 612 - 0 614 615 615 0 617 618 618 0 620 - 621 621 0 623 624 624 0 626 627 627 - 0 629 630 630 0 632 633 633 0 635 - 636 636 0 638 639 
639 0 641 642 642 - 0 644 645 645 0 647 648 648 0 650 - 651 651 0 653 654 654 0 656 657 657 - 0 659 660 660 0 662 663 663 0 665 - 666 666 0 668 669 669 0 671 672 672 - 0 674 675 675 0 677 678 678 0 680 - 681 681 0 683 684 684 0 686 687 687 - 0 689 690 690 0 692 693 693 0 695 - 696 696 0 698 699 699 0 701 702 702 - 0 704 705 705 0 707 708 708 0 710 - 711 711 0 713 714 714 0 716 717 717 - 0 719 720 720 0 722 723 723 0 725 - 726 726 0 728 729 729 0 731 732 732 - 0 734 735 735 0 737 738 738 0 740 - 741 741 0 743 744 744 0 746 747 747 - 0 749 750 750 0 752 753 753 0 755 - 756 756 0 758 759 759 0 761 762 762 - 0 764 765 765 0 767 768 768 0 770 - 771 771 0 773 774 774 0 776 777 777 - 0 779 780 780 0 782 783 783 0 785 - 786 786 0 788 789 789 0 791 792 792 - 0 794 795 795 0 797 798 798 0 800 - 801 801 0 803 804 804 0 806 807 807 - 0 809 810 810 0 812 813 813 0 815 - 816 816 0 818 819 819 0 821 822 822 - 0 824 825 825 0 827 828 828 0 830 - 831 831 0 833 834 834 0 836 837 837 - 0 839 840 840 0 842 843 843 0 845 - 846 846 0 848 849 849 0 851 852 852 - 0 854 855 855 0 857 858 858 0 860 - 861 861 0 863 864 864 0 866 867 867 - 0 869 870 870 0 872 873 873 0 875 - 876 876 0 878 879 879 0 881 882 882 - 0 884 885 885 0 887 888 888 0 890 - 891 891 0 893 894 894 0 896 897 897 - 0 899 900 900 0 902 903 903 0 905 - 906 906 0 908 909 909 0 911 912 912 - 0 914 915 915 0 917 918 918 0 920 - 921 921 0 923 924 924 0 926 927 927 - 0 929 930 930 0 932 933 933 0 935 - 936 936 0 938 939 939 0 941 942 942 - 0 944 945 945 0 947 948 948 0 950 - 951 951 0 953 954 954 0 956 957 957 - 0 959 960 960 0 962 963 963 0 965 - 966 966 0 968 969 969 0 971 972 972 - 0 974 975 975 0 977 978 978 0 980 - 981 981 0 983 984 984 0 986 987 987 - 0 989 990 990 0 992 993 993 0 995 - 996 996 0 998 999 999 0 1001 1002 1002 - 0 1004 1005 1005 0 1007 1008 1008 0 1010 - 1011 1011 0 1013 1014 1014 0 1016 1017 1017 - 0 1019 1020 1020 0 1022 1023 1023 0 1025 - 1026 1026 0 1028 1029 1029 0 1031 1032 1032 - 0 1034 1035 1035 0 1037 1038 1038 0 1040 - 1041 1041 0 1043 1044 1044 0 1046 1047 1047 - 0 1049 1050 1050 0 1052 1053 1053 0 1055 - 1056 1056 0 1058 1059 1059 0 1061 1062 1062 - 0 1064 1065 1065 0 1067 1068 1068 0 1070 - 1071 1071 0 1073 1074 1074 0 1076 1077 1077 - 0 1079 1080 1080 0 1082 1083 1083 0 1085 - 1086 1086 0 1088 1089 1089 0 1091 1092 1092 - 0 1094 1095 1095 0 1097 1098 1098 0 1100 - 1101 1101 0 1103 1104 1104 0 1106 1107 1107 - 0 1109 1110 1110 0 1112 1113 1113 0 1115 - 1116 1116 0 1118 1119 1119 0 1121 1122 1122 - 0 1124 1125 1125 0 1127 1128 1128 0 1130 - 1131 1131 0 1133 1134 1134 0 1136 1137 1137 - 0 1139 1140 1140 0 1142 1143 1143 0 1145 - 1146 1146 0 1148 1149 1149 0 1151 1152 1152 - 0 1154 1155 1155 0 1157 1158 1158 0 1160 - 1161 1161 0 1163 1164 1164 0 1166 1167 1167 - 0 1169 1170 1170 0 1172 1173 1173 0 1175 - 1176 1176 0 1178 1179 1179 0 1181 1182 1182 - 0 1184 1185 1185 0 1187 1188 1188 0 1190 - 1191 1191 0 1193 1194 1194 0 1196 1197 1197 - 0 1199 1200 1200 0 1202 1203 1203 0 1205 - 1206 1206 0 1208 1209 1209 0 1211 1212 1212 - 0 1214 1215 1215 0 1217 1218 1218 0 1220 - 1221 1221 0 1223 1224 1224 0 1226 1227 1227 - 0 1229 1230 1230 0 1232 1233 1233 0 1235 - 1236 1236 0 1238 1239 1239 0 1241 1242 1242 - 0 1244 1245 1245 0 1247 1248 1248 0 1250 - 1251 1251 0 1253 1254 1254 0 1256 1257 1257 - 0 1259 1260 1260 0 1262 1263 1263 0 1265 - 1266 1266 0 1268 1269 1269 0 1271 1272 1272 - 0 1274 1275 1275 0 1277 1278 1278 0 1280 - 1281 1281 0 1283 1284 1284 0 1286 1287 1287 - 0 1289 1290 1290 0 1292 1293 1293 0 1295 - 1296 1296 0 1298 1299 
- [... tail of the preceding integer table (per-water index triplets, running to atom 2298) elided ...]
-%FLAG HBOND_ACOEF
-%FORMAT(5E16.8)
-  0.00000000E+00
-%FLAG HBOND_BCOEF
-%FORMAT(5E16.8)
-  0.00000000E+00
-%FLAG HBCUT
-%FORMAT(5E16.8)
-  0.00000000E+00
-%FLAG AMBER_ATOM_TYPE
-%FORMAT(20a4)
-N   H   CX  H1  CT  HC  HC  HC  C   O   N   H   CX  H1  CT  HC  HC  HC  C   O
-N   H   CX  H1  CT  HC  HC  HC  C   O   OW  HW  HW  [... OW HW HW repeated for the remaining waters, elided ...]
-%FLAG TREE_CHAIN_CLASSIFICATION
-%FORMAT(20a4)
-M   E   M   E   3   E   E   E   M   E   M   E   M   E   3   E   E   E   M   E
-M   E   M   E   3   E   E   E   M   E   BLA [... BLA repeated for every water atom, elided ...]
-%FLAG JOIN_ARRAY
-%FORMAT(10I8)
-       0       0       0  [... all entries zero, elided ...]
-%FLAG IROTAT
-%FORMAT(10I8)
-       0       0       0  [... all entries zero, elided ...]
-%FLAG SOLVENT_POINTERS
-%FORMAT(3I8)
-       3     757       2
-%FLAG ATOMS_PER_MOLECULE
-%FORMAT(10I8)
-      30       3       3  [... 3 for each of the 756 water molecules, elided ...]
-%FLAG BOX_DIMENSIONS
-%FORMAT(5E16.8)
-  9.00000000E+01  3.60047690E+01  3.33989010E+01  3.02224290E+01
-%FLAG RADIUS_SET
-%FORMAT(1a80)
-modified Bondi radii (mbondi)
-%FLAG RADII
-%FORMAT(5E16.8)
-  1.55000000E+00  1.30000000E+00  1.70000000E+00  1.30000000E+00  1.70000000E+00
-  [... peptide radii repeated for residues 2-3, then 1.50000000E+00 8.00000000E-01 8.00000000E-01 per water, elided ...]
-%FLAG SCREEN
-%FORMAT(5E16.8)
-  7.90000000E-01  8.50000000E-01  7.20000000E-01  8.50000000E-01  7.20000000E-01
-  [... peptide screens repeated for residues 2-3, then 8.50000000E-01 for all water atoms ...]
8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 
8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 
8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 
8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 
8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 
8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 
8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 
8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 - 8.50000000E-01 8.50000000E-01 8.50000000E-01 8.50000000E-01 
-%FLAG IPOL
-%FORMAT(1I8)
-       0
diff --git a/tests/st/sponge/data/WATER_ALA_350_cool_290.rst7 b/tests/st/sponge/data/WATER_ALA_350_cool_290.rst7
deleted file mode 100644
index 3745d796148..00000000000
--- a/tests/st/sponge/data/WATER_ALA_350_cool_290.rst7
+++ /dev/null
@@ -1,1152 +0,0 @@
-ALA
-  2298
-  13.9408841  13.5690108  15.1325142  14.5245211  12.7447128  15.1325131
 [... deleted coordinate lines continue in the same pattern, six 12.7-format fields (two atoms) per line ...]
17.8437270 20.8952330 3.6021500 16.5331690 - 21.6044030 3.1804720 16.0478970 20.1792270 3.6696730 15.9015040 - 33.6852480 15.8024380 22.2344840 34.2811550 15.4808950 21.5579260 - 33.9430630 15.3296200 23.0258110 25.2921080 13.7901880 27.9845190 - 25.0512440 14.0576440 27.0975710 26.2323720 13.9607610 28.0396050 - 34.2364690 16.4519680 26.0642570 33.7767660 17.2035490 25.6900590 - 33.6077150 15.7326150 26.0057370 21.0615310 12.5462920 28.4541290 - 20.3349370 11.9268210 28.3867240 21.1224460 12.9403530 27.5839370 - 24.6660410 14.7607920 25.2390700 25.1825430 14.1495280 24.7138960 - 25.3157790 15.2636700 25.7301710 27.4055910 15.6516410 26.0659680 - 27.6933720 15.3970710 25.1892690 27.9179130 15.0993770 26.6565250 - 19.3445310 15.8637310 21.6638050 19.1767350 15.2307990 22.3619940 - 18.4779820 16.0696710 21.3132240 20.9508010 14.5570540 19.1088190 - 20.4255630 14.9378570 18.4050150 20.8752590 15.1821310 19.8297900 - 22.7115770 12.6217650 17.9883770 22.0824650 13.0902750 18.5369600 - 22.8263600 11.7766870 18.4229960 30.1665490 6.6723310 4.1384970 - 30.0859540 5.7577920 3.8676680 29.7529910 7.1690380 3.4324680 - 28.2983530 8.4502330 6.0194240 27.5225630 7.9702770 6.3092810 - 28.9496460 7.7707980 5.8450400 22.3149360 8.7622140 6.8970000 - 21.4834240 8.5070850 7.2966510 22.9230340 8.8332910 7.6327920 - 32.3190960 12.8562520 4.7993370 31.8572490 12.0849760 5.1280540 - 33.1316510 12.8828590 5.3045820 22.2950620 12.7962230 7.0928020 - 22.5978660 11.9059970 6.9138240 23.0394210 13.3532450 6.8650270 - 18.6874320 6.3229680 6.5652090 19.2590430 5.5551880 6.5655730 - 18.6875860 6.6218440 5.6558700 28.5454270 15.1420530 4.4197360 - 27.9265710 15.5978030 3.8491800 29.4006050 15.2971990 4.0187060 - 27.7392520 11.1259520 6.0264400 27.6700800 10.1849460 5.8653530 - 28.5012380 11.2160930 6.5986970 28.7697690 3.9329900 6.2533330 - 29.1462510 3.0601780 6.1407000 28.6717200 4.2688600 5.3623770 - 20.5485860 4.2274540 6.9603360 21.2295970 4.4912910 7.5790760 - 20.2333910 3.3875410 7.2941370 24.7410430 5.2741900 6.0475950 - 25.2488650 5.7722870 6.6880970 25.2723670 4.4977100 5.8715310 - 24.2749380 9.5535640 8.4912870 23.9418760 10.1804560 9.1333940 - 24.4516060 10.0797520 7.7114540 32.7883290 3.3680720 5.9589210 - 32.0838860 3.6565610 6.5392320 33.1495450 4.1780830 5.5988770 - 23.4868480 11.5216440 10.3062050 24.1042480 12.1967520 10.0246500 - 22.6261600 11.9304420 10.2149550 31.9404810 14.8299980 6.9361870 - 31.9531970 14.4262270 6.0684130 31.3880030 15.6047270 6.8323300 - 33.2350230 11.5516200 2.0252040 33.1246560 10.6090530 2.1501460 - 33.2318480 11.9131840 2.9114800 31.3390250 9.7329820 8.2644480 - 31.7559040 9.5189930 7.4297940 32.0630070 9.9902610 8.8353100 - 32.2494360 4.9186360 9.3015590 31.8365540 5.7440070 9.5555580 - 31.5170310 4.3331400 9.1091950 24.3827150 7.7046260 4.1634260 - 23.9784150 6.9923510 3.6680160 24.4690270 7.3599500 5.0522300 - 24.8508850 11.3913820 6.3273470 24.5537140 10.7859290 5.6481250 - 25.8008670 11.4291130 6.2162830 23.0955770 4.4789470 7.9433300 - 23.6470360 4.5884810 7.1686550 23.6099280 4.8527070 8.6588530 - 32.9394460 14.9645690 9.9896570 32.4625020 15.7717880 9.7969180 - 32.2556250 14.3124020 10.1422780 23.9427280 11.1295510 12.9781270 - 23.0455810 11.3535480 13.2254670 23.9680150 11.2530450 12.0292670 - 22.7698500 8.0008830 11.5538640 22.6306270 8.8731630 11.9226040 - 21.8879860 7.6581640 11.4086580 29.0927880 12.9184700 2.8603770 - 29.5323790 13.6609030 2.4459160 29.0314770 13.1611270 3.7842730 - 24.5678850 13.9430530 13.7623790 24.5394280 14.3473480 12.8952220 - 24.4043820 
13.0140890 13.5995390 18.7217150 9.7706380 7.4709550 - 18.7997570 9.9055150 6.5265270 19.3358830 9.0622270 7.6637670 - 19.9664780 11.4706570 3.8815730 19.5592860 11.6121160 3.0269330 - 20.8577790 11.1866730 3.6786940 29.7843660 15.6198480 10.7882780 - 30.2988330 15.5306490 11.5905210 28.9412740 15.9658180 11.0810690 - 24.2386440 15.6852350 1.9938400 24.0628920 14.7687890 1.7806200 - 23.3842180 16.0439640 2.2336200 31.0274120 15.0404090 2.8513270 - 31.4831070 14.2593470 3.1651890 31.3666640 15.7510910 3.3954450 - 18.7776670 10.8120550 10.0104010 18.9407710 10.3425910 9.1923380 - 19.2429120 10.3044110 10.6752850 27.8365250 2.8094830 10.6499900 - 27.3744620 3.0177850 11.4619840 27.4343360 1.9954180 10.3470570 - 32.8369760 4.0303780 11.7796560 31.9863350 3.7309530 12.1005630 - 32.6771550 4.2745790 10.8680380 29.9319470 5.5305530 13.8795350 - 30.2572880 4.6969280 13.5397580 30.7199800 6.0534050 14.0273680 - 23.3409660 5.0659880 2.7745830 23.1150580 4.8497050 3.6792450 - 22.5602190 5.4988340 2.4291800 25.7808870 5.9645460 13.4406100 - 25.6852470 5.0240190 13.2906500 26.6240850 6.1860530 13.0454170 - 29.1772290 3.7086790 3.4039760 29.5701860 4.0258990 2.5908450 - 29.5389140 2.8298670 3.5184280 27.4864460 2.4594100 14.6636620 - 26.7962780 2.2666300 15.2982710 28.2296760 2.7428740 15.1960920 - 29.7627430 6.2992540 7.4990540 29.0308550 6.8079350 7.8480640 - 29.3651770 5.4822520 7.1979320 26.2004820 2.9746740 5.9043030 - 26.2040880 2.5336690 5.0547570 27.0955940 3.2964070 6.0114850 - 22.0507200 2.8573230 4.8637240 21.7107250 3.3348460 5.6204290 - 21.2884520 2.4038350 4.5038340 20.3515650 10.1871980 12.1699820 - 19.9670440 9.5465650 12.7682780 20.6627840 10.8908100 12.7394550 - 26.5012790 7.0348520 7.6508440 26.8682920 6.5607090 8.3969780 - 26.3117850 7.9090090 7.9916780 24.2912160 6.0757160 9.9692650 - 23.9869230 6.9549140 10.1943010 25.2458620 6.1356950 10.0050090 - 33.5781930 9.2783510 6.3585890 34.2981050 9.7738800 5.9682000 - 33.3763750 8.6001400 5.7139760 28.9600730 9.3492570 12.2454890 - 28.8938040 8.4288240 11.9912480 28.3305870 9.4468010 12.9599530 - 22.8631350 9.9498530 4.2038310 23.3810020 9.1466790 4.1495200 - 22.4115370 9.8877670 5.0455140 20.3475890 6.9159900 11.1164010 - 19.9890830 7.5813750 11.7037350 20.0530450 6.0843450 11.4876630 - 23.5489010 7.3744140 14.0003560 23.2489150 7.5201320 13.1031380 - 24.3200250 6.8149430 13.9076950 28.8917460 12.1713140 11.9958690 - 28.9062000 11.2394100 11.7777600 28.1842560 12.2572900 12.6348460 - 30.6328260 10.5380650 4.8411120 30.9852080 9.9408780 4.1812490 - 29.7490590 10.2124880 5.0119370 26.7191540 11.2978340 2.2481530 - 27.4062670 11.8447550 2.6289120 25.9323960 11.8419070 2.2831140 - 26.0218750 8.3024370 1.8606890 26.4781530 9.1129620 2.0867080 - 25.6706960 7.9847510 2.6925380 28.3251360 13.8130710 9.0899190 - 28.9693580 14.0190090 9.7672610 28.6880800 13.0519110 8.6370040 - 29.5983440 11.8163060 7.8076640 30.2405670 11.1156220 7.9208810 - 30.1232790 12.5961500 7.6273530 31.3721060 12.6157960 11.0112700 - 31.7077300 13.2024650 11.6890620 30.4521400 12.4850670 11.2410490 - 27.2009640 12.6649290 14.1800760 26.8806510 11.9458530 14.7246450 - 26.5268000 13.3404360 14.2536820 30.4163240 2.8975450 9.5437560 - 30.7844060 2.0770660 9.8717130 29.5561440 2.9577340 9.9593210 - 25.7509300 4.1318340 1.7683000 25.0425450 4.6004680 2.2096610 - 26.0353780 3.4740350 2.4028190 27.3765230 5.5515040 9.7675530 - 27.8750480 6.0260860 10.4327380 27.5188180 4.6278560 9.9745560 - 26.8701520 16.0924300 2.4806770 27.1243020 16.0545690 1.5586140 - 25.9214000 15.9657630 
2.4735020 32.8754100 7.0116180 4.0565280 - 31.9344240 7.0266410 4.2312970 33.1898170 6.2252140 4.5025580 - 22.3497210 16.2756240 5.4036670 23.0859690 15.7271700 5.6745240 - 22.7338130 17.1412250 5.2642660 30.1920120 11.5682180 14.5397660 - 29.5858280 11.5913780 13.7993410 29.8874230 10.8365220 15.0764910 - 31.6220490 15.2962470 13.1507070 32.5742240 15.2120490 13.1007310 - 31.4748710 15.9604180 13.8240860 19.0753300 7.9475770 13.5358300 - 19.5792320 7.3683050 14.1074530 18.3360720 8.2316000 14.0734670 - 26.1291240 9.3546760 13.0693820 25.3636910 9.9231760 12.9848490 - 26.3107200 9.0622970 12.1762050 33.2714590 11.0852090 13.6268840 - 34.0124060 11.4227850 13.1236300 32.5087270 11.2544150 13.0738610 - 21.1076380 6.0905360 14.4308370 21.1481720 5.7919230 15.3393600 - 21.9621880 6.4912970 14.2715890 23.0482190 4.6697460 12.3982860 - 22.5178650 4.1788410 11.7706200 23.7170260 5.0981660 11.8640790 - 31.9460250 9.1366920 12.1624050 32.3601270 9.2912000 13.0114470 - 31.0111250 9.2622140 12.3249830 32.4182420 6.4763080 14.1663200 - 33.0003190 5.7215400 14.0783680 32.5944190 7.0070520 13.3894680 - 26.7075480 9.0887400 10.1319980 27.1263590 9.9320130 9.9596220 - 25.7884110 9.2259440 9.9026760 29.0439380 6.8203580 11.5157820 - 29.1635590 6.2244720 12.2552660 29.9033380 6.8573270 11.0959120 - 31.3827280 7.3740770 10.0920330 30.8562500 7.8983390 9.4885470 - 31.7258620 8.0068160 10.7230060 24.5582900 15.3940410 11.2477710 - 24.0203990 16.1186470 10.9286410 24.8116860 14.9170750 10.4575050 - 25.8108150 3.3624930 12.7733640 26.4007840 3.0540780 13.4611460 - 24.9928390 2.8904680 12.9293540 30.6301580 2.9790740 13.0066660 - 29.9417100 2.3172280 12.9416650 31.3507600 2.5329010 13.4515120 - 27.4145110 16.6291620 11.5064360 27.3403810 17.5326760 11.8136730 - 26.5123950 16.3103520 11.4786430 18.8172400 13.9566210 4.8097980 - 19.1475410 13.0887270 4.5776620 19.2463400 14.1624730 5.6402940 - 23.4134320 12.2875220 2.4699950 23.1129860 12.1640190 3.3703860 - 23.3071810 11.4283810 2.0615680 19.3062610 14.8236800 7.5479610 - 18.8693600 15.0037600 8.3803760 19.8740290 15.5811420 7.4061330 - 19.1387040 7.3357030 3.9068490 18.4835880 7.0668670 3.2628190 - 19.9764990 7.2261510 3.4570470 33.6276430 11.0505780 9.9333080 - 32.8136270 11.4802940 10.1958870 34.0350630 11.6624190 9.3202130 - 20.1762970 7.7209600 8.4471830 20.5436010 7.6337000 9.3267850 - 19.6073790 6.9576150 8.3478690 19.7278970 4.6318690 12.3530250 - 18.8335020 4.5985450 12.6923960 20.2676520 4.8305480 13.1181500 - 33.6852480 15.8024380 3.4601350 34.2811550 15.4808950 2.7835770 - 33.9430630 15.3296200 4.2514620 21.5915050 12.2342290 13.6091980 - 21.5484680 11.8635200 14.4906440 21.7226060 13.1719860 13.7493800 - 25.2921080 13.7901880 9.2101700 25.0512440 14.0576440 8.3232220 - 26.2323720 13.9607610 9.2652560 21.4303080 15.1804540 10.7289760 - 20.8363400 15.0489240 11.4679800 21.4226920 14.3442680 10.2631860 - 34.2364690 16.4519680 7.2899080 33.7767660 17.2035490 6.9157100 - 33.6077150 15.7326150 7.2313880 21.0615310 12.5462920 9.6797800 - 20.3349370 11.9268210 9.6123750 21.1224460 12.9403530 8.8095880 - 24.6660410 14.7607920 6.4647210 25.1825430 14.1495280 5.9395470 - 25.3157790 15.2636700 6.9558220 27.4055910 15.6516410 7.2916190 - 27.6933720 15.3970710 6.4149200 27.9179130 15.0993770 7.8821760 - 19.3445310 15.8637310 2.8894560 19.1767350 15.2307990 3.5876450 - 18.4779820 16.0696710 2.5388750 27.4728270 16.5732510 14.6584390 - 28.3995580 16.3376780 14.7020350 27.4578700 17.4004510 14.1770420 - 11.3922000 25.4466800 22.9128460 11.3116050 24.5321410 22.6420170 
- 10.9786420 25.9433870 22.2068170 9.5240040 27.2245820 24.7937730 - 8.7482140 26.7446260 25.0836300 10.1752970 26.5451470 24.6193890 - 3.5405870 27.5365630 25.6713490 2.7090750 27.2814340 26.0710000 - 4.1486850 27.6076400 26.4071410 13.5447470 31.6306010 23.5736860 - 13.0829000 30.8593250 23.9024030 14.3573020 31.6572080 24.0789310 - 8.9649030 29.9003010 24.8007890 8.8957310 28.9592950 24.6397020 - 9.7268890 29.9904420 25.3730460 9.9954200 22.7073390 25.0276820 - 10.3719020 21.8345270 24.9150490 9.8973710 23.0432090 24.1367260 - 4.1259210 21.0799020 20.4894260 4.2325960 21.9802750 20.7963020 - 3.2151570 21.0316990 20.1988910 13.7778320 20.2023830 21.9605160 - 14.5987940 19.7120400 22.0030640 13.8112400 20.7921490 22.7137000 - 5.9666940 24.0485390 24.8219440 6.4745160 24.5466360 25.4624460 - 6.4980180 23.2720590 24.6458800 10.6374130 28.8003260 20.5872760 - 10.0191400 29.2652710 20.0235480 11.1497380 29.4930740 21.0042270 - 5.5005890 28.3279130 27.2656360 5.1675270 28.9548050 27.9077430 - 5.6772570 28.8541010 26.4858030 14.0139800 22.1424210 24.7332700 - 13.3095370 22.4309100 25.3135810 14.3751960 22.9524320 24.3732260 - 3.9318370 20.3993910 27.0953310 4.2735180 21.2435960 26.8007180 - 2.9934180 20.5477650 27.2118650 17.0826720 21.5879000 25.1241910 - 16.1351630 21.6217980 25.2557340 17.4076710 22.4017950 25.5091270 - 7.6146260 17.8876180 23.4197900 7.9221240 17.3323070 22.7033400 - 6.6846960 18.0220640 23.2370890 14.4606740 30.3259690 20.7995530 - 14.3503070 29.3834020 20.9244950 14.4574990 30.6875330 21.6858290 - 12.5646760 28.5073310 27.0387970 12.9815550 28.2933420 26.2041430 - 13.2886580 28.7646100 27.6096590 10.0027020 25.9525010 20.1558020 - 9.0756300 25.8862390 20.3846490 10.1551840 26.8905140 20.0413260 - 9.9896100 22.2242010 17.1525180 9.0499770 22.3855970 17.2377640 - 10.3079360 22.9427690 16.6061170 16.5108860 26.9060530 27.1304210 - 15.6827490 26.9753620 26.6554340 17.1059340 27.4914840 26.6620040 - 14.0324310 27.5145310 20.6593270 13.4136730 27.1764860 20.0119570 - 14.0148760 26.8705070 21.3672460 13.4750870 23.6929850 28.0759080 - 13.0622050 24.5183560 28.3299070 12.7426820 23.1074890 27.8835440 - 5.6083660 26.4789750 22.9377750 5.2040660 25.7667000 22.4423650 - 5.6946780 26.1342990 23.8265790 6.0765360 30.1657310 25.1016960 - 5.7793650 29.5602780 24.4224740 7.0265180 30.2034620 24.9906320 - 4.3212280 23.2532960 26.7176790 4.8726870 23.3628300 25.9430040 - 4.8355790 23.6270560 27.4332020 12.2089010 26.7532120 17.6666020 - 12.6482290 25.9154550 17.5203840 12.9217430 27.3861520 17.7530160 - 2.3088290 25.3734730 20.4384400 2.1885890 26.0996820 19.8265670 - 1.9989180 24.6037640 19.9612310 4.7751950 18.7169800 23.8208070 - 4.3320100 19.4979340 23.4892590 5.3336940 19.0399690 24.5279010 - 7.8910770 18.3380500 18.7013900 7.2664950 18.3340370 19.4267210 - 7.8040890 17.4711460 18.3049870 6.8127850 19.2617130 25.6797670 - 7.1100180 20.1679960 25.5989690 7.3613410 18.7735910 25.0657240 - 4.5666170 23.8403370 21.5489320 4.3407090 23.6240540 22.4535940 - 3.7858700 24.2731830 21.2035290 11.7963930 23.8466740 20.1479970 - 11.1972820 24.5702540 19.9643590 12.3443540 23.7843520 19.3656420 - 10.4028800 22.4830280 22.1783250 10.7958370 22.8002480 21.3651940 - 10.7645650 21.6042160 22.2927770 10.9883940 25.0736030 26.2734030 - 10.2565060 25.5822840 26.6224130 10.5908280 24.2566010 25.9722810 - 7.4261330 21.7490230 24.6786520 7.4297390 21.3080180 23.8291060 - 8.3212450 22.0707560 24.7858340 3.2763710 21.6316720 23.6380730 - 2.9363760 22.1091950 24.3947780 2.5141030 21.1781840 23.2781830 - 
7.4881480 22.7502310 17.6693280 7.6094300 22.7088790 18.6179100 - 7.1029990 23.6126460 17.5140010 11.4509850 20.5269710 25.6856970 - 11.4682590 20.9778360 26.5298820 11.2025430 19.6275350 25.8990410 - 6.5002980 25.0603450 17.0676580 5.9319880 24.6121700 16.4412490 - 7.2091980 25.4196230 16.5341730 7.7269300 25.8092010 26.4251930 - 8.0939430 25.3350580 27.1713270 7.5374360 26.6833580 26.7660270 - 4.0450460 29.0561640 19.3402990 3.2712720 28.5159260 19.1801710 - 4.7745090 28.5368250 19.0020750 14.8038440 28.0527000 25.1329380 - 15.5237560 28.5482290 24.7425490 14.6020260 27.3744890 24.4883250 - 10.2648380 19.4464960 19.2758770 9.4512990 18.9478130 19.2004080 - 10.0710330 20.2863210 18.8595120 12.9796770 23.8184610 17.5720930 - 13.8676390 24.1301840 17.3972270 12.9501860 22.9456860 17.1801510 - 4.0887860 28.7242020 22.9781800 4.6066530 27.9210280 22.9238690 - 3.6371880 28.6621160 23.8198630 8.9757360 29.9908300 18.2509100 - 9.3173060 30.7350530 17.7552420 8.1470070 30.3039420 18.6133930 - 15.2511040 25.1296450 17.0400790 15.1674760 25.5610670 16.1897230 - 15.9705230 25.5894800 17.4727610 11.8584770 29.3124140 23.6154610 - 12.2108590 28.7152270 22.9555980 10.9747100 28.9868370 23.7862860 - 7.9448050 30.0721830 21.0225020 8.6319180 30.6191040 21.4032610 - 7.1580470 30.6162560 21.0574630 7.2475260 27.0767860 20.6350380 - 7.7038040 27.8873110 20.8610570 6.8963470 26.7591000 21.4668870 - 3.1487040 17.4205510 21.1072900 3.0201670 17.4123410 22.0557820 - 2.3249060 17.7588600 20.7563910 10.8239950 30.5906550 26.5820130 - 11.4662180 29.8899710 26.6952300 11.3489300 31.3704990 26.4017020 - 14.6695670 30.8755170 16.3059270 14.4433250 30.1585610 16.8983910 - 14.2791870 30.6267610 15.4681020 14.2511920 28.7927110 17.9673430 - 13.9633620 29.2539800 18.7551310 15.0464090 28.3324880 18.2357650 - 11.6419750 21.6718940 28.3181050 12.0100570 20.8514150 28.6460620 - 10.7817950 21.7320830 28.7336700 15.0081780 20.6275880 19.1268610 - 14.3794940 20.7321550 18.4126840 14.5133700 20.8472660 19.9162490 - 6.9765810 22.9061830 20.5426490 6.2681960 23.3748170 20.9840100 - 7.2610290 22.2483840 21.1771680 11.0081240 19.6312490 21.9822280 - 10.8849020 19.6498620 21.0331790 11.9565800 19.6791430 22.1020680 - 5.1800830 21.2146910 17.9177200 6.0291360 21.6517060 17.8517390 - 4.8132100 21.5267170 18.7449260 14.1010610 25.7859670 22.8308770 - 13.1600750 25.8009900 23.0056460 14.4154680 24.9995630 23.2769070 - 4.0792250 27.4679270 16.3912730 4.8455520 27.3560630 16.9538160 - 4.1566940 26.7717240 15.7389500 2.7739010 24.9004340 17.0745620 - 2.8542600 23.9531850 17.1863020 3.6658760 25.2311220 17.1806280 - 7.2033430 29.4433450 15.8648080 7.0984070 28.8285800 16.5909460 - 7.0357650 28.9199340 15.0811090 10.4436160 28.5053480 16.4690070 - 9.8946260 28.9522780 17.1132810 11.0655420 28.0002730 16.9927740 - 6.1111280 27.4644400 18.3655370 6.5327700 27.2984270 19.2086760 - 6.2362120 26.6544630 17.8710510 8.6238920 26.4882010 15.6132470 - 9.1444580 25.7731030 15.2473620 9.2707320 27.1267880 15.9132980 - 5.0070180 23.5377630 15.3113530 4.8059240 22.6744990 15.6726870 - 4.8362230 23.4510990 14.3735130 3.8052980 20.6378250 15.2991410 - 4.1078880 20.6445680 14.3910560 4.5447570 20.2876110 15.7959070 - 7.0237910 19.5812870 16.2847610 6.6759910 18.9036860 15.7050060 - 7.2250680 19.1202380 17.0991000 2.8283110 29.9420670 16.1321210 - 3.2657980 30.4850340 16.7878790 3.3130840 29.1167620 16.1417550 - 4.6390830 31.0618710 21.2443440 4.3386370 30.9383680 22.1447350 - 4.5328320 30.2027300 20.8359170 17.4082750 30.8415110 15.9470120 - 16.5775710 
30.9091760 16.4177350 17.5854580 31.7319230 15.6436980 - 16.8639210 27.5862590 18.4504320 17.6258120 28.1559240 18.5564010 - 16.8793580 27.0189790 19.2212630 17.8699180 28.4412690 23.2946250 - 18.2408010 27.6580010 22.8882220 18.5101110 29.1301790 23.1163550 - 16.9158380 27.8796960 15.6076840 17.1907220 28.7875840 15.4796060 - 16.7269120 27.8159490 16.5438830 13.3133990 20.9041810 16.9030470 - 13.7318870 20.7172790 16.0627130 13.0070050 20.0520860 17.2133400 - 16.8123720 25.8165950 20.6006270 16.4367320 25.0493880 20.1687560 - 16.0826840 26.1988650 21.0881200 16.0874680 24.0423440 23.9013510 - 16.6112160 23.5486090 23.2703680 16.6734100 24.1773800 24.6461080 - 16.2664400 24.1263310 27.4233060 15.3167110 24.0926850 27.5378050 - 16.4477410 25.0363420 27.1882830 1.7683000 18.4304320 15.5734340 - 2.4911130 18.9713260 15.8915560 2.0407520 18.1567740 14.6975880 - 15.5774650 23.2996790 19.8405920 16.0733770 23.0667660 20.6254800 - 15.3790720 22.4608630 19.4243560 17.0061810 21.9357740 22.3743600 - 16.8961090 21.7813780 23.3125870 17.7669400 21.4078680 22.1319010 - 12.1014610 17.2480260 27.7063280 11.2274450 17.5815370 27.5036210 - 11.9925360 16.7753440 28.5315150 9.8489430 18.1650100 26.6456190 - 9.0661220 18.6395600 26.9252950 9.5552360 17.2604930 26.5369370 - 14.4010500 18.8673200 24.6905280 14.8418590 18.8118300 23.8426880 - 14.1318090 19.7831210 24.7615460 16.2649070 18.4988440 22.7014100 - 16.2037470 17.5549660 22.5545120 17.0441100 18.6068530 23.2467560 - 7.7343650 20.5836850 22.1572220 6.9218160 20.2093740 21.8168100 - 8.2385290 19.8281930 22.4593410 12.2368630 17.2479300 23.3955290 - 12.8398000 17.8199510 23.8703810 11.4961620 17.8118740 23.1729190 - 3.5654990 18.9961630 17.7071880 4.0947750 18.2091430 17.5779830 - 4.1983310 19.6752820 17.9407410 5.7592830 18.7336770 20.7617270 - 5.1295570 18.0324080 20.9287400 5.2197970 19.4940900 20.5450430 - 3.9372280 31.3961140 17.9883770 3.3081160 31.8646240 18.5369600 - 4.0520110 30.5510360 18.4229960 11.3922000 25.4466800 4.1384970 - 11.3116050 24.5321410 3.8676680 10.9786420 25.9433870 3.4324680 - 9.5240040 27.2245820 6.0194240 8.7482140 26.7446260 6.3092810 - 10.1752970 26.5451470 5.8450400 3.5405870 27.5365630 6.8970000 - 2.7090750 27.2814340 7.2966510 4.1486850 27.6076400 7.6327920 - 13.5447470 31.6306010 4.7993370 13.0829000 30.8593250 5.1280540 - 14.3573020 31.6572080 5.3045820 8.9649030 29.9003010 6.0264400 - 8.8957310 28.9592950 5.8653530 9.7268890 29.9904420 6.5986970 - 9.9954200 22.7073390 6.2533330 10.3719020 21.8345270 6.1407000 - 9.8973710 23.0432090 5.3623770 13.7778320 20.2023830 3.1861670 - 14.5987940 19.7120400 3.2287150 13.8112400 20.7921490 3.9393510 - 5.9666940 24.0485390 6.0475950 6.4745160 24.5466360 6.6880970 - 6.4980180 23.2720590 5.8715310 18.0677870 22.2190580 9.7505300 - 17.3630040 22.6072290 9.2320410 18.3335790 22.9140870 10.3526230 - 4.2503040 18.6883950 10.3139370 4.2521480 19.2056340 9.5085270 - 3.4207350 18.2114130 10.2909300 5.5005890 28.3279130 8.4912870 - 5.1675270 28.9548050 9.1333940 5.6772570 28.8541010 7.7114540 - 14.0139800 22.1424210 5.9589210 13.3095370 22.4309100 6.5392320 - 14.3751960 22.9524320 5.5988770 3.9318370 20.3993910 8.3209820 - 4.2735180 21.2435960 8.0263690 2.9934180 20.5477650 8.4375160 - 4.7124990 30.2959930 10.3062050 5.3298990 30.9711010 10.0246500 - 3.8518110 30.7047910 10.2149550 17.0826720 21.5879000 6.3498420 - 16.1351630 21.6217980 6.4813850 17.4076710 22.4017950 6.7347780 - 7.6146260 17.8876180 4.6454410 7.9221240 17.3323070 3.9289910 - 6.6846960 18.0220640 4.4627400 14.4606740 
30.3259690 2.0252040 - 14.3503070 29.3834020 2.1501460 14.4574990 30.6875330 2.9114800 - 12.5646760 28.5073310 8.2644480 12.9815550 28.2933420 7.4297940 - 13.2886580 28.7646100 8.8353100 16.5108860 26.9060530 8.3560720 - 15.6827490 26.9753620 7.8810850 17.1059340 27.4914840 7.8876550 - 13.4750870 23.6929850 9.3015590 13.0622050 24.5183560 9.5555580 - 12.7426820 23.1074890 9.1091950 5.6083660 26.4789750 4.1634260 - 5.2040660 25.7667000 3.6680160 5.6946780 26.1342990 5.0522300 - 6.0765360 30.1657310 6.3273470 5.7793650 29.5602780 5.6481250 - 7.0265180 30.2034620 6.2162830 4.3212280 23.2532960 7.9433300 - 4.8726870 23.3628300 7.1686550 4.8355790 23.6270560 8.6588530 - 5.1683790 29.9039000 12.9781270 4.2712320 30.1278970 13.2254670 - 5.1936660 30.0273940 12.0292670 3.9955010 26.7752320 11.5538640 - 3.8562780 27.6475120 11.9226040 3.1136370 26.4325130 11.4086580 - 17.1483430 30.6913970 12.2187970 17.2746490 30.4525850 11.3005150 - 17.1904810 29.8598510 12.6910050 17.0007440 22.7446430 13.1837710 - 16.3851710 23.2526100 12.6553170 16.6803860 21.8444760 13.1263650 - 4.7751950 18.7169800 5.0464580 4.3320100 19.4979340 4.7149100 - 5.3336940 19.0399690 5.7535520 9.0621760 21.5838320 10.6499900 - 8.6001130 21.7921340 11.4619840 8.6599870 20.7697670 10.3470570 - 14.0626270 22.8047270 11.7796560 13.2119860 22.5053020 12.1005630 - 13.9028060 23.0489280 10.8680380 11.1575980 24.3049020 13.8795350 - 11.4829390 23.4712770 13.5397580 11.9456310 24.8277540 14.0273680 - 6.8127850 19.2617130 6.9054180 7.1100180 20.1679960 6.8246200 - 7.3613410 18.7735910 6.2913750 4.5666170 23.8403370 2.7745830 - 4.3407090 23.6240540 3.6792450 3.7858700 24.2731830 2.4291800 - 7.0065380 24.7388950 13.4406100 6.9108980 23.7983680 13.2906500 - 7.8497360 24.9604020 13.0454170 10.4028800 22.4830280 3.4039760 - 10.7958370 22.8002480 2.5908450 10.7645650 21.6042160 3.5184280 - 8.7120970 21.2337590 14.6636620 8.0219290 21.0409790 15.2982710 - 9.4553270 21.5172230 15.1960920 10.9883940 25.0736030 7.4990540 - 10.2565060 25.5822840 7.8480640 10.5908280 24.2566010 7.1979320 - 7.4261330 21.7490230 5.9043030 7.4297390 21.3080180 5.0547570 - 8.3212450 22.0707560 6.0114850 3.2763710 21.6316720 4.8637240 - 2.9363760 22.1091950 5.6204290 2.5141030 21.1781840 4.5038340 - 11.4509850 20.5269710 6.9113480 11.4682590 20.9778360 7.7555330 - 11.2025430 19.6275350 7.1246920 7.7269300 25.8092010 7.6508440 - 8.0939430 25.3350580 8.3969780 7.5374360 26.6833580 7.9916780 - 5.5168670 24.8500650 9.9692650 5.2125740 25.7292630 10.1943010 - 6.4715130 24.9100440 10.0050090 14.8038440 28.0527000 6.3585890 - 15.5237560 28.5482290 5.9682000 14.6020260 27.3744890 5.7139760 - 10.1857240 28.1236060 12.2454890 10.1194550 27.2031730 11.9912480 - 9.5562380 28.2211500 12.9599530 4.0887860 28.7242020 4.2038310 - 4.6066530 27.9210280 4.1495200 3.6371880 28.6621160 5.0455140 - 4.7745520 26.1487630 14.0003560 4.4745660 26.2944810 13.1031380 - 5.5456760 25.5892920 13.9076950 10.1173970 30.9456630 11.9958690 - 10.1318510 30.0137590 11.7777600 9.4099070 31.0316390 12.6348460 - 16.3285180 27.5863970 11.3427270 15.6033370 28.1792750 11.1456660 - 16.8560640 27.5787510 10.5440640 11.8584770 29.3124140 4.8411120 - 12.2108590 28.7152270 4.1812490 10.9747100 28.9868370 5.0119370 - 7.9448050 30.0721830 2.2481530 8.6319180 30.6191040 2.6289120 - 7.1580470 30.6162560 2.2831140 7.2475260 27.0767860 1.8606890 - 7.7038040 27.8873110 2.0867080 6.8963470 26.7591000 2.6925380 - 3.1487040 17.4205510 2.3329410 3.0201670 17.4123410 3.2814330 - 2.3249060 17.7588600 1.9820420 10.8239950 30.5906550 
7.8076640 - 11.4662180 29.8899710 7.9208810 11.3489300 31.3704990 7.6273530 - 11.6419750 21.6718940 9.5437560 12.0100570 20.8514150 9.8717130 - 10.7817950 21.7320830 9.9593210 6.9765810 22.9061830 1.7683000 - 6.2681960 23.3748170 2.2096610 7.2610290 22.2483840 2.4028190 - 8.6021740 24.3258530 9.7675530 9.1006990 24.8004350 10.4327380 - 8.7444690 23.4022050 9.9745560 11.0081240 19.6312490 3.2078790 - 10.8849020 19.6498620 2.2588300 11.9565800 19.6791430 3.3277190 - 15.8088600 20.5426490 10.6103920 15.3062200 21.2831890 10.9497730 - 16.6245260 20.9310490 10.2940710 14.1010610 25.7859670 4.0565280 - 13.1600750 25.8009900 4.2312970 14.4154680 24.9995630 4.5025580 - 11.4176630 30.3425670 14.5397660 10.8114790 30.3657270 13.7993410 - 11.1130740 29.6108710 15.0764910 7.3547750 28.1290250 13.0693820 - 6.5893420 28.6975250 12.9848490 7.5363710 27.8366460 12.1762050 - 14.4971100 29.8595580 13.6268840 15.2380570 30.1971340 13.1236300 - 13.7343780 30.0287640 13.0738610 2.3332890 24.8648850 14.4308370 - 2.3738230 24.5662720 15.3393600 3.1878390 25.2656460 14.2715890 - 4.2738700 23.4440950 12.3982860 3.7435160 22.9531900 11.7706200 - 4.9426770 23.8725150 11.8640790 13.1716760 27.9110410 12.1624050 - 13.5857780 28.0655490 13.0114470 12.2367760 28.0365630 12.3249830 - 13.6438930 25.2506570 14.1663200 14.2259700 24.4958890 14.0783680 - 13.8200700 25.7814010 13.3894680 4.7611000 20.4103320 12.4780150 - 4.6667870 19.7924070 11.7531020 4.1808390 21.1369610 12.2510060 - 7.5272440 18.9676560 9.5107540 7.0789740 18.9900820 8.6653090 - 6.8937640 18.5695580 10.1077840 7.9331990 27.8630890 10.1319980 - 8.3520100 28.7063620 9.9596220 7.0140620 28.0002930 9.9026760 - 10.2695890 25.5947070 11.5157820 10.3892100 24.9988210 12.2552660 - 11.1289890 25.6316760 11.0959120 12.6083790 26.1484260 10.0920330 - 12.0819010 26.6726880 9.4885470 12.9515130 26.7811650 10.7230060 - 7.0364660 22.1368420 12.7733640 7.6264350 21.8284270 13.4611460 - 6.2184900 21.6648170 12.9293540 11.8558090 21.7534230 13.0066660 - 11.1673610 21.0915770 12.9416650 12.5764110 21.3072500 13.4515120 - 4.6390830 31.0618710 2.4699950 4.3386370 30.9383680 3.3703860 - 4.5328320 30.2027300 2.0615680 17.8699180 28.4412690 4.5202760 - 18.2408010 27.6580010 4.1138730 18.5101110 29.1301790 4.3420060 - 14.8532940 29.8249270 9.9333080 14.0392780 30.2546430 10.1958870 - 15.2607140 30.4367680 9.3202130 16.2023340 24.9637470 11.4602300 - 15.9709430 24.7350220 10.5600250 16.4627890 25.8837020 11.4147190 - 14.6411410 20.1893900 14.2382000 15.5181590 20.1977300 14.6216000 - 14.6532250 19.4524740 13.6274350 16.0874680 24.0423440 5.1270020 - 16.6112160 23.5486090 4.4960190 16.6734100 24.1773800 5.8717590 - 16.2664400 24.1263310 8.6489570 15.3167110 24.0926850 8.7634560 - 16.4477410 25.0363420 8.4139340 17.0061810 21.9357740 3.6000110 - 16.8961090 21.7813780 4.5382380 17.7669400 21.4078680 3.3575520 - 15.3527530 17.8751870 10.0583110 15.9899750 18.4886010 10.4242300 - 15.5002420 17.9112530 9.1132340 2.8171560 31.0085780 13.6091980 - 2.7741190 30.6378690 14.4906440 2.9482570 31.9463350 13.7493800 - 8.7232010 19.0278060 12.9658210 8.6620900 19.6199890 13.7153620 - 9.6632390 18.9204020 12.8208470 11.3606570 18.6848120 12.6050500 - 11.9746980 19.0027410 11.9431580 11.9056130 18.1996710 13.2246360 - 1.8827830 17.3642380 12.7189560 2.4310820 17.1830430 11.9555670 - 1.9483140 16.5736490 13.2545870 12.1014610 17.2480260 8.9319790 - 11.2274450 17.5815370 8.7292720 11.9925360 16.7753440 9.7571660 - 13.0086590 19.4551130 10.3161510 12.6799950 18.6402890 9.9363240 - 13.9568890 
19.4120940 10.1927200 9.8489430 18.1650100 7.8712700 - 9.0661220 18.6395600 8.1509460 9.5552360 17.2604930 7.7625880 - 14.4010500 18.8673200 5.9161790 14.8418590 18.8118300 5.0683390 - 14.1318090 19.7831210 5.9871970 16.2649070 18.4988440 3.9270610 - 16.2037470 17.5549660 3.7801630 17.0441100 18.6068530 4.4724070 - 7.7343650 20.5836850 3.3828730 6.9218160 20.2093740 3.0424610 - 8.2385290 19.8281930 3.6849920 12.2368630 17.2479300 4.6211800 - 12.8398000 17.8199510 5.0960320 11.4961620 17.8118740 4.3985700 - 5.7592830 18.7336770 1.9873780 5.1295570 18.0324080 2.1543910 - 5.2197970 19.4940900 1.7706940 5.8848390 18.0120750 14.4226270 - 5.6032660 18.5154350 13.6587090 6.2668090 17.2142500 14.0568400 - 11.3922000 6.6723310 22.9128460 11.3116050 5.7577920 22.6420170 - 10.9786420 7.1690380 22.2068170 9.5240040 8.4502330 24.7937730 - 8.7482140 7.9702770 25.0836300 10.1752970 7.7707980 24.6193890 - 3.5405870 8.7622140 25.6713490 2.7090750 8.5070850 26.0710000 - 4.1486850 8.8332910 26.4071410 13.5447470 12.8562520 23.5736860 - 13.0829000 12.0849760 23.9024030 14.3573020 12.8828590 24.0789310 - 3.5207130 12.7962230 25.8671510 3.8235170 11.9059970 25.6881730 - 4.2650720 13.3532450 25.6393760 9.7710780 15.1420530 23.1940850 - 9.1522220 15.5978030 22.6235290 10.6262560 15.2971990 22.7930550 - 8.9649030 11.1259520 24.8007890 8.8957310 10.1849460 24.6397020 - 9.7268890 11.2160930 25.3730460 9.9954200 3.9329900 25.0276820 - 10.3719020 3.0601780 24.9150490 9.8973710 4.2688600 24.1367260 - 4.1259210 2.3055530 20.4894260 4.2325960 3.2059260 20.7963020 - 3.2151570 2.2573500 20.1988910 5.9666940 5.2741900 24.8219440 - 6.4745160 5.7722870 25.4624460 6.4980180 4.4977100 24.6458800 - 10.6374130 10.0259770 20.5872760 10.0191400 10.4909220 20.0235480 - 11.1497380 10.7187250 21.0042270 5.5005890 9.5535640 27.2656360 - 5.1675270 10.1804560 27.9077430 5.6772570 10.0797520 26.4858030 - 14.0139800 3.3680720 24.7332700 13.3095370 3.6565610 25.3135810 - 14.3751960 4.1780830 24.3732260 15.6302580 12.9855850 26.9960180 - 14.9466100 13.6554960 26.9876850 16.1696870 13.1836500 26.2305040 - 17.0826720 2.8135510 25.1241910 16.1351630 2.8474490 25.2557340 - 17.4076710 3.6274460 25.5091270 13.1661320 14.8299980 25.7105360 - 13.1788480 14.4262270 24.8427620 12.6136540 15.6047270 25.6066790 - 14.4606740 11.5516200 20.7995530 14.3503070 10.6090530 20.9244950 - 14.4574990 11.9131840 21.6858290 12.5646760 9.7329820 27.0387970 - 12.9815550 9.5189930 26.2041430 13.2886580 9.9902610 27.6096590 - 10.0027020 7.1781520 20.1558020 9.0756300 7.1118900 20.3846490 - 10.1551840 8.1161650 20.0413260 9.9896100 3.4498520 17.1525180 - 9.0499770 3.6112480 17.2377640 10.3079360 4.1684200 16.6061170 - 16.5108860 8.1317040 27.1304210 15.6827490 8.2010130 26.6554340 - 17.1059340 8.7171350 26.6620040 14.0324310 8.7401820 20.6593270 - 13.4136730 8.4021370 20.0119570 14.0148760 8.0961580 21.3672460 - 13.4750870 4.9186360 28.0759080 13.0622050 5.7440070 28.3299070 - 12.7426820 4.3331400 27.8835440 5.6083660 7.7046260 22.9377750 - 5.2040660 6.9923510 22.4423650 5.6946780 7.3599500 23.8265790 - 6.0765360 11.3913820 25.1016960 5.7793650 10.7859290 24.4224740 - 7.0265180 11.4291130 24.9906320 4.3212280 4.4789470 26.7176790 - 4.8726870 4.5884810 25.9430040 4.8355790 4.8527070 27.4332020 - 12.2089010 7.9788630 17.6666020 12.6482290 7.1411060 17.5203840 - 12.9217430 8.6118030 17.7530160 10.3184390 12.9184700 21.6347260 - 10.7580300 13.6609030 21.2202650 10.2571280 13.1611270 22.5586220 - 2.3088290 6.5991240 20.4384400 2.1885890 7.3253330 19.8265670 - 1.9989180 
5.8294150 19.9612310 5.4642950 15.6852350 20.7681890 - 5.2885430 14.7687890 20.5549690 4.6098690 16.0439640 21.0079690 - 12.2530630 15.0404090 21.6256760 12.7087580 14.2593470 21.9395380 - 12.5923150 15.7510910 22.1697940 4.5666170 5.0659880 21.5489320 - 4.3407090 4.8497050 22.4535940 3.7858700 5.4988340 21.2035290 - 11.7963930 5.0723250 20.1479970 11.1972820 5.7959050 19.9643590 - 12.3443540 5.0100030 19.3656420 10.4028800 3.7086790 22.1783250 - 10.7958370 4.0258990 21.3651940 10.7645650 2.8298670 22.2927770 - 10.9883940 6.2992540 26.2734030 10.2565060 6.8079350 26.6224130 - 10.5908280 5.4822520 25.9722810 7.4261330 2.9746740 24.6786520 - 7.4297390 2.5336690 23.8291060 8.3212450 3.2964070 24.7858340 - 3.2763710 2.8573230 23.6380730 2.9363760 3.3348460 24.3947780 - 2.5141030 2.4038350 23.2781830 7.4881480 3.9758820 17.6693280 - 7.6094300 3.9345300 18.6179100 7.1029990 4.8382970 17.5140010 - 6.5002980 6.2859960 17.0676580 5.9319880 5.8378210 16.4412490 - 7.2091980 6.6452740 16.5341730 7.7269300 7.0348520 26.4251930 - 8.0939430 6.5607090 27.1713270 7.5374360 7.9090090 26.7660270 - 4.0450460 10.2818150 19.3402990 3.2712720 9.7415770 19.1801710 - 4.7745090 9.7624760 19.0020750 14.8038440 9.2783510 25.1329380 - 15.5237560 9.7738800 24.7425490 14.6020260 8.6001400 24.4883250 - 12.9796770 5.0441120 17.5720930 13.8676390 5.3558350 17.3972270 - 12.9501860 4.1713370 17.1801510 4.0887860 9.9498530 22.9781800 - 4.6066530 9.1466790 22.9238690 3.6371880 9.8877670 23.8198630 - 8.9757360 11.2164810 18.2509100 9.3173060 11.9607040 17.7552420 - 8.1470070 11.5295930 18.6133930 15.2511040 6.3552960 17.0400790 - 15.1674760 6.7867180 16.1897230 15.9705230 6.8151310 17.4727610 - 16.2798290 13.2095220 19.7682920 15.6374450 12.5550840 20.0426640 - 15.8928950 13.6150310 18.9923590 11.8584770 10.5380650 23.6154610 - 12.2108590 9.9408780 22.9555980 10.9747100 10.2124880 23.7862860 - 7.9448050 11.2978340 21.0225020 8.6319180 11.8447550 21.4032610 - 7.1580470 11.8419070 21.0574630 7.2475260 8.3024370 20.6350380 - 7.7038040 9.1129620 20.8610570 6.8963470 7.9847510 21.4668870 - 9.5507870 13.8130710 27.8642680 10.1950090 14.0190090 28.5416100 - 9.9137310 13.0519110 27.4113530 10.8239950 11.8163060 26.5820130 - 11.4662180 11.1156220 26.6952300 11.3489300 12.5961500 26.4017020 - 14.2511920 10.0183620 17.9673430 13.9633620 10.4796310 18.7551310 - 15.0464090 9.5581390 18.2357650 6.4680320 13.5109740 17.8977400 - 6.4590420 14.2439430 17.2821910 5.5478680 13.2641320 17.9904360 - 11.6419750 2.8975450 28.3181050 12.0100570 2.0770660 28.6460620 - 10.7817950 2.9577340 28.7336700 15.0081780 1.8532390 19.1268610 - 14.3794940 1.9578060 18.4126840 14.5133700 2.0729170 19.9162490 - 9.4539920 13.5737980 16.5602590 9.5551900 13.3701970 15.6304570 - 8.5541660 13.3196260 16.7650370 6.9765810 4.1318340 20.5426490 - 6.2681960 4.6004680 20.9840100 7.2610290 3.4740350 21.1771680 - 8.0958030 16.0924300 21.2550260 8.3499530 16.0545690 20.3329630 - 7.1470510 15.9657630 21.2478510 5.1800830 2.4403420 17.9177200 - 6.0291360 2.8773570 17.8517390 4.8132100 2.7523680 18.7449260 - 14.1010610 7.0116180 22.8308770 13.1600750 7.0266410 23.0056460 - 14.4154680 6.2252140 23.2769070 4.0792250 8.6935780 16.3912730 - 4.8455520 8.5817140 16.9538160 4.1566940 7.9973750 15.7389500 - 2.7739010 6.1260850 17.0745620 2.8542600 5.1788360 17.1863020 - 3.6658760 6.4567730 17.1806280 16.1036190 14.1825030 24.5063030 - 16.2283610 15.0056200 24.9786840 16.9701750 13.9715760 24.1587180 - 3.5753720 16.2756240 24.1780160 4.3116200 15.7271700 24.4488730 - 3.9594640 17.1412250 
24.0386150 6.6856030 15.4486550 16.0005730 - 7.4784640 15.7375380 15.5487520 6.1660380 15.0178720 15.3218210 - 7.2033430 10.6689960 15.8648080 7.0984070 10.0542310 16.5909460 - 7.0357650 10.1455850 15.0811090 10.4436160 9.7309990 16.4690070 - 9.8946260 10.1779290 17.1132810 11.0655420 9.2259240 16.9927740 - 6.1111280 8.6900910 18.3655370 6.5327700 8.5240780 19.2086760 - 6.2362120 7.8801140 17.8710510 8.6238920 7.7138520 15.6132470 - 9.1444580 6.9987540 15.2473620 9.2707320 8.3524390 15.9132980 - 5.0070180 4.7634140 15.3113530 4.8059240 3.9001500 15.6726870 - 4.8362230 4.6767500 14.3735130 9.1886950 15.7950130 17.9034210 - 10.0837740 16.1335100 17.9252970 9.2204580 15.0729540 17.2758490 - 2.8283110 11.1677180 16.1321210 3.2657980 11.7106850 16.7878790 - 3.3130840 10.3424130 16.1417550 4.6390830 12.2875220 21.2443440 - 4.3386370 12.1640190 22.1447350 4.5328320 11.4283810 20.8359170 - 16.8639210 8.8119100 18.4504320 17.6258120 9.3815750 18.5564010 - 16.8793580 8.2446300 19.2212630 17.8699180 9.6669200 23.2946250 - 18.2408010 8.8836520 22.8882220 18.5101110 10.3558300 23.1163550 - 16.9158380 9.1053470 15.6076840 17.1907220 10.0132350 15.4796060 - 16.7269120 9.0416000 16.5438830 16.8123720 7.0422460 20.6006270 - 16.4367320 6.2750390 20.1687560 16.0826840 7.4245160 21.0881200 - 16.0874680 5.2679950 23.9013510 16.6112160 4.7742600 23.2703680 - 16.6734100 5.4030310 24.6461080 16.2664400 5.3519820 27.4233060 - 15.3167110 5.3183360 27.5378050 16.4477410 6.2619930 27.1882830 - 15.5774650 4.5253300 19.8405920 16.0733770 4.2924170 20.6254800 - 15.3790720 3.6865140 19.4243560 17.0061810 3.1614250 22.3743600 - 16.8961090 3.0070290 23.3125870 17.7669400 2.6335190 22.1319010 - 14.9108990 15.8024380 22.2344840 15.5068060 15.4808950 21.5579260 - 15.1687140 15.3296200 23.0258110 6.5177590 13.7901880 27.9845190 - 6.2768950 14.0576440 27.0975710 7.4580230 13.9607610 28.0396050 - 15.4621200 16.4519680 26.0642570 15.0024170 17.2035490 25.6900590 - 14.8333660 15.7326150 26.0057370 5.8916920 14.7607920 25.2390700 - 6.4081940 14.1495280 24.7138960 6.5414300 15.2636700 25.7301710 - 8.6312420 15.6516410 26.0659680 8.9190230 15.3970710 25.1892690 - 9.1435640 15.0993770 26.6565250 2.1764520 14.5570540 19.1088190 - 1.6512140 14.9378570 18.4050150 2.1009100 15.1821310 19.8297900 - 16.9329840 16.1158080 20.4302130 17.1082030 15.4295820 19.7863050 - 16.2351410 16.6405130 20.0378800 4.7757490 16.5052920 17.9837440 - 5.5076310 16.2212200 17.4361330 5.0351160 16.2666200 18.8736820 - 3.9372280 12.6217650 17.9883770 3.3081160 13.0902750 18.5369600 - 4.0520110 11.7766870 18.4229960 11.3922000 6.6723310 4.1384970 - 11.3116050 5.7577920 3.8676680 10.9786420 7.1690380 3.4324680 - 9.5240040 8.4502330 6.0194240 8.7482140 7.9702770 6.3092810 - 10.1752970 7.7707980 5.8450400 3.5405870 8.7622140 6.8970000 - 2.7090750 8.5070850 7.2966510 4.1486850 8.8332910 7.6327920 - 13.5447470 12.8562520 4.7993370 13.0829000 12.0849760 5.1280540 - 14.3573020 12.8828590 5.3045820 3.5207130 12.7962230 7.0928020 - 3.8235170 11.9059970 6.9138240 4.2650720 13.3532450 6.8650270 - 9.7710780 15.1420530 4.4197360 9.1522220 15.5978030 3.8491800 - 10.6262560 15.2971990 4.0187060 8.9649030 11.1259520 6.0264400 - 8.8957310 10.1849460 5.8653530 9.7268890 11.2160930 6.5986970 - 9.9954200 3.9329900 6.2533330 10.3719020 3.0601780 6.1407000 - 9.8973710 4.2688600 5.3623770 5.9666940 5.2741900 6.0475950 - 6.4745160 5.7722870 6.6880970 6.4980180 4.4977100 5.8715310 - 18.0677870 3.4447090 9.7505300 17.3630040 3.8328800 9.2320410 - 18.3335790 4.1397380 10.3526230 5.5005890 
9.5535640 8.4912870 - 5.1675270 10.1804560 9.1333940 5.6772570 10.0797520 7.7114540 - 14.0139800 3.3680720 5.9589210 13.3095370 3.6565610 6.5392320 - 14.3751960 4.1780830 5.5988770 4.7124990 11.5216440 10.3062050 - 5.3298990 12.1967520 10.0246500 3.8518110 11.9304420 10.2149550 - 15.6302580 12.9855850 8.2216690 14.9466100 13.6554960 8.2133360 - 16.1696870 13.1836500 7.4561550 17.0826720 2.8135510 6.3498420 - 16.1351630 2.8474490 6.4813850 17.4076710 3.6274460 6.7347780 - 13.1661320 14.8299980 6.9361870 13.1788480 14.4262270 6.0684130 - 12.6136540 15.6047270 6.8323300 14.4606740 11.5516200 2.0252040 - 14.3503070 10.6090530 2.1501460 14.4574990 11.9131840 2.9114800 - 12.5646760 9.7329820 8.2644480 12.9815550 9.5189930 7.4297940 - 13.2886580 9.9902610 8.8353100 16.5108860 8.1317040 8.3560720 - 15.6827490 8.2010130 7.8810850 17.1059340 8.7171350 7.8876550 - 13.4750870 4.9186360 9.3015590 13.0622050 5.7440070 9.5555580 - 12.7426820 4.3331400 9.1091950 5.6083660 7.7046260 4.1634260 - 5.2040660 6.9923510 3.6680160 5.6946780 7.3599500 5.0522300 - 6.0765360 11.3913820 6.3273470 5.7793650 10.7859290 5.6481250 - 7.0265180 11.4291130 6.2162830 4.3212280 4.4789470 7.9433300 - 4.8726870 4.5884810 7.1686550 4.8355790 4.8527070 8.6588530 - 14.1650970 14.9645690 9.9896570 13.6881530 15.7717880 9.7969180 - 13.4812760 14.3124020 10.1422780 5.1683790 11.1295510 12.9781270 - 4.2712320 11.3535480 13.2254670 5.1936660 11.2530450 12.0292670 - 3.9955010 8.0008830 11.5538640 3.8562780 8.8731630 11.9226040 - 3.1136370 7.6581640 11.4086580 10.3184390 12.9184700 2.8603770 - 10.7580300 13.6609030 2.4459160 10.2571280 13.1611270 3.7842730 - 5.7935360 13.9430530 13.7623790 5.7650790 14.3473480 12.8952220 - 5.6300330 13.0140890 13.5995390 11.0100170 15.6198480 10.7882780 - 11.5244840 15.5306490 11.5905210 10.1669250 15.9658180 11.0810690 - 5.4642950 15.6852350 1.9938400 5.2885430 14.7687890 1.7806200 - 4.6098690 16.0439640 2.2336200 12.2530630 15.0404090 2.8513270 - 12.7087580 14.2593470 3.1651890 12.5923150 15.7510910 3.3954450 - 17.0007440 3.9702940 13.1837710 16.3851710 4.4782610 12.6553170 - 16.6803860 3.0701270 13.1263650 9.0621760 2.8094830 10.6499900 - 8.6001130 3.0177850 11.4619840 8.6599870 1.9954180 10.3470570 - 14.0626270 4.0303780 11.7796560 13.2119860 3.7309530 12.1005630 - 13.9028060 4.2745790 10.8680380 11.1575980 5.5305530 13.8795350 - 11.4829390 4.6969280 13.5397580 11.9456310 6.0534050 14.0273680 - 4.5666170 5.0659880 2.7745830 4.3407090 4.8497050 3.6792450 - 3.7858700 5.4988340 2.4291800 7.0065380 5.9645460 13.4406100 - 6.9108980 5.0240190 13.2906500 7.8497360 6.1860530 13.0454170 - 10.4028800 3.7086790 3.4039760 10.7958370 4.0258990 2.5908450 - 10.7645650 2.8298670 3.5184280 8.7120970 2.4594100 14.6636620 - 8.0219290 2.2666300 15.2982710 9.4553270 2.7428740 15.1960920 - 10.9883940 6.2992540 7.4990540 10.2565060 6.8079350 7.8480640 - 10.5908280 5.4822520 7.1979320 7.4261330 2.9746740 5.9043030 - 7.4297390 2.5336690 5.0547570 8.3212450 3.2964070 6.0114850 - 3.2763710 2.8573230 4.8637240 2.9363760 3.3348460 5.6204290 - 2.5141030 2.4038350 4.5038340 7.7269300 7.0348520 7.6508440 - 8.0939430 6.5607090 8.3969780 7.5374360 7.9090090 7.9916780 - 5.5168670 6.0757160 9.9692650 5.2125740 6.9549140 10.1943010 - 6.4715130 6.1356950 10.0050090 14.8038440 9.2783510 6.3585890 - 15.5237560 9.7738800 5.9682000 14.6020260 8.6001400 5.7139760 - 10.1857240 9.3492570 12.2454890 10.1194550 8.4288240 11.9912480 - 9.5562380 9.4468010 12.9599530 4.0887860 9.9498530 4.2038310 - 4.6066530 9.1466790 4.1495200 3.6371880 9.8877670 
5.0455140 - 4.7745520 7.3744140 14.0003560 4.4745660 7.5201320 13.1031380 - 5.5456760 6.8149430 13.9076950 10.1173970 12.1713140 11.9958690 - 10.1318510 11.2394100 11.7777600 9.4099070 12.2572900 12.6348460 - 16.3285180 8.8120480 11.3427270 15.6033370 9.4049260 11.1456660 - 16.8560640 8.8044020 10.5440640 11.8584770 10.5380650 4.8411120 - 12.2108590 9.9408780 4.1812490 10.9747100 10.2124880 5.0119370 - 7.9448050 11.2978340 2.2481530 8.6319180 11.8447550 2.6289120 - 7.1580470 11.8419070 2.2831140 7.2475260 8.3024370 1.8606890 - 7.7038040 9.1129620 2.0867080 6.8963470 7.9847510 2.6925380 - 9.5507870 13.8130710 9.0899190 10.1950090 14.0190090 9.7672610 - 9.9137310 13.0519110 8.6370040 10.8239950 11.8163060 7.8076640 - 11.4662180 11.1156220 7.9208810 11.3489300 12.5961500 7.6273530 - 12.5977570 12.6157960 11.0112700 12.9333810 13.2024650 11.6890620 - 11.6777910 12.4850670 11.2410490 8.4266150 12.6649290 14.1800760 - 8.1063020 11.9458530 14.7246450 7.7524510 13.3404360 14.2536820 - 11.6419750 2.8975450 9.5437560 12.0100570 2.0770660 9.8717130 - 10.7817950 2.9577340 9.9593210 6.9765810 4.1318340 1.7683000 - 6.2681960 4.6004680 2.2096610 7.2610290 3.4740350 2.4028190 - 8.6021740 5.5515040 9.7675530 9.1006990 6.0260860 10.4327380 - 8.7444690 4.6278560 9.9745560 8.0958030 16.0924300 2.4806770 - 8.3499530 16.0545690 1.5586140 7.1470510 15.9657630 2.4735020 - 15.8088600 1.7683000 10.6103920 15.3062200 2.5088400 10.9497730 - 16.6245260 2.1567000 10.2940710 14.1010610 7.0116180 4.0565280 - 13.1600750 7.0266410 4.2312970 14.4154680 6.2252140 4.5025580 - 16.1036190 14.1825030 5.7319540 16.2283610 15.0056200 6.2043350 - 16.9701750 13.9715760 5.3843690 3.5753720 16.2756240 5.4036670 - 4.3116200 15.7271700 5.6745240 3.9594640 17.1412250 5.2642660 - 2.5812000 15.4602310 14.5922250 3.3824730 15.7814360 15.0057650 - 2.0663820 15.0997890 15.3142160 7.3547750 9.3546760 13.0693820 - 6.5893420 9.9231760 12.9848490 7.5363710 9.0622970 12.1762050 - 2.3332890 6.0905360 14.4308370 2.3738230 5.7919230 15.3393600 - 3.1878390 6.4912970 14.2715890 4.2738700 4.6697460 12.3982860 - 3.7435160 4.1788410 11.7706200 4.9426770 5.0981660 11.8640790 - 13.1716760 9.1366920 12.1624050 13.5857780 9.2912000 13.0114470 - 12.2367760 9.2622140 12.3249830 13.6438930 6.4763080 14.1663200 - 14.2259700 5.7215400 14.0783680 13.8200700 7.0070520 13.3894680 - 7.9331990 9.0887400 10.1319980 8.3520100 9.9320130 9.9596220 - 7.0140620 9.2259440 9.9026760 10.2695890 6.8203580 11.5157820 - 10.3892100 6.2244720 12.2552660 11.1289890 6.8573270 11.0959120 - 12.6083790 7.3740770 10.0920330 12.0819010 7.8983390 9.4885470 - 12.9515130 8.0068160 10.7230060 5.7839410 15.3940410 11.2477710 - 5.2460500 16.1186470 10.9286410 6.0373370 14.9170750 10.4575050 - 7.0364660 3.3624930 12.7733640 7.6264350 3.0540780 13.4611460 - 6.2184900 2.8904680 12.9293540 11.8558090 2.9790740 13.0066660 - 11.1673610 2.3172280 12.9416650 12.5764110 2.5329010 13.4515120 - 8.6401620 16.6291620 11.5064360 8.5660320 17.5326760 11.8136730 - 7.7380460 16.3103520 11.4786430 4.6390830 12.2875220 2.4699950 - 4.3386370 12.1640190 3.3703860 4.5328320 11.4283810 2.0615680 - 17.8699180 9.6669200 4.5202760 18.2408010 8.8836520 4.1138730 - 18.5101110 10.3558300 4.3420060 14.8532940 11.0505780 9.9333080 - 14.0392780 11.4802940 10.1958870 15.2607140 11.6624190 9.3202130 - 16.2023340 6.1893980 11.4602300 15.9709430 5.9606730 10.5600250 - 16.4627890 7.1093530 11.4147190 16.0874680 5.2679950 5.1270020 - 16.6112160 4.7742600 4.4960190 16.6734100 5.4030310 5.8717590 - 16.2664400 5.3519820 8.6489570 
-36.0047690 33.3989010 30.2224290 90.0000000 90.0000000 90.0000000
diff --git a/tests/st/sponge/functions/__init__.py b/tests/st/sponge/functions/__init__.py
deleted file mode 100644
index e69de29bb2d..00000000000
diff --git a/tests/st/sponge/functions/angle_energy.py b/tests/st/sponge/functions/angle_energy.py
deleted file mode 100644
index c5cc59036bf..00000000000
--- a/tests/st/sponge/functions/angle_energy.py
+++ /dev/null
@@ -1,64 +0,0 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ============================================================================
-'''angle energy'''
-from mindspore import numpy as np
-
-from .common import get_periodic_displacement
-
-
-def angle_energy(angle_numbers, uint_crd_f, scalar_f, atom_a, atom_b, atom_c, angle_k, angle_theta0):
-    """
-    Calculate the energy caused by the 3-atom angle term. Assume the number of angles is M and the
-    number of atoms is N.
-
-    .. math::
-        dr_{ab} = (x_b-x_a, y_b-y_a, z_b-z_a)
-    .. math::
-        dr_{cb} = (x_b-x_c, y_b-y_c, z_b-z_c)
-    .. math::
-        theta = arccos(inner_product(dr_{ab}, dr_{cb})/|dr_{ab}|/|dr_{cb}|)
-    .. math::
-        E = k*(theta - theta_0)^2
-
-    Args:
-        angle_numbers (int): the number of angles M.
-        uint_crd_f (Tensor, uint32): [N, 3], the unsigned int coordinate value of each atom.
-        scalar_f (Tensor, float32): [3,], the 3-D scale factor between
-            the real space float coordinates and the unsigned int coordinates.
-        atom_a (Tensor, int32): [M,], the 1st atom index of each angle.
-        atom_b (Tensor, int32): [M,], the 2nd and the central atom index of each angle.
-        atom_c (Tensor, int32): [M,], the 3rd atom index of each angle.
-        angle_k (Tensor, float32): [M,], the force constant for each angle.
-        angle_theta0 (Tensor, float32): [M,], the equilibrium position value for each angle.
-
-    Outputs:
-        ene (Tensor, float32): [M,], the potential energy for each angle term.
-
-    Supported Platforms:
-        ``GPU``
-    """
-    drij = get_periodic_displacement(uint_crd_f[atom_a], uint_crd_f[atom_b], scalar_f)
-    drkj = get_periodic_displacement(uint_crd_f[atom_c], uint_crd_f[atom_b], scalar_f)
-
-    rij_2 = 1 / np.sum(drij ** 2, -1)
-    rkj_2 = 1 / np.sum(drkj ** 2, -1)
-    rij_1_rkj_1 = np.sqrt(rij_2 * rkj_2)
-
-    costheta = np.sum(drij * drkj, -1) * rij_1_rkj_1
-    costheta = np.clip(costheta, -0.999999, 0.999999)
-    theta = np.arccos(costheta)
-
-    dtheta = theta - angle_theta0
-    return angle_k * dtheta * dtheta
diff --git a/tests/st/sponge/functions/angle_force_with_atom_energy.py b/tests/st/sponge/functions/angle_force_with_atom_energy.py
deleted file mode 100644
index 90177cd4484..00000000000
--- a/tests/st/sponge/functions/angle_force_with_atom_energy.py
+++ /dev/null
@@ -1,79 +0,0 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ============================================================================
-'''angle force with atom energy'''
-from mindspore import numpy as np
-from mindspore import ops
-
-from .common import get_periodic_displacement
-
-
-def angle_force_with_atom_energy(angle_numbers, uint_crd_f, scalar_f, atom_a, atom_b, atom_c, angle_k, angle_theta0):
-    """
-    Calculate angle force and potential energy together. Assume the number of angles is M and the
-    number of atoms is N.
-
-    The calculation formula is the same as operator AngleForce() and AngleEnergy().
-
-    Args:
-        angle_numbers (int): the number of angles M.
-        uint_crd_f (Tensor, uint32): [N, 3], the unsigned int coordinate value of each atom.
-        scalar_f (Tensor, float32): [3,], the 3-D scale factor between
-            the real space float coordinates and the unsigned int coordinates.
-        atom_a (Tensor, int32): [M,], the 1st atom index of each angle.
-        atom_b (Tensor, int32): [M,], the 2nd and the central atom index of each angle.
-        atom_c (Tensor, int32): [M,], the 3rd atom index of each angle.
-        angle_k (Tensor, float32): [M,], the force constant for each angle.
-        angle_theta0 (Tensor, float32): [M,], the equilibrium position value for each angle.
-
-    Outputs:
-        frc_f (Tensor, float32): [N, 3], same as operator AngleForce().
-        ene (Tensor, float32): [N,], same as operator AngleAtomEnergy().
-
-    Supported Platforms:
-        ``GPU``
-    """
-    atom_numbers = uint_crd_f.shape[0]
-    k2 = angle_k
-    frc = np.zeros((atom_numbers, 3))
-    atom_energy = np.zeros(atom_numbers)
-
-    drij = get_periodic_displacement(uint_crd_f[atom_a], uint_crd_f[atom_b], scalar_f)
-    drkj = get_periodic_displacement(uint_crd_f[atom_c], uint_crd_f[atom_b], scalar_f)
-
-    rij_2 = 1. / np.sum(drij ** 2, -1)
/ np.sum(drkj ** 2, -1) - rij_1_rkj_1 = np.sqrt(rij_2 * rkj_2) - - costheta = np.sum(drij * drkj, -1) * rij_1_rkj_1 - costheta = np.clip(costheta, -0.999999, 0.999999) - theta = np.arccos(costheta) - - dtheta = theta - angle_theta0 - angle_k = -2 * angle_k * dtheta / np.sin(theta) - - common_factor_cross = np.expand_dims(angle_k * rij_1_rkj_1, -1) - common_factor_self = angle_k * costheta - - fi = np.expand_dims(common_factor_self * rij_2, -1) * drij - common_factor_cross * drkj - fk = np.expand_dims(common_factor_self * rkj_2, -1) * drkj - common_factor_cross * drij - - frc = ops.tensor_scatter_add(frc, np.expand_dims(atom_a, -1), fi) - frc = ops.tensor_scatter_add(frc, np.expand_dims(atom_c, -1), fk) - frc = ops.tensor_scatter_add(frc, np.expand_dims(atom_b, -1), -fi - fk) - - atom_energy_val = k2 * dtheta * dtheta - atom_energy = ops.tensor_scatter_add(atom_energy, np.expand_dims(atom_a, -1), atom_energy_val) - - return frc, atom_energy diff --git a/tests/st/sponge/functions/bond_energy.py b/tests/st/sponge/functions/bond_energy.py deleted file mode 100644 index 206035406be..00000000000 --- a/tests/st/sponge/functions/bond_energy.py +++ /dev/null @@ -1,63 +0,0 @@ -# Copyright 2021 Huawei Technologies Co., Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================ -'''bond energy''' -import mindspore.numpy as mnp -from mindspore import ops - - -def bond_energy(atom_numbers, bond_numbers, uint_crd_f, scaler_f, atom_a, atom_b, bond_k, bond_r0): - """ - Calculate the harmonic potential energy between each bonded atom pair. - Assume our system has N atoms and M harmonic bonds. - - .. math:: - - dr = (x_1-x_2, y_1-y_2, z_1-z_2) - - .. math:: - - E = k*(|dr| - r_0)^2 - - Args: - atom_numbers (int): the number of atoms N. - bond_numbers (int): the number of harmonic bonds M. - uint_crd_f (Tensor, uint32 ) - [N, 3], the unsigned int coordinate value of each atom. - scaler_f (Tensor, float32): [3,], the 3-D scale factor (x, y, z), - between the real space float coordinates and the unsigned int coordinates. - atom_a (Tensor, int32): [M,], the first atom index of each bond. - atom_b (Tensor, int32): [M,], the second atom index of each bond. - bond_k (Tensor, float32): [M,], the force constant of each bond. - bond_r0 (Tensor, float32): [M,], the equlibrium length of each bond. - - Outputs: - bond_ene (Tensor, float32): [M,], the harmonic potential energy for each bond. 
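The harmonic bond formula above reduces to a few lines of plain NumPy; a hedged sketch (bond_energy_ref is a hypothetical name) that skips the uint-coordinate handling and works directly on real-space positions:

import numpy as np

def bond_energy_ref(crd, atom_a, atom_b, bond_k, bond_r0):
    dr = crd[atom_a] - crd[atom_b]            # (M, 3) bond vectors
    r = np.linalg.norm(dr, axis=-1)           # (M,) bond lengths |dr|
    return bond_k * (r - bond_r0) ** 2        # E = k * (|dr| - r0)^2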
- - Supported Platforms: - ``GPU`` - """ - uint_vec_a = uint_crd_f[atom_a] # (M, 3) - uint_vec_b = uint_crd_f[atom_b] # (M, 3) - - uint_vec_dr = uint_vec_a - uint_vec_b - uint_vec_dr = ops.depend(uint_vec_dr, uint_vec_dr) - - int_vec_dr = uint_vec_dr.astype('int32') - int_vec_dr = ops.depend(int_vec_dr, int_vec_dr) - - vec_dr = int_vec_dr * scaler_f - dr_dot = mnp.sum(vec_dr * vec_dr, 1) - tempf = mnp.sqrt(dr_dot) - bond_r0 - - return bond_k * tempf * tempf diff --git a/tests/st/sponge/functions/bond_force_with_atom_energy.py b/tests/st/sponge/functions/bond_force_with_atom_energy.py deleted file mode 100644 index 6aa16d4ede2..00000000000 --- a/tests/st/sponge/functions/bond_force_with_atom_energy.py +++ /dev/null @@ -1,61 +0,0 @@ -# Copyright 2021 Huawei Technologies Co., Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================ -'''bond force with atom energy''' -from mindspore import numpy as np -from mindspore import ops - -from .common import get_periodic_displacement - - -def bond_force_with_atom_energy(atom_numbers, bond_numbers, uint_crd_f, scalar_f, atom_a, atom_b, bond_k, bond_r0): - """ - Calculate bond force and harmonic potential energy together. - - The calculation formula is the same as operator BondForce() and BondEnergy(). - - Args: - atom_numbers (int): the number of atoms N. - bond_numbers (int): the number of harmonic bonds M. - uint_crd_f (Tensor, uint32 ) - [N, 3], the unsigned int coordinate value of each atom. - scalar_f (Tensor, float32): [3,], the 3-D scale factor (x, y, z), - between the real space float coordinates and the unsigned int coordinates. - atom_a (Tensor, int32): [M,], the first atom index of each bond. - atom_b (Tensor, int32): [M,], the second atom index of each bond. - bond_k (Tensor, float32): [M,], the force constant of each bond. - bond_r0 (Tensor, float32): [M,], the equlibrium length of each bond. - - Outputs: - frc_f (Tensor, float32): [N, 3], same as operator BondForce(). - atom_energy (Tensor, float32): [N,], same as atom_ene in operator BondAtomEnergy(). - - Supported Platforms: - ``GPU`` - """ - frc = np.zeros(uint_crd_f.shape, np.float32) - atom_energy = np.zeros(atom_numbers) - - dr = get_periodic_displacement(uint_crd_f[atom_a], uint_crd_f[atom_b], scalar_f) - abs_r = np.norm(dr, axis=-1) - r_1 = 1. 
/ abs_r - tempf = abs_r - bond_r0 - f = 2 * np.expand_dims(tempf * r_1 * bond_k, -1) * dr - - frc = ops.tensor_scatter_add(frc, np.expand_dims(atom_a, -1), -f) - frc = ops.tensor_scatter_add(frc, np.expand_dims(atom_b, -1), f) - - atom_energy_val = bond_k * tempf * tempf - atom_energy = ops.tensor_scatter_add(atom_energy, np.expand_dims(atom_a, -1), atom_energy_val) - - return frc, atom_energy diff --git a/tests/st/sponge/functions/bond_force_with_atom_energy_with_virial.py b/tests/st/sponge/functions/bond_force_with_atom_energy_with_virial.py deleted file mode 100644 index e20f9fece6b..00000000000 --- a/tests/st/sponge/functions/bond_force_with_atom_energy_with_virial.py +++ /dev/null @@ -1,64 +0,0 @@ -# Copyright 2021 Huawei Technologies Co., Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================ -'''bond force with atom energy with virial''' - -import mindspore.numpy as np -import mindspore.ops as ops -from .common import get_periodic_displacement - - -def bond_force_with_atom_energy_with_virial(atom_numbers, uint_crd_f, scalar_f, atom_a, atom_b, bond_k, - bond_r0): - """ - Calculate bond force and harmonic potential energy together. - - The calculation formula is the same as operator BondForce() and BondEnergy(). - - Args: - atom_numbers (int): the number of atoms N. - uint_crd_f (Tensor, uint32 ) - [N, 3], the unsigned int coordinate value of each atom. - scalar_f (Tensor, float32): [3,], the 3-D scale factor (x, y, z), - between the real space float coordinates and the unsigned int coordinates. - atom_a (Tensor, int32): [M,], the first atom index of each bond. - atom_b (Tensor, int32): [M,], the second atom index of each bond. - bond_k (Tensor, float32): [M,], the force constant of each bond. - bond_r0 (Tensor, float32): [M,], the equlibrium length of each bond. - - Outputs: - frc_f (Tensor, float32): [N, 3], same as operator BondForce(). - atom_energy (Tensor, float32): [N,], same as atom_ene in operator BondAtomEnergy(). - atom_virial (Tensor, float32): [N,], the virial of each atom - - Supported Platforms: - ``GPU`` - """ - frc = np.zeros(uint_crd_f.shape, np.float32) - atom_energy = np.zeros(atom_numbers) - atom_virial = np.zeros(atom_numbers) - - dr = get_periodic_displacement(uint_crd_f[atom_a], uint_crd_f[atom_b], scalar_f) - abs_r = np.norm(dr, axis=-1) - r_1 = 1. 
/ abs_r - tempf = abs_r - bond_r0 - f = 2 * np.expand_dims(tempf * r_1 * bond_k, -1) * dr - - frc = ops.tensor_scatter_add(frc, np.expand_dims(atom_a, -1), -f) - frc = ops.tensor_scatter_add(frc, np.expand_dims(atom_b, -1), f) - - atom_energy_val = bond_k * tempf * tempf - atom_virial_val = -2 * tempf * bond_k * abs_r - atom_energy = ops.tensor_scatter_add(atom_energy, np.expand_dims(atom_a, -1), atom_energy_val) - atom_virial = ops.tensor_scatter_add(atom_virial, np.expand_dims(atom_a, -1), atom_virial_val) - return frc, atom_energy, atom_virial diff --git a/tests/st/sponge/functions/common.py b/tests/st/sponge/functions/common.py deleted file mode 100644 index 2adb194637f..00000000000 --- a/tests/st/sponge/functions/common.py +++ /dev/null @@ -1,198 +0,0 @@ -# Copyright 2021 Huawei Technologies Co., Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================ -'''common''' -import math - -import numpy as onp - -from mindspore import ops -from mindspore.ops import constexpr -import mindspore.numpy as np - -PI = 3.1415926 - - -@constexpr -def get_excluded_index(atom_numbers, excluded_list_size): - return np.tile(np.arange(excluded_list_size), (atom_numbers, 1)) - - -@constexpr -def get_neighbour_index(atom_numbers, neighbour_list_size): - return np.tile(np.arange(neighbour_list_size), (atom_numbers, 1)) - - -@constexpr -def get_atom_list_tensor(atom_numbers): - return np.arange(atom_numbers).reshape(-1, 1) - - -@constexpr -def get_zero_tensor(shape, dtype=np.float32): - return np.zeros(shape, dtype) - - -@constexpr -def get_full_tensor(shape, fill_value, dtype=np.float32): - return np.full(shape, fill_value, dtype) - - -@constexpr -def get_range_tensor(stop): - return np.arange(stop) - - -def get_csr_excluded_index(ele_list, start, num): - '''get csr excluded index''' - row_idx = [0] - col_idx = [] - value = [] - for s, n in zip(start, num): - if n > 0: - col_idx += list(range(n)) - row_idx.append(len(col_idx)) - value += ele_list[s : s + n].tolist() - else: - row_idx.append(len(col_idx)) - return row_idx, col_idx, value - - -def broadcast_with_excluded_list(excluded_indptr): - atom_excluded_index = [] - for i, v in enumerate(excluded_indptr[1:]): - atom_excluded_index += [i] * (v - excluded_indptr[i]) - return np.array(atom_excluded_index) - - -def get_periodic_displacement(x, y, scaler): - int_dr = x - y - int_dr = int_dr.astype('int32') - int_dr = ops.depend(int_dr, int_dr) - return int_dr * scaler - - -def get_csr_residual_index(atom_numbers, start, end): - """Re-format residual list""" - indptr = onp.append(start, end[-1]).astype("int32") - indices = onp.arange(atom_numbers).astype("int32") - data = onp.ones(atom_numbers, dtype="float32") - return indptr, indices, data - - -def cucdiv(real1, imag1, real2, imag2): - """ - Compute the quotient of the given complex numbers. 
- Assuming that `x` is the complex number composed of - the real part `real1` and imaginary part `imag1`, - while `y` consists of `real2` and `imag2`, - the result is computed by the equation `x / y`. - """ - divisor = real2 * real2 + imag2 * imag2 - res_real = (real1 * real2 + imag1 * imag2) / divisor - res_imag = (imag1 * real2 - real1 * imag2) / divisor - return res_real, res_imag - - -def expc(real, imag): - t = math.exp(real) - res_real = math.cos(imag) * t - res_imag = math.sin(imag) * t - return res_real, res_imag - - -def m_(u, n): - if n == 2: - if u > 2 or u < 0: - return 0 - return 1 - abs(u - 1) - return u / (n - 1) * m_(u, n - 1) + (n - u) / (n - 1) * m_(u - 1, n - 1) - - -def getb(k, nfft, b_order): - '''getb''' - tempc2_real = 0. - tempc2_imag = 0. - tempc_real = 0. - tempc_imag = 2 * (b_order - 1) * PI * k / nfft - res_real, res_imag = expc(tempc_real, tempc_imag) - for kk in range(b_order - 1): - tempc_real = 0 - tempc_imag = 2 * PI * k / nfft * kk - tempc_real, tempc_imag = expc(tempc_real, tempc_imag) - tempf = m_(kk + 1, b_order) - tempc2_real += tempf * tempc_real - tempc2_imag += tempf * tempc_imag - res_real, res_imag = cucdiv(res_real, res_imag, tempc2_real, tempc2_imag) - return res_real * res_real + res_imag * res_imag - - -def reform_residual_list(atom_numbers, start, end): - """Re-format residual list""" - idx = onp.arange(atom_numbers).reshape(1, -1) - start = start.reshape(-1, 1) - end = end.reshape(-1, 1) - mask = onp.logical_and(idx >= start, idx < end).astype('int32') - return mask - - -def reform_excluded_list(excluded_list, excluded_list_start, exlcuded_list_number): - """Re-format excluded list: (E,) -> (N, MAX_NEIGHBOUR)""" - atom_numbers = excluded_list_start.shape[0] - max_neighbour = exlcuded_list_number.max() - excluded_list_end = excluded_list_start + exlcuded_list_number - excluded_matrix = onp.full((atom_numbers, max_neighbour), -1, onp.int32) - for i in range(atom_numbers): - excluded_matrix[i, :exlcuded_list_number[i]] = excluded_list[excluded_list_start[i]:excluded_list_end[i]] - return excluded_matrix - - -def get_pme_bc(fftx, ffty, fftz, box, beta): - '''get pme_bc''' - z_range = fftz // 2 + 1 - - b1 = list(map(lambda i: getb(i, fftx, 4), range(fftx))) - b2 = list(map(lambda i: getb(i, ffty, 4), range(ffty))) - b3 = list(map(lambda i: getb(i, fftz, 4), range(z_range))) - - mprefactor = PI * PI / -beta / beta - volume = box[0] * box[1] * box[2] - - kxend = int(fftx / 2) - kxrp_l = onp.arange(kxend + 1) - kxrp_r = onp.arange(fftx - kxend - 1, 0, -1) - kxrp = onp.hstack((kxrp_l, kxrp_r)).reshape(-1, 1, 1) / box[0] - - kyend = int(ffty / 2) - kyrp_l = onp.arange(kyend + 1) - kyrp_r = onp.arange(ffty - kyend - 1, 0, -1) - kyrp = onp.hstack((kyrp_l, kyrp_r)).reshape(1, -1, 1) / box[1] - - kzrp = onp.arange(z_range).reshape(1, 1, -1) / box[2] - - msq = kxrp * kxrp + kyrp * kyrp + kzrp * kzrp - b1 = onp.broadcast_to( - onp.array(b1).reshape(-1, 1, 1), msq.shape).ravel() # pylint: disable=too-many-function-args - b2 = onp.broadcast_to( - onp.array(b2).reshape(1, -1, 1), msq.shape).ravel() # pylint: disable=too-many-function-args - b3 = onp.broadcast_to( - onp.array(b3).reshape(1, 1, -1), msq.shape).ravel() # pylint: disable=too-many-function-args - msq = msq.ravel() - - pme_bc = onp.zeros_like(msq) - msq = msq[1:] - pme_bc[1:] = 1.0 / PI / msq * onp.exp(mprefactor * msq) / volume - pme_bc = pme_bc * b1 * b2 * b3 - - return pme_bc diff --git a/tests/st/sponge/functions/constrain.py b/tests/st/sponge/functions/constrain.py deleted file mode 100644 
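The complex-number helpers in common.py (cucdiv, expc) mirror Python's built-in complex arithmetic; a quick self-contained check under that assumption, with the two helpers restated verbatim from the removed file:

import cmath
import math

def cucdiv(real1, imag1, real2, imag2):
    # quotient (real1 + i*imag1) / (real2 + i*imag2)
    d = real2 * real2 + imag2 * imag2
    return ((real1 * real2 + imag1 * imag2) / d,
            (imag1 * real2 - real1 * imag2) / d)

def expc(real, imag):
    # exp(real + i*imag)
    t = math.exp(real)
    return math.cos(imag) * t, math.sin(imag) * t

# both agree with Python's complex arithmetic to rounding error
assert abs(complex(*cucdiv(1.0, 2.0, 3.0, 4.0)) - (1 + 2j) / (3 + 4j)) < 1e-12
assert abs(complex(*expc(0.5, 1.5)) - cmath.exp(0.5 + 1.5j)) < 1e-12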
index faf0f5369d4..00000000000 --- a/tests/st/sponge/functions/constrain.py +++ /dev/null @@ -1,112 +0,0 @@ -# Copyright 2021 Huawei Technologies Co., Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================ -'''constrain''' - -import mindspore.ops as ops -import mindspore.numpy as np -from mindsponge.md.functions.common import get_periodic_displacement - -int32_four = np.array(4, dtype=np.int32) - - -def refresh_uint_crd(half_exp_gamma_plus_half, crd, quarter_cof, frc, - mass_inverse): - '''refresh uint crd ''' - mass_inverse_mul = np.expand_dims(mass_inverse, -1) - crd_lin = (crd + half_exp_gamma_plus_half * frc * mass_inverse_mul).astype("float32") - tempi = (crd_lin * quarter_cof).astype("int32") - uint_crd = (tempi * int32_four).astype("uint32") - - return uint_crd - - -def constrain_force_cycle_with_virial(uint_crd, scaler, pair_dr, atom_i_serials, - atom_j_serials, constant_rs, constrain_ks, frc, virial): - '''constrain force cycle with virial''' - dr = get_periodic_displacement(uint_crd[atom_i_serials], uint_crd[atom_j_serials], scaler) - r_1 = 1. / np.norm(dr, axis=-1) - frc_abs = (1. - constant_rs * r_1) * constrain_ks - frc_lin = np.expand_dims(frc_abs, -1) * pair_dr - frc_sum = (frc_lin * pair_dr).astype("float32") - - virial -= np.sum(frc_sum, -1) - - frc = ops.tensor_scatter_add(frc, np.expand_dims(atom_j_serials, -1), frc_lin) - frc = ops.tensor_scatter_add(frc, np.expand_dims(atom_i_serials, -1), -frc_lin) - - return frc, virial - - -def constrain_force_cycle(uint_crd, scaler, pair_dr, atom_i_serials, - atom_j_serials, constant_rs, constrain_ks, frc): - '''constrain force cycle''' - dr = get_periodic_displacement(uint_crd[atom_i_serials], uint_crd[atom_j_serials], scaler) - r_1 = 1. / np.norm(dr, axis=-1) - frc_abs = (1. - constant_rs * r_1) * constrain_ks - frc_lin = np.expand_dims(frc_abs, -1) * pair_dr - - frc = ops.tensor_scatter_add(frc, np.expand_dims(atom_j_serials, -1), frc_lin) - frc = ops.tensor_scatter_add(frc, np.expand_dims(atom_i_serials, -1), -frc_lin) - - return frc - - -def constrain(atom_numbers, constrain_pair_numbers, iteration_numbers, half_exp_gamma_plus_half, - crd, quarter_cof, mass_inverse, scalar, pair_dr, atom_i_serials, atom_j_serials, constant_rs, - constrain_ks, - need_pressure): - """ - Calculate the constraint force and virial depends on pressure calculation. - - Args: - atom_numbers (int): the number of atoms N. - constrain_pair_numbers (int): the number of constrain pairs M. - iteration_numbers (int): the number of iteration numbers p. - half_exp_gamma_plus_half (float32): half exp_gamma plus half q. - crd(Tensor, float32): [N, 3], the coordinate of each atom. - quarter_cof (Tensor, float32): [3,], the coefficient mapping coordinates to uint coordinates. - mass_inverse(Tensor, float32): [N, ], the inverse value of mass of each atom. 
- scalar (Tensor, float32): [3,], the 3-D scale factor (x, y, z), - pair_dr(Tensor, float32): [M, 3], the displacement vector of each constrained atom pair. - atom_i_serials(Tensor, int32): [M, ], the first atom index of each constrained atom pair. - atom_j_serials(Tensor, int32): [M, ], the second atom index of each constrained atom pair. - constant_rs(Tensor, float32): [M, ], the constrained distance of each constrained atom pair. - constrain_ks(Tensor, float32): [M, ], the constrained distance of each constrained atom pair. - need_pressure(Tensor, int32), [1, ]or [], if need pressure, 1 else 0. - - Outputs: - uint_crd(Tensor, float32): [N, 3], the unsigned int coordinate value of each atom. - frc(Tensor, float32): [N, 3], the constraint force on each atom. - virial(Tensor, float32): [M, ], the constraint virial on each atom - - """ - frc = np.zeros(crd.shape, np.float32) - virial = np.zeros((constrain_pair_numbers,), np.float32) - uint_crd = np.zeros((atom_numbers, 3)).astype("uint32") - - while iteration_numbers > 0: - uint_crd = refresh_uint_crd(half_exp_gamma_plus_half, crd, quarter_cof, frc, - mass_inverse) - if need_pressure: - frc, virial = constrain_force_cycle_with_virial(uint_crd, scalar, - pair_dr, - atom_i_serials, - atom_j_serials, constant_rs, constrain_ks, frc, virial) - else: - frc = constrain_force_cycle(uint_crd, scalar, pair_dr, atom_i_serials, - atom_j_serials, constant_rs, constrain_ks, frc) - iteration_numbers -= 1 - - return uint_crd, frc, virial diff --git a/tests/st/sponge/functions/constrain_force_cycle.py b/tests/st/sponge/functions/constrain_force_cycle.py deleted file mode 100644 index 84837c4a058..00000000000 --- a/tests/st/sponge/functions/constrain_force_cycle.py +++ /dev/null @@ -1,51 +0,0 @@ -# Copyright 2021 Huawei Technologies Co., Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================ -'''constrain force cycle with virial''' - -import mindspore.numpy as np -import mindspore.ops as ops -from mindsponge.md.functions.common import get_periodic_displacement - - -def constrain_force_cycle(uint_crd, scaler, pair_dr, atom_i_serials, - atom_j_serials, constant_rs, constrain_ks): - """ - Calculate the constraint force and virial in each iteration. - Args: - uint_crd(Tensor, uint32): [N, 3], the unsigned int coordinate value of each atom. - scaler(Tensor, float32): [3,], the 3-D scale factor (x, y, z). - pair_dr(Tensor, float32): [M, 3], the displacement vector of each constrained atom pair. - atom_i_serials(Tensor, int32): [M, ], the first atom index of each constrained atom pair. - atom_j_serials(Tensor, int32): [M, ], the second atom index of each constrained atom pair. - constant_rs(Tensor, float32): [M, ], the constrained distance of each constrained atom pair. - constrain_ks(Tensor, float32): [M, ], the constrained distance of each constrained atom pair. - - Returns: - test_frc(Tensor, float32): [N, 3], the constraint force. 
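All of these constraint kernels accumulate per-pair forces into per-atom buffers with ops.tensor_scatter_add; np.add.at is the NumPy analogue of that scatter-add pattern, shown here as a small sketch with illustrative shapes:

import numpy as np

frc = np.zeros((4, 3), np.float32)       # per-atom force buffer
atom_i = np.array([0, 2, 2])             # destination atom of each pair
frc_lin = np.ones((3, 3), np.float32)    # per-pair force contributions
np.add.at(frc, atom_i, -frc_lin)         # like tensor_scatter_add(frc, atom_i[:, None], -frc_lin)
# duplicate indices (atom 2 above) are accumulated, not overwritten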
- - Supported Platforms: - ``GPU`` - """ - test_frc = np.zeros(uint_crd.shape, np.float32) - - dr = get_periodic_displacement(uint_crd[atom_i_serials], uint_crd[atom_j_serials], scaler) - r_1 = 1. / np.norm(dr, axis=-1) - frc_abs = (1. - constant_rs * r_1) * constrain_ks - frc_lin = np.expand_dims(frc_abs, -1) * pair_dr - - test_frc = ops.tensor_scatter_add(test_frc, np.expand_dims(atom_j_serials, -1), frc_lin) - test_frc = ops.tensor_scatter_add(test_frc, np.expand_dims(atom_i_serials, -1), -frc_lin) - - return test_frc diff --git a/tests/st/sponge/functions/constrain_force_cycle_with_virial.py b/tests/st/sponge/functions/constrain_force_cycle_with_virial.py deleted file mode 100644 index e78ac1ee240..00000000000 --- a/tests/st/sponge/functions/constrain_force_cycle_with_virial.py +++ /dev/null @@ -1,58 +0,0 @@ -# Copyright 2021 Huawei Technologies Co., Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================ -'''constrain force cycle with virial''' - -import mindspore.numpy as np -import mindspore.ops as ops - -from .common import get_periodic_displacement - - -def constrain_force_cycle_with_virial(constrain_pair_numbers, uint_crd, scaler, pair_dr, atom_i_serials, - atom_j_serials, constant_rs, constrain_ks): - """ - Calculate the constraint force and virial in each iteration. - Args: - constrain_pair_numbers (int): the number of constrain pairs M. - uint_crd(Tensor, uint32): [N, 3], the unsigned int coordinate value of each atom. - scaler(Tensor, float32): [3,], the 3-D scale factor (x, y, z). - pair_dr(Tensor, float32): [M, 3], the displacement vector of each constrained atom pair. - atom_i_serials(Tensor, int32): [M, ], the first atom index of each constrained atom pair. - atom_j_serials(Tensor, int32): [M, ], the second atom index of each constrained atom pair. - constant_rs(Tensor, float32): [M, ], the constrained distance of each constrained atom pair. - constrain_ks(Tensor, float32): [M, ], the constrained distance of each constrained atom pair. - - Returns: - test_frc(Tensor, float32): [N, 3], the constraint force. - atom_virial(Tensor, float32): [M, ], the virial caused by constraint force of each atom. - - Supported Platforms: - ``GPU`` - """ - frc = np.zeros(uint_crd.shape, np.float32) - atom_virial = np.zeros((constrain_pair_numbers,), np.float32) - - dr = get_periodic_displacement(uint_crd[atom_i_serials], uint_crd[atom_j_serials], scaler) - r_1 = 1. / np.norm(dr, axis=-1) - frc_abs = (1. 
- constant_rs * r_1) * constrain_ks - frc_lin = np.expand_dims(frc_abs, -1) * pair_dr - frc_sum = frc_lin * pair_dr - - atom_virial -= np.sum(frc_sum, -1) - - frc = ops.tensor_scatter_add(frc, np.expand_dims(atom_j_serials, -1), frc_lin) - frc = ops.tensor_scatter_add(frc, np.expand_dims(atom_i_serials, -1), -frc_lin) - - return frc, atom_virial diff --git a/tests/st/sponge/functions/crd_to_uint_crd.py b/tests/st/sponge/functions/crd_to_uint_crd.py deleted file mode 100644 index 1d738437c90..00000000000 --- a/tests/st/sponge/functions/crd_to_uint_crd.py +++ /dev/null @@ -1,37 +0,0 @@ -# Copyright 2021 Huawei Technologies Co., Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================ -'''crd to uint crd''' -import mindspore.numpy as np - -uint32_two = np.array(2, dtype=np.uint32) - - -def crd_to_uint_crd(crd_to_uint_crd_cof, crd): - """ - Convert FP32 coordinate to Uint32 coordinate. - - Args: - crd_to_uint_crd_cof (Tensor, float32): [3,], the coefficient mapping coordinates to uint - coordinates. - crd (Tensor, float32): [N, 3], the coordinates of each atom. - - Outputs: - uint_crd (Tensor, uint32): [N,3], the uint coordinates of each atom. - - Supported Platforms: - ``GPU`` - """ - - return (crd * crd_to_uint_crd_cof).astype(np.uint32) * uint32_two diff --git a/tests/st/sponge/functions/crd_to_uint_crd_quarter.py b/tests/st/sponge/functions/crd_to_uint_crd_quarter.py deleted file mode 100644 index 0c0b741fce8..00000000000 --- a/tests/st/sponge/functions/crd_to_uint_crd_quarter.py +++ /dev/null @@ -1,37 +0,0 @@ -# Copyright 2021 Huawei Technologies Co., Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================ -'''crd to uint crd quarter''' -import mindspore.numpy as np - -int32_four = np.array(4, dtype=np.int32) - - -def crd_to_uint_crd_quarter(crd_to_uint_crd_cof, crd): - """ - Convert FP32 coordinate to Uint32 coordinate. - - Args: - crd_to_uint_crd_cof (Tensor, float32): [3,], the coefficient mapping coordinates to uint - coordinates. - crd (Tensor, float32): [N, 3], the coordinates of each atom. - - Outputs: - uint_crd (Tensor, uint32): [N,3], the uint coordinates of each atom. 
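The point of the unsigned-integer coordinate representation used throughout these files is that subtraction wraps modulo 2^32, which yields minimum-image displacements for free. A NumPy sketch of that trick, assuming crd_to_uint_crd_cof = 2^31 / box (the box lengths here are illustrative values only):

import numpy as np

box = np.array([36.0, 33.4, 30.2])
cof = 2.0 ** 31 / box                      # assumed crd_to_uint_crd_cof
scaler = box / 2.0 ** 32                   # one uint step back to real space

def to_uint(crd):
    return (crd * cof).astype(np.uint32) * np.uint32(2)

a = to_uint(np.array([0.5, 0.5, 0.5]))
b = to_uint(np.array([35.9, 33.0, 29.9]))
dr = (a - b).astype(np.int32) * scaler     # unsigned wraparound picks the minimum image
# dr is close to [0.6, 0.9, 0.8] rather than [-35.4, -32.5, -29.4]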
- - Supported Platforms: - ``GPU`` - """ - - return ((crd * crd_to_uint_crd_cof).astype(np.int32) * int32_four).astype(np.uint32) diff --git a/tests/st/sponge/functions/dihedral_14_cf_energy.py b/tests/st/sponge/functions/dihedral_14_cf_energy.py deleted file mode 100644 index 9f94df874dd..00000000000 --- a/tests/st/sponge/functions/dihedral_14_cf_energy.py +++ /dev/null @@ -1,60 +0,0 @@ -# Copyright 2021 Huawei Technologies Co., Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================ -'''dihedral 14 cf energy''' -import mindspore.numpy as mnp -from .common import get_periodic_displacement - - -def nb14_cf_energy(nb14_numbers, atom_numbers, uint_crd, atom_lj_type, charge, - boxlength_f, nb14_atom_a, nb14_atom_b, cf_scale_factor): - """ - Calculate the Coulumb part of 1,4 dihedral energy correction for - each necessary dihedral terms on the corresponding atoms. - - .. math:: - - dr = (x_a-x_b, y_a-y_b, z_a-z_b) - - .. math:: - E = k*q_a*q_b/|dr| - - Args: - nb14_numbers (int): the number of necessary dihedral 1,4 terms M. - atom_numbers (int): the number of atoms N. - uint_crd (Tensor, uint32): [N, 3], the unsigned int coordinate value of each atom. - atom_lj_type (Tensor, int32): [N,], the Lennard-Jones type of each atom. - charge (Tensor, float32): [N,], the charge of each atom. - boxlength_f (Tensor, float32): [3,], the length of molecular simulation box in 3 dimensions. - nb14_atom_a (Tensor, int32): [M,], the first atom index of each dihedral 1,4 term. - nb14_atom_b (Tensor, int32): [M,], the second atom index of each dihedral 1,4 term. - cf_scale_factor (Tensor, float) - [M,], the scale factor for the - Coulomb part of force correction for each dihedral 1,4 terms. - - Outputs: - ene (Tensor, float32): [M,], the accumulated potential energy of each atom. - - Supported Platforms: - ``GPU`` - """ - r1_xyz = uint_crd[nb14_atom_a] # [uint_x, uint_y, uint_z] (M,3) - r2_xyz = uint_crd[nb14_atom_b] # [uint_x, uint_y, uint_z] (M,3) - dr_xyz = get_periodic_displacement(r2_xyz, r1_xyz, boxlength_f) - r_1 = 1. / mnp.norm(dr_xyz, axis=-1) - - r1_charge = charge[nb14_atom_a] - r2_charge = charge[nb14_atom_b] - ene_lin = r1_charge * r2_charge * r_1 - - return ene_lin * cf_scale_factor diff --git a/tests/st/sponge/functions/dihedral_14_lj_energy.py b/tests/st/sponge/functions/dihedral_14_lj_energy.py deleted file mode 100644 index a03696b5a44..00000000000 --- a/tests/st/sponge/functions/dihedral_14_lj_energy.py +++ /dev/null @@ -1,77 +0,0 @@ -# Copyright 2021 Huawei Technologies Co., Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================ -'''dihedral 14 lj energy''' -import mindspore.numpy as mnp -from .common import get_periodic_displacement - - -def nb14_lj_energy(nb14_numbers, atom_numbers, uint_crd_f, lj_type, charge, - boxlength, a_14, b_14, lj_scale_factor, lj_type_a, lj_type_b): - """ - Calculate the Lennard-Jones part of 1,4 dihedral energy correction for - each necessary dihedral terms on the corresponding atoms. - - .. math:: - dr = (x_a-x_b, y_a-y_b, z_a-z-b) - .. math:: - E = k*(A/|dr|^{12} - B/|dr|^{6}) - - Args: - nb14_numbers (int): the number of necessary dihedral 1,4 terms M. - atom_numbers (int): the number of atoms N. - uint_crd_f (Tensor, uint32): [N, 3], the unsigned int coordinate value of each atom. - lj_type (Tensor, int32): [N,], the Lennard-Jones type of each atom. - charge (Tensor, float32): [N,], the charge of each atom. - boxlength (Tensor, float32): [3,], the length of molecular simulation box in 3 dimensions. - a_14 (Tensor, int32): [M,], the first atom index of each dihedral 1,4 term. - b_14 (Tensor, int32): [M,], the second atom index of each dihedral 1,4 term. - lj_scale_factor (Tensor, float32): [M,], the scale factor for the - Lennard-Jones part of force correction of each dihedral 1,4 term. - lj_type_a (Tensor, float32): [Q,], the A parameter in Lennard-Jones scheme of each atom pair type. - Q is the number of atom pair. - lj_type_b (Tensor, float32): [Q,], the B parameter in Lennard-Jones shceme of each atom pair type. - Q is the number of atom pair. - - Outputs: - ene (Tensor, float32): [M,], the Lennard-Jones potential - energy correction for each necessary dihedral 1,4 term. - - Supported Platforms: - ``GPU`` - """ - r1_xyz = uint_crd_f[a_14] # [uint_x, uint_y, uint_z] (M,3) - r2_xyz = uint_crd_f[b_14] # [uint_x, uint_y, uint_z] (M,3) - dr_xyz = get_periodic_displacement(r2_xyz, r1_xyz, boxlength) - - dr2 = dr_xyz * dr_xyz - dr_2 = 1. / mnp.sum(dr2, 1) - dr_4 = dr_2 * dr_2 - dr_6 = dr_4 * dr_2 - dr_12 = dr_6 * dr_6 # (M,3) - - r1_lj_type = lj_type[a_14] # (M,) - r2_lj_type = lj_type[b_14] # (M,) - - y = mnp.abs(r2_lj_type - r1_lj_type) # (M,) - x = r2_lj_type + r1_lj_type # (M,) - - r2_lj_type = mnp.divide(x + y, 2, dtype=mnp.int32) - x = mnp.divide(x - y, 2, dtype=mnp.int32) - atom_pair_lj_type = mnp.divide(r2_lj_type * (r2_lj_type + 1), 2, dtype=mnp.int32) + x # (M,) - - ene_lin = 0.08333333 * lj_type_a[atom_pair_lj_type] * dr_12 - \ - 0.1666666 * lj_type_b[atom_pair_lj_type] * dr_6 - - return ene_lin * lj_scale_factor diff --git a/tests/st/sponge/functions/dihedral_14_ljcf_force_with_atom_energy.py b/tests/st/sponge/functions/dihedral_14_ljcf_force_with_atom_energy.py deleted file mode 100644 index ec19e9e1f19..00000000000 --- a/tests/st/sponge/functions/dihedral_14_ljcf_force_with_atom_energy.py +++ /dev/null @@ -1,98 +0,0 @@ -# Copyright 2021 Huawei Technologies Co., Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================ -'''dihedral 14 ljcf force with atom energy''' -from mindspore import numpy as np -from mindspore import ops - -from .common import get_zero_tensor, get_periodic_displacement - - -def dihedral_14_ljcf_force_with_atom_energy(atom_numbers, uint_crd, lj_type, charge, boxlength, a_14, b_14, - lj_scale_factor, cf_scale_factor, lj_type_a, lj_type_b): - """ - Calculate the Lennard-Jones and Coulumb energy correction and force correction - for each necessary dihedral 1,4 terms together and add them to the total force - and potential energy for each atom. - - The calculation formula of force correction is the same as operator - :class:`Dihedral14LJForceWithDirectCF`, and the energy correction part is the same - as operator :class:`Dihedral14LJEnergy` and :class:`Dihedral14CFEnergy`. - - Args: - atom_numbers (int): the number of atoms N. - uint_crd (Tensor, uint32): [N, 3], the unsigned int coordinate value of each atom. - lj_type (Tensor, int32): [N,], the Lennard-Jones type of each atom. - charge (Tensor, float32): [N,], the charge of each atom. - boxlength (Tensor, float32): [3,], the length of molecular simulation box in 3 dimensions. - a_14 (Tensor, int32): [M,], the first atom index of each dihedral 1,4 term. - b_14 (Tensor, int32): [M,], the second atom index of each dihedral 1,4 term. - lj_scale_factor (Tensor, float32): [M,], the scale factor for the - Lennard-Jones part of force correction of each dihedral 1,4 term. - cf_scale_factor (Tensor,float) - [M,], the scale factor for the - Coulomb part of force correction for each dihedral 1,4 terms. - lj_type_a (Tensor, float32): [Q,], the A parameter in Lennard-Jones scheme of each atom pair type. - Q is the number of atom pair. - lj_type_b (Tensor, float32): [Q,], the B parameter in Lennard-Jones shceme of each atom pair type. - Q is the number of atom pair. - - Outputs: - frc (Tensor, float32): [N, 3], the force felt by each atom. - atom_energy (Tensor, float32): [N,], the accumulated potential energy for each atom. 
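Several of these kernels map two per-atom LJ types onto a single index into the flattened upper-triangular A/B coefficient tables via the (x+y)/2 and (x-y)/2 manipulation seen above; an equivalent, more explicit NumPy sketch of the same index computation:

import numpy as np

ta = np.array([3, 0, 2])                 # LJ type of atom a in each pair
tb = np.array([1, 1, 2])                 # LJ type of atom b in each pair
hi = np.maximum(ta, tb)
lo = np.minimum(ta, tb)
pair_type = hi * (hi + 1) // 2 + lo      # row-by-row index into the Q = P*(P+1)/2 table
# identical to: x = ta + tb; y = |ta - tb|; hi = (x + y) // 2; lo = (x - y) // 2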
- - Supported Platforms: - ``GPU`` - """ - r1 = uint_crd[a_14] - r2 = uint_crd[b_14] - crd_scaled = get_periodic_displacement(r2, r1, boxlength) - crd_2 = crd_scaled ** 2 - - dr2 = np.sum(crd_2, 1) - dr_2 = 1.0 / dr2 - dr_4 = dr_2 * dr_2 - dr_8 = dr_4 * dr_4 - dr_14 = dr_8 * dr_4 * dr_2 - dr_1 = dr_2 ** 0.5 - frc_cf_abs = cf_scale_factor * dr_2 * dr_1 - charge_mul = charge[a_14] * charge[b_14] - frc_cf_abs = -charge_mul * frc_cf_abs - - r1_lj_type = lj_type[a_14] - r2_lj_type = lj_type[b_14] - x = r2_lj_type + r1_lj_type - y = np.absolute(r2_lj_type - r1_lj_type) - r2_lj_type = (x + y) // 2 - x = (x - y) // 2 - atom_pair_lj_type = (r2_lj_type * (r2_lj_type + 1) // 2) + x - frc_abs = -lj_type_a[atom_pair_lj_type] * dr_14 + lj_type_b[atom_pair_lj_type] * dr_8 - frc_abs = frc_abs * lj_scale_factor - frc_abs = frc_abs + frc_cf_abs - - frc_abs_3 = np.expand_dims(frc_abs, -1) * crd_scaled - frc = get_zero_tensor((atom_numbers, 3)) - a_14_expended = np.expand_dims(a_14, -1) - b_14_expended = np.expand_dims(b_14, -1) - frc = ops.tensor_scatter_add(frc, b_14_expended, -frc_abs_3) - frc = ops.tensor_scatter_add(frc, a_14_expended, frc_abs_3) - - ene_lin = charge_mul * dr_1 - ene_lin = ene_lin * cf_scale_factor - ene_lin2 = 0.08333333 * lj_type_a[atom_pair_lj_type] * dr_4 * dr_8 - \ - 0.1666666 * lj_type_b[atom_pair_lj_type] * dr_4 * dr_2 - ene_lin2 = ene_lin2 * lj_scale_factor - ene_lin_sum = ene_lin + ene_lin2 - atom_energy = get_zero_tensor((atom_numbers,)) - atom_energy = ops.tensor_scatter_add(atom_energy, a_14_expended, ene_lin_sum) - return frc, atom_energy diff --git a/tests/st/sponge/functions/dihedral_14_ljcf_force_with_atom_energy_with_virial.py b/tests/st/sponge/functions/dihedral_14_ljcf_force_with_atom_energy_with_virial.py deleted file mode 100644 index da096b0a6d2..00000000000 --- a/tests/st/sponge/functions/dihedral_14_ljcf_force_with_atom_energy_with_virial.py +++ /dev/null @@ -1,104 +0,0 @@ -# Copyright 2021 Huawei Technologies Co., Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================ -'''dihedral 14 ljcf force with atom energy with virial''' - -import mindspore.numpy as np -import mindspore.ops as ops -from .common import get_zero_tensor, get_periodic_displacement - - -def dihedral_14_ljcf_force_with_atom_energy_with_virial(atom_numbers, uint_crd, lj_type, charge, boxlength, a_14, b_14, - lj_scale_factor, cf_scale_factor, lj_type_a, lj_type_b): - """ - Calculate the Lennard-Jones and Coulumb energy correction and force correction - for each necessary dihedral 1,4 terms together and add them to the total force - and potential energy for each atom. - - The calculation formula of force correction is the same as operator - :class:`Dihedral14LJForceWithDirectCF`, and the energy correction part is the same - as operator :class:`Dihedral14LJEnergy` and :class:`Dihedral14CFEnergy`. - - Args: - atom_numbers (int): the number of atoms N. 
- uint_crd (Tensor, uint32): [N, 3], the unsigned int coordinate value of each atom. - lj_type (Tensor, int32): [N,], the Lennard-Jones type of each atom. - charge (Tensor, float32): [N,], the charge of each atom. - boxlength (Tensor, float32): [3,], the length of molecular simulation box in 3 dimensions. - a_14 (Tensor, int32): [M,], the first atom index of each dihedral 1,4 term. - b_14 (Tensor, int32): [M,], the second atom index of each dihedral 1,4 term. - lj_scale_factor (Tensor, float32): [M,], the scale factor for the - Lennard-Jones part of force correction of each dihedral 1,4 term. - cf_scale_factor (Tensor,float) - [M,], the scale factor for the - Coulomb part of force correction for each dihedral 1,4 terms. - lj_type_a (Tensor, float32): [Q,], the A parameter in Lennard-Jones scheme of each atom pair type. - Q is the number of atom pair. - lj_type_b (Tensor, float32): [Q,], the B parameter in Lennard-Jones shceme of each atom pair type. - Q is the number of atom pair. - - Outputs: - frc (Tensor, float32): [N, 3], the force felt by each atom. - atom_energy (Tensor, float32): [N,], the accumulated potential energy for each atom. - atom_virial (Tensor, float32): [N,], the accumulated potential virial for each atom. - - Supported Platforms: - ``GPU`` - """ - r1 = uint_crd[a_14] - r2 = uint_crd[b_14] - crd_scaled = get_periodic_displacement(r2, r1, boxlength) - crd_2 = crd_scaled ** 2 - - dr2 = np.sum(crd_2, 1) - dr_2 = 1.0 / dr2 - dr_4 = dr_2 * dr_2 - dr_8 = dr_4 * dr_4 - dr_14 = dr_8 * dr_4 * dr_2 - dr_1 = dr_2 ** 0.5 - frc_cf_abs = cf_scale_factor * dr_2 * dr_1 - charge_mul = charge[a_14] * charge[b_14] - frc_cf_abs = -charge_mul * frc_cf_abs - - r1_lj_type = lj_type[a_14] - r2_lj_type = lj_type[b_14] - x = r2_lj_type + r1_lj_type - y = np.absolute(r2_lj_type - r1_lj_type) - r2_lj_type = (x + y) // 2 - x = (x - y) // 2 - atom_pair_lj_type = (r2_lj_type * (r2_lj_type + 1) // 2) + x - frc_abs = -lj_type_a[atom_pair_lj_type] * dr_14 + lj_type_b[atom_pair_lj_type] * dr_8 - frc_abs = frc_abs * lj_scale_factor - frc_abs = frc_abs + frc_cf_abs - - frc_abs_3 = np.expand_dims(frc_abs, -1) * crd_scaled - frc = get_zero_tensor((atom_numbers, 3)) - a_14_expended = np.expand_dims(a_14, -1) - b_14_expended = np.expand_dims(b_14, -1) - frc = ops.tensor_scatter_add(frc, b_14_expended, -frc_abs_3) - frc = ops.tensor_scatter_add(frc, a_14_expended, frc_abs_3) - - ene_lin = charge_mul * dr_1 - ene_lin = ene_lin * cf_scale_factor - ene_lin2 = 0.08333333 * lj_type_a[atom_pair_lj_type] * dr_4 * dr_8 - \ - 0.1666666 * lj_type_b[atom_pair_lj_type] * dr_4 * dr_2 - ene_lin2 = ene_lin2 * lj_scale_factor - ene_lin_sum = ene_lin + ene_lin2 - atom_energy = get_zero_tensor((atom_numbers,)) - atom_virial = get_zero_tensor((atom_numbers,)) - - frc_mul = np.sum(-frc_abs_3 * crd_scaled, axis=1) - - atom_energy = ops.tensor_scatter_add(atom_energy, a_14_expended, ene_lin_sum) - atom_virial = ops.tensor_scatter_add(atom_virial, a_14_expended, frc_mul) - return frc, atom_energy, atom_virial diff --git a/tests/st/sponge/functions/dihedral_energy.py b/tests/st/sponge/functions/dihedral_energy.py deleted file mode 100644 index 5f97c93c268..00000000000 --- a/tests/st/sponge/functions/dihedral_energy.py +++ /dev/null @@ -1,78 +0,0 @@ -# Copyright 2021 Huawei Technologies Co., Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================ -'''dihedral energy''' -from mindspore import numpy as np - -from .common import get_periodic_displacement - - -def dihedral_energy(dihedral_numbers, uint_crd_f, scalar_f, atom_a, atom_b, atom_c, atom_d, ipn, pk, gamc, gams, pn): - """ - Calculate the potential energy caused by dihedral terms for each 4-atom pair. - Assume our system has N atoms and M dihedral terms. - - .. math:: - - E = k(1 + cos(n*phi - phi_0)) - - Args: - dihedral_numbers (int) - the number of dihedral terms M. - uint_crd_f (Tensor, uint32): [N, 3], the unsigned int coordinates - value of each atom. - scalar_f (Tensor, float32): [3,], the 3-D scale factor between - the real space float coordinates and the unsigned int coordinates. - atom_a (Tensor, int32): [M,], the 1st atom index of each dihedral. - atom_b (Tensor, int32): [M,], the 2nd atom index of each dihedral. - atom_c (Tensor, int32): [M,], the 3rd atom index of each dihedral. - atom_d (Tensor, int32): [M,], the 4th atom index of each dihedral. - 4 atoms are connected in the form a-b-c-d. - ipn (Tensor, int32): [M,], the period of dihedral angle of each dihedral. - pk (Tensor, float32): [M,], the force constant of each dihedral. - gamc (Tensor, float32): [M,], k*cos(phi_0) of each dihedral. - gams (Tensor, float32): [M,], k*sin(phi_0) of each dihedral. - pn (Tensor, float32): [M,], the floating point form of ipn. - - Outputs: - ene (Tensor, float32): [M,], the potential energy for each - dihedral term. - - Supported Platforms: - ``GPU`` - """ - drij = get_periodic_displacement(uint_crd_f[atom_a], uint_crd_f[atom_b], scalar_f) - drkj = get_periodic_displacement(uint_crd_f[atom_c], uint_crd_f[atom_b], scalar_f) - drkl = get_periodic_displacement(uint_crd_f[atom_c], uint_crd_f[atom_d], scalar_f) - - r1 = np.cross(drij, drkj) - r2 = np.cross(drkl, drkj) - - r1_1 = 1. / np.norm(r1, axis=-1) - r2_1 = 1. / np.norm(r2, axis=-1) - r1_1_r2_1 = r1_1 * r2_1 - - phi = np.sum(r1 * r2, -1) * r1_1_r2_1 - phi = np.clip(phi, -0.999999, 0.999999) - phi = np.arccos(phi) - - sign = np.sum(np.cross(r2, r1) * drkj, -1) - phi = np.copysign(phi, sign) - - phi = np.pi - phi - nphi = pn * phi - - cos_nphi = np.cos(nphi) - sin_nphi = np.sin(nphi) - - return pk + cos_nphi * gamc + sin_nphi * gams diff --git a/tests/st/sponge/functions/dihedral_force_with_atom_energy.py b/tests/st/sponge/functions/dihedral_force_with_atom_energy.py deleted file mode 100644 index acf17f2e42b..00000000000 --- a/tests/st/sponge/functions/dihedral_force_with_atom_energy.py +++ /dev/null @@ -1,127 +0,0 @@ -# Copyright 2021 Huawei Technologies Co., Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================ -'''dihedral force with atom energy''' -from mindspore import ops -import mindspore.numpy as mnp -from mindspore.ops import functional as F -from .common import PI, get_periodic_displacement - - -def dihedral_force_with_atom_energy(dihedral_numbers, uint_crd_f, scaler_f, - atom_a, atom_b, atom_c, atom_d, - ipn, pk, gamc, gams, pn): - """ - Calculate dihedral force and potential energy together. - - The calculation formula is the same as operator DihedralForce() and DihedralEnergy(). - - Args: - dihedral_numbers (int) - the number of dihedral terms M. - uint_crd_f (Tensor, uint32): [N, 3], the unsigned int coordinates - value of each atom. - scaler_f (Tensor, float32): [3,], the 3-D scale factor between - the real space float coordinates and the unsigned int coordinates. - atom_a (Tensor, int32): [M,], the 1st atom index of each dihedral. - atom_b (Tensor, int32): [M,], the 2nd atom index of each dihedral. - atom_c (Tensor, int32): [M,], the 3rd atom index of each dihedral. - atom_d (Tensor, int32): [M,], the 4th atom index of each dihedral. - 4 atoms are connected in the form a-b-c-d. - ipn (Tensor, int32): [M,], the period of dihedral angle of each dihedral. - pk (Tensor, float32): [M,], the force constant of each dihedral. - gamc (Tensor, float32): [M,], k*cos(phi_0) of each dihedral. - gams (Tensor, float32): [M,], k*sin(phi_0) of each dihedral. - pn (Tensor, float32): [M,], the floating point form of ipn. - - Outputs: - frc_f (Tensor, float32): [N, 3], same as operator DihedralForce(). - ene (Tensor, float32): [N,], same as operator DihedralAtomEnergy(). - - Supported Platforms: - ``GPU`` - """ - uint_crd_i = uint_crd_f[atom_a] # (M, 3) - uint_crd_j = uint_crd_f[atom_b] # (M, 3) - uint_crd_k = uint_crd_f[atom_c] # (M, 3) - uint_crd_l = uint_crd_f[atom_d] # (M, 3) - - drij = get_periodic_displacement(uint_crd_i, uint_crd_j, scaler_f) # (M, 3) float32 - drkj = get_periodic_displacement(uint_crd_k, uint_crd_j, scaler_f) # (M, 3) float32 - drkl = get_periodic_displacement(uint_crd_k, uint_crd_l, scaler_f) # (M, 3) float32 - - def vecmul(veca, vecb): - """ - veca - [M, 3] - vecb - [M, 3] - Returns: - Tensor - [M, 1] - """ - return mnp.sum(veca * vecb, 1, keepdims=True) - - r1 = mnp.cross(drij, drkj) # (M, 3) - r2 = mnp.cross(drkl, drkj) # (M, 3) - - r1_1 = 1. / mnp.norm(r1, axis=-1).reshape(-1, 1) - r2_1 = 1. 
/ mnp.norm(r2, axis=-1).reshape(-1, 1) - r1_2 = r1_1 * r1_1 # (M, 1) - r2_2 = r2_1 * r2_1 # (M, 1) - r1_1_r2_1 = r1_1 * r2_1 # (M, 1) - - phi = vecmul(r1, r2) * r1_1_r2_1 # (M, 1) - phi = mnp.clip(phi, -0.999999, 0.999999) - phi = mnp.arccos(phi) # (M, 1) - - phi = PI - phi # (M, 1) - - ipn = ipn.reshape(-1, 1) - pk = pk.reshape(-1, 1) - gamc = gamc.reshape(-1, 1) - gams = gams.reshape(-1, 1) - pn = pn.reshape(-1, 1) - nphi = pn * phi # (M, 1) - - cos_phi = mnp.cos(phi) # (M, 1) - sin_phi = mnp.sin(phi) # (M, 1) - cos_nphi = mnp.cos(nphi) # (M, 1) - sin_nphi = mnp.sin(nphi) # (M, 1) - - ipn = F.select(ipn % 2 == 0, ipn * 0, ipn) - lower = gamc * (pn - ipn + ipn * cos_phi) - upper = pn * (gamc * sin_nphi - gams * cos_nphi) / sin_phi # (M, 1) - de_dphi = F.select(mnp.abs(sin_phi) < 1e-6, lower, upper) # (M, 1) - - dphi_dr1 = r1_1_r2_1 * r2 + cos_phi * r1_2 * r1 # (M, 3) - dphi_dr2 = r1_1_r2_1 * r1 + cos_phi * r2_2 * r2 # (M, 3) - - de_dri = de_dphi * mnp.cross(drkj, dphi_dr1) - de_drl = de_dphi * mnp.cross(dphi_dr2, drkj) - de_drj_part = de_dphi * (mnp.cross(drij, dphi_dr1) + (mnp.cross(drkl, dphi_dr2))) - - fi = de_dri - fj = de_drj_part - de_dri - fk = -de_drl - de_drj_part - fl = de_drl - - n = uint_crd_f.shape[0] - frc = mnp.zeros((n, 3)) - frc = ops.tensor_scatter_add(frc, mnp.expand_dims(atom_a, -1), fi) - frc = ops.tensor_scatter_add(frc, mnp.expand_dims(atom_b, -1), fj) - frc = ops.tensor_scatter_add(frc, mnp.expand_dims(atom_c, -1), fk) - frc = ops.tensor_scatter_add(frc, mnp.expand_dims(atom_d, -1), fl) - - temp = pk + cos_nphi * gamc + sin_nphi * gams - ene = mnp.zeros((n, 1)) - ene = ops.tensor_scatter_add(ene, mnp.expand_dims(atom_a, -1), temp) - - return frc, ene.ravel() diff --git a/tests/st/sponge/functions/last_crd_to_dr.py b/tests/st/sponge/functions/last_crd_to_dr.py deleted file mode 100644 index 1a826722ba6..00000000000 --- a/tests/st/sponge/functions/last_crd_to_dr.py +++ /dev/null @@ -1,48 +0,0 @@ -# Copyright 2021 Huawei Technologies Co., Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================ -'''last crd to dr''' -import mindspore.numpy as np -import mindspore.ops as ops - -int32_four = np.array(4, dtype=np.int32) - - -def last_crd_to_dr(crd, quarter_cof, uint_dr_to_dr, atom_i_serials, atom_j_serials): - """ - Calculate the diplacement vector of each constrained atom pair. - - Args: - crd(Tensor, float32): [N, 3], the coordinate of each atom. - quarter_cof(Tensor, float32): [3, ], the 3-D scale factor. - uint_dr_to_dr(Tensor, float32): [3, ], the 3-D scale factor (x, y, z). - atom_i_serials(Tensor, int32): [M, ], the first atom index of each constrained atom pair. - atom_j_serials(Tensor, int32): [M, ], the second atom index of each constrained atom pair. - - Outputs: - pair_dr(Tensor, float32): [M, 3], the displacement vector of each constrained atom pair. 
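The signed dihedral angle used by both dihedral kernels above can be reproduced in a few lines of NumPy; a sketch over real-space coordinates (dihedral_angle_ref is a hypothetical name):

import numpy as np

def dihedral_angle_ref(ri, rj, rk, rl):
    # a-b-c-d torsion; same r1/r2 cross-product construction as above
    drij, drkj, drkl = ri - rj, rk - rj, rk - rl
    r1 = np.cross(drij, drkj)
    r2 = np.cross(drkl, drkj)
    cosphi = np.sum(r1 * r2, -1) / (np.linalg.norm(r1, axis=-1)
                                    * np.linalg.norm(r2, axis=-1))
    cosphi = np.clip(cosphi, -0.999999, 0.999999)
    phi = np.copysign(np.arccos(cosphi), np.sum(np.cross(r2, r1) * drkj, -1))
    return np.pi - phi   # the energy is then pk + cos(n*phi)*gamc + sin(n*phi)*gams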
-
-    Supported Platforms:
-        ``GPU``
-    """
-    tempi = (crd[atom_i_serials] * quarter_cof).astype("int32")
-    tempj = (crd[atom_j_serials] * quarter_cof).astype("int32")
-
-    uint_crd_i = (tempi * int32_four).astype("uint32")
-    uint_crd_j = (tempj * int32_four).astype("uint32")
-
-    int_pair_dr = (uint_crd_i - uint_crd_j).astype("int32")
-    int_pair_dr = ops.depend(int_pair_dr, int_pair_dr)
-    pair_dr = int_pair_dr * uint_dr_to_dr
-    return pair_dr
diff --git a/tests/st/sponge/functions/lj_energy.py b/tests/st/sponge/functions/lj_energy.py
deleted file mode 100644
index a78e2254fce..00000000000
--- a/tests/st/sponge/functions/lj_energy.py
+++ /dev/null
@@ -1,81 +0,0 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ============================================================================
-'''lj energy'''
-import mindspore.numpy as np
-from mindspore import Tensor
-
-from .common import get_neighbour_index, get_periodic_displacement
-
-zero_tensor = Tensor(0).astype("float32")
-
-
-def lj_energy(atom_numbers, cutoff_square, uint_crd, atom_lj_type, scaler,
-              nl_atom_numbers, nl_atom_serial, lj_a, lj_b):
-    """
-    Calculate the Van der Waals interaction energy described by the Lennard-Jones
-    potential for each atom. Assume the number of atoms is N, and the number
-    of Lennard-Jones types for all atoms is P, which means there will be
-    Q = P*(P+1)/2 types of possible Lennard-Jones interactions for all kinds
-    of atom pairs.
-
-    .. math::
-
-        dr = (x_a-x_b, y_a-y_b, z_a-z_b)
-
-    .. math::
-        E = A/|dr|^{12} - B/|dr|^{6}
-
-    Args:
-        atom_numbers (int): the number of atoms, N.
-        cutoff_square (float): the square value of the cutoff.
-        uint_crd (Tensor, uint32): [N, 3], the unsigned int coordinate value of each atom.
-        atom_lj_type (Tensor, int32): [N,], the Lennard-Jones type of each atom.
-        scaler (Tensor, float32): [3,], the scale factor between real
-            space coordinate and its unsigned int value.
-        nl_atom_numbers (Tensor, int32): [N,], the number of atoms in the neighbor list of each atom.
-        nl_atom_serial (Tensor, int32): [N, 800], the neighbor list of each atom, the max number is 800.
-        lj_a (Tensor, float32): [Q,], the Lennard-Jones A coefficient of each kind of atom pair.
-            Q is the number of atom pair types.
-        lj_b (Tensor, float32): [Q,], the Lennard-Jones B coefficient of each kind of atom pair.
-            Q is the number of atom pair types.
-
-    Outputs:
-        d_LJ_energy_atom (Tensor, float32): [N,], the Lennard-Jones potential energy of each atom.
-
-    Supported Platforms:
-        ``GPU``
-    """
-    nl_atom_serial_crd = uint_crd[nl_atom_serial]
-    r2_lj_type = atom_lj_type[nl_atom_serial]
-    crd_expand = np.expand_dims(uint_crd, 1)
-    crd_d = get_periodic_displacement(nl_atom_serial_crd, crd_expand, scaler)
-    crd_2 = crd_d ** 2
-    crd_2 = np.sum(crd_2, -1)
-    nl_atom_mask = get_neighbour_index(atom_numbers, nl_atom_serial.shape[1])
-    mask = np.logical_and((crd_2 < cutoff_square), (nl_atom_mask < np.expand_dims(nl_atom_numbers, -1)))
-    dr_2 = 1. / crd_2
-    dr_6 = np.power(dr_2, 3.)
-    r1_lj_type = np.expand_dims(atom_lj_type, -1)
-    x = r2_lj_type + r1_lj_type
-    y = np.absolute(r2_lj_type - r1_lj_type)
-    r2_lj_type = (x + y) // 2
-    x = (x - y) // 2
-    atom_pair_lj_type = (r2_lj_type * (r2_lj_type + 1) // 2) + x
-    dr_2 = (0.083333333 * lj_a[atom_pair_lj_type] * dr_6 - 0.166666666 * lj_b[atom_pair_lj_type]) * dr_6
-    ene_lin = mask * dr_2
-    ene_lin = np.sum(ene_lin, -1)
-    return ene_lin
diff --git a/tests/st/sponge/functions/lj_force_pme_direct_force.py b/tests/st/sponge/functions/lj_force_pme_direct_force.py
deleted file mode 100644
index 8856ec5ad72..00000000000
--- a/tests/st/sponge/functions/lj_force_pme_direct_force.py
+++ /dev/null
@@ -1,97 +0,0 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ============================================================================
-'''lj force pme direct force'''
-from mindspore import numpy as np
-from mindspore import ops
-
-from .common import get_neighbour_index, get_periodic_displacement, get_zero_tensor
-
-TWO_DIVIDED_BY_SQRT_PI = 1.1283791670218446
-MAX_NUMBER_OF_NEIGHBOR = 800
-
-
-def lj_force_pme_direct_force(atom_numbers, cutoff, pme_beta, uint_crd, lj_type, charge,
-                              scalar, nl_numbers, nl_serial, d_lj_a, d_lj_b):
-    """
-    Calculate the Lennard-Jones force and the PME direct force together.
-
-    The calculation formula of the Lennard-Jones part is the same as in operator
-    ljForce(), and the PME direct part is within the PME method.
-
-    Args:
-        atom_numbers (int): the number of atoms, N.
-        cutoff (float): the cutoff distance for the force calculation.
-        pme_beta (float): PME beta parameter, same as operator PMEReciprocalForce().
-        uint_crd (Tensor, uint32): [N, 3], the unsigned int coordinate value of each atom.
-        lj_type (Tensor, int32): [N,], the Lennard-Jones type of each atom.
-        charge (Tensor, float32): [N,], the charge carried by each atom.
-        scalar (Tensor, float32): [3,], the scale factor between real
-            space coordinate and its unsigned int value.
-        nl_numbers (Tensor, int32): [N,], the number of atoms in the neighbor list of each atom.
-        nl_serial (Tensor, int32): [N, 800], the neighbor list of each atom, the max number is 800.
-        d_lj_a (Tensor, float32): [Q,], the Lennard-Jones A coefficient of each kind of atom pair.
-            Q is the number of atom pair types.
-        d_lj_b (Tensor, float32): [Q,], the Lennard-Jones B coefficient of each kind of atom pair.
-            Q is the number of atom pair types.
-
-    Outputs:
-        frc (Tensor, float32), [N, 3], the force felt by each atom.
-
-    Supported Platforms:
-        ``GPU``
-    """
-    n = uint_crd.shape[0]
-    frc = get_zero_tensor((n, 3), np.float32)
-    r1 = np.tile(np.expand_dims(uint_crd, 1), (1, MAX_NUMBER_OF_NEIGHBOR, 1))
-    r2 = uint_crd[nl_serial]
-
-    dr = get_periodic_displacement(r2, r1, scalar)
-    dr_abs = np.norm(dr, axis=-1)
-    nl_atom_mask = get_neighbour_index(atom_numbers, nl_serial.shape[1])
-    mask = np.logical_and((dr_abs < cutoff), (nl_atom_mask < np.expand_dims(nl_numbers, -1)))
-
-    dr_1 = 1. / dr_abs
-    dr_2 = dr_1 * dr_1
-    dr_4 = dr_2 * dr_2
-    dr_8 = dr_4 * dr_4
-    dr_6 = dr_4 * dr_2
-
-    r1_lj_type = np.expand_dims(lj_type, -1)
-    r2_lj_type = lj_type[nl_serial]
-    x = r2_lj_type + r1_lj_type
-    y = np.absolute(r2_lj_type - r1_lj_type)
-    r2_lj_type = (x + y) // 2
-    x = (x - y) // 2
-    atom_pair_lj_type = (r2_lj_type * (r2_lj_type + 1) // 2) + x
-
-    frc_abs = (-d_lj_a[atom_pair_lj_type] * dr_6 + d_lj_b[atom_pair_lj_type]) * dr_8
-    beta_dr = pme_beta * dr_abs
-    frc_cf_abs = beta_dr * TWO_DIVIDED_BY_SQRT_PI * np.exp(-beta_dr * beta_dr) + ops.Erfc()(beta_dr)
-    frc_cf_abs *= dr_2 * dr_1
-
-    charge1 = np.tile(np.expand_dims(charge, -1), MAX_NUMBER_OF_NEIGHBOR)
-    charge2 = charge[nl_serial]
-    frc_cf_abs *= charge1 * charge2
-
-    frc_abs -= frc_cf_abs
-    frc_lin = np.expand_dims(frc_abs, -1) * dr
-    # apply cutoff mask
-    mask = np.expand_dims(mask, -1)
-    frc_lin = np.where(mask, frc_lin, 0)
-    frc_record = np.sum(frc_lin, -2)
-    nl_serial = np.where(nl_atom_mask >= np.expand_dims(nl_numbers, -1), -1, nl_serial)
-    frc = ops.tensor_scatter_add(frc, np.expand_dims(nl_serial, -1), -frc_lin)
-    frc += frc_record
-    return frc
diff --git a/tests/st/sponge/functions/lj_force_with_virial_energy.py b/tests/st/sponge/functions/lj_force_with_virial_energy.py
deleted file mode 100644
index 1aafbf65e8b..00000000000
--- a/tests/st/sponge/functions/lj_force_with_virial_energy.py
+++ /dev/null
@@ -1,101 +0,0 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ============================================================================
-'''lj force with virial energy'''
-from mindspore import numpy as np
-from mindspore import ops
-
-from .common import get_neighbour_index, get_periodic_displacement, get_zero_tensor
-
-TWO_DIVIDED_BY_SQRT_PI = 1.1283791670218446
-MAX_NUMBER_OF_NEIGHBOR = 800
-
-
-def lj_force_with_virial_energy(atom_numbers, cutoff, pme_beta, uint_crd, lj_type, charge,
-                                scalar, nl_numbers, nl_serial, d_lj_a, d_lj_b):
-    """
-    Calculate the Lennard-Jones force and the PME direct force together, as well
-    as the Lennard-Jones virial and the direct Coulomb energy.
-
-    The calculation formula of the Lennard-Jones part is the same as in operator
-    ljForce(), and the PME direct part is within the PME method.
-
-    Args:
-        atom_numbers (int): the number of atoms, N.
-        cutoff (float): the cutoff distance for the force calculation.
-        pme_beta (float): PME beta parameter, same as operator PMEReciprocalForce().
-        uint_crd (Tensor, uint32): [N, 3], the unsigned int coordinate value of each atom.
-        lj_type (Tensor, int32): [N,], the Lennard-Jones type of each atom.
-        charge (Tensor, float32): [N,], the charge carried by each atom.
-        scalar (Tensor, float32): [3,], the scale factor between real
-            space coordinate and its unsigned int value.
-        nl_numbers (Tensor, int32): [N,], the number of atoms in the neighbor list of each atom.
-        nl_serial (Tensor, int32): [N, 800], the neighbor list of each atom, the max number is 800.
-        d_lj_a (Tensor, float32): [Q,], the Lennard-Jones A coefficient of each kind of atom pair.
-            Q is the number of atom pair types.
- d_lj_b (Tensor, float32): [Q,], the Lennard-Jones B coefficient of each kind of atom pair. - Q is the number of atom pair. - - Outputs: - frc (Tensor, float32), [N, 3], the force felt by each atom. - - Supported Platforms: - ``GPU`` - """ - n = uint_crd.shape[0] - frc = get_zero_tensor((n, 3), np.float32) - r1 = np.tile(np.expand_dims(uint_crd, 1), (1, MAX_NUMBER_OF_NEIGHBOR, 1)) - nl_atom_mask = get_neighbour_index(atom_numbers, nl_serial.shape[1]) - nl_serial = np.where(nl_atom_mask >= np.expand_dims(nl_numbers, -1), -1, nl_serial) - r2 = uint_crd[nl_serial] - - dr = get_periodic_displacement(r2, r1, scalar) - dr_abs = np.norm(dr, axis=-1) - - mask = np.logical_and((dr_abs < cutoff), (nl_atom_mask < np.expand_dims(nl_numbers, -1))) - - dr_1 = 1. / dr_abs - dr_2 = dr_1 * dr_1 - dr_4 = dr_2 * dr_2 - dr_8 = dr_4 * dr_4 - dr_6 = dr_4 * dr_2 - - r1_lj_type = np.expand_dims(lj_type, -1) - r2_lj_type = lj_type[nl_serial] - x = r2_lj_type + r1_lj_type - y = np.absolute(r2_lj_type - r1_lj_type) - r2_lj_type = (x + y) // 2 - x = (x - y) // 2 - atom_pair_lj_type = (r2_lj_type * (r2_lj_type + 1) // 2) + x - - frc_abs = (-d_lj_a[atom_pair_lj_type] * dr_6 + d_lj_b[atom_pair_lj_type]) * dr_8 - beta_dr = pme_beta * dr_abs - frc_cf_abs = beta_dr * TWO_DIVIDED_BY_SQRT_PI * np.exp(-beta_dr * beta_dr) + ops.Erfc()(beta_dr) - frc_cf_abs *= dr_2 * dr_1 - - charge1 = np.tile(np.expand_dims(charge, -1), MAX_NUMBER_OF_NEIGHBOR) - charge2 = charge[nl_serial] - frc_cf_abs *= charge1 * charge2 - - energy_lin = mask * charge1 * charge2 * ops.Erfc()(beta_dr) * dr_1 - virial_lin = -1 * (mask * frc_abs * dr_abs * dr_abs) - - frc_abs -= frc_cf_abs - frc_lin = np.expand_dims(frc_abs, -1) * dr - frc_lin = np.expand_dims(mask, -1) * frc_lin - frc_record = np.sum(frc_lin, -2) - frc = ops.tensor_scatter_add(frc, np.expand_dims(nl_serial, -1), -frc_lin) - frc += frc_record - direct_cf_energy = np.sum(energy_lin, -1) - lj_virial = np.sum(virial_lin, -1) - return frc, lj_virial, direct_cf_energy diff --git a/tests/st/sponge/functions/md_iteration_gradient_descent.py b/tests/st/sponge/functions/md_iteration_gradient_descent.py deleted file mode 100644 index 17c754c80a8..00000000000 --- a/tests/st/sponge/functions/md_iteration_gradient_descent.py +++ /dev/null @@ -1,38 +0,0 @@ -# Copyright 2021 Huawei Technologies Co., Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================ -'''md information gradient descent''' -import mindspore.numpy as np - - -def md_iteration_gradient_descent(atom_numbers, learning_rate, crd, frc): - """ - Update the coordinate of each atom in the direction of potential for energy minimization. - - Args: - atom_numbers (int): the number of atoms N. - learning_rate (float): the update step length. - crd (Tensor, float32): [N, 3], the coordinate of each atom. - frc (Tensor, float32): [N, 3], the force felt by each atom. - - Returns: - crd (Tensor, float32): [N, 3], the coordinate of each atom. 
-        frc (Tensor, float32): [N, 3], the force felt by each atom, reset to zero after the update.
-
-    Supported Platforms:
-        ``GPU``
-    """
-    crd = crd + learning_rate * frc
-    frc = np.zeros([atom_numbers, 3])
-    return crd, frc
diff --git a/tests/st/sponge/functions/md_iteration_leap_frog_liujian.py b/tests/st/sponge/functions/md_iteration_leap_frog_liujian.py
deleted file mode 100644
index f408a901161..00000000000
--- a/tests/st/sponge/functions/md_iteration_leap_frog_liujian.py
+++ /dev/null
@@ -1,66 +0,0 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ============================================================================
-'''md iteration leap frog liujian'''
-import mindspore.numpy as np
-import mindspore.ops as ops
-
-from .common import get_full_tensor
-
-standard_normal = ops.StandardNormal()
-standard_normal.add_prim_attr("use_curand", True)
-
-
-def md_iteration_leap_frog_liujian(atom_numbers, half_dt, dt, exp_gamma, inverse_mass,
-                                   sqrt_mass_inverse, velocity, crd, frc, acc):
-    """
-    One step of the classical leap frog algorithm to solve the finite difference
-    Hamiltonian equations of motion for a certain system, using Langevin dynamics
-    with Liu's thermostat scheme. Assume the number of atoms is N and the target
-    control temperature is T.
-
-    The detailed iteration formula can be found in this paper: A unified thermostat
-    scheme for efficient configurational sampling for classical/quantum canonical
-    ensembles via molecular dynamics. DOI: 10.1063/1.4991621.
-
-    Args:
-        atom_numbers (int): the number of atoms N.
-        half_dt (float): half of the time step for finite difference.
-        dt (float): time step for finite difference.
-        exp_gamma (float): parameter in Liu's dynamics, equal to
-            exp(-gamma_ln * dt), where gamma_ln is the friction factor in Langevin
-            dynamics.
-        inverse_mass (Tensor, float32): [N,], the inverse value of
-            mass of each atom.
-        sqrt_mass_inverse (Tensor, float32): [N,], the inverse square root value
-            of the effective mass in Liu's dynamics of each atom.
-        velocity (Tensor, float32): [N, 3], the velocity of each atom.
-        crd (Tensor, float32): [N, 3], the coordinate of each atom.
-        frc (Tensor, float32): [N, 3], the force felt by each atom.
-        acc (Tensor, float32): [N, 3], the acceleration of each atom.
-
-    Outputs:
-        velocity (Tensor, float32): [N, 3], the updated velocity of each atom.
-        updated_crd (Tensor, float32): [N, 3], the updated coordinate of each atom.
-        acc (Tensor, float32): [N, 3], the updated acceleration of each atom.
-
-    Supported Platforms:
-        ``GPU``
-    """
-    acc = np.tile(np.expand_dims(inverse_mass, 1), (1, 3)) * frc
-    velocity = velocity + get_full_tensor((atom_numbers, 3), dt) * acc
-    updated_crd = crd + get_full_tensor((atom_numbers, 3), half_dt) * velocity
-    velocity = exp_gamma * velocity + np.expand_dims(sqrt_mass_inverse, 1) * standard_normal((atom_numbers, 3))
-    updated_crd = updated_crd + half_dt * velocity
-    return velocity, updated_crd, acc
diff --git a/tests/st/sponge/functions/md_iteration_leap_frog_liujian_with_max_vel.py b/tests/st/sponge/functions/md_iteration_leap_frog_liujian_with_max_vel.py
deleted file mode 100644
index 9e87840e0c7..00000000000
--- a/tests/st/sponge/functions/md_iteration_leap_frog_liujian_with_max_vel.py
+++ /dev/null
@@ -1,72 +0,0 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ============================================================================
-'''md iteration leap frog liujian with max velocity limitation'''
-import mindspore.numpy as np
-import mindspore.ops as ops
-
-from .common import get_full_tensor
-
-standard_normal = ops.StandardNormal()
-standard_normal.add_prim_attr("use_curand", True)
-
-
-def md_iteration_leap_frog_liujian_with_max_vel(atom_numbers, half_dt, dt, exp_gamma, inverse_mass,
-                                                sqrt_mass_inverse, velocity, crd, frc, acc, max_vel):
-    """
-    One step of the classical leap frog algorithm to solve the finite difference
-    Hamiltonian equations of motion for a certain system, using Langevin dynamics
-    with Liu's thermostat scheme, with the velocity of each atom clamped to a
-    maximum value. Assume the number of atoms is N and the target control
-    temperature is T.
-
-    The detailed iteration formula can be found in this paper: A unified thermostat
-    scheme for efficient configurational sampling for classical/quantum canonical
-    ensembles via molecular dynamics. DOI: 10.1063/1.4991621.
-
-    Args:
-        atom_numbers (int): the number of atoms N.
-        half_dt (float): half of the time step for finite difference.
-        dt (float): time step for finite difference.
-        exp_gamma (float): parameter in Liu's dynamics, equal to
-            exp(-gamma_ln * dt), where gamma_ln is the friction factor in Langevin
-            dynamics.
-        inverse_mass (Tensor, float32): [N,], the inverse value of
-            mass of each atom.
-        sqrt_mass_inverse (Tensor, float32): [N,], the inverse square root value
-            of the effective mass in Liu's dynamics of each atom.
-        velocity (Tensor, float32): [N, 3], the velocity of each atom.
-        crd (Tensor, float32): [N, 3], the coordinate of each atom.
-        frc (Tensor, float32): [N, 3], the force felt by each atom.
-        acc (Tensor, float32): [N, 3], the acceleration of each atom.
-        max_vel (float32): the max velocity of each atom.
-
-    Outputs:
-        velocity (Tensor, float32): [N, 3], the updated velocity of each atom.
-        updated_crd (Tensor, float32): [N, 3], the updated coordinate of each atom.
-        acc (Tensor, float32): [N, 3], the updated acceleration of each atom.
-
-    Supported Platforms:
-        ``GPU``
-    """
-    acc = np.tile(np.expand_dims(inverse_mass, 1), (1, 3)) * frc
-    velocity = velocity + get_full_tensor((atom_numbers, 3), dt) * acc
-
-    cur_vel = np.sqrt(np.sum(velocity ** 2, -1, keepdims=True))
-    velocity = np.where(cur_vel < max_vel, velocity, max_vel / cur_vel * velocity)
-
-    updated_crd = crd + get_full_tensor((atom_numbers, 3), half_dt) * velocity
-    velocity = exp_gamma * velocity + np.expand_dims(sqrt_mass_inverse, 1) * standard_normal((atom_numbers, 3))
-    updated_crd = updated_crd + half_dt * velocity
-    return velocity, updated_crd, acc
diff --git a/tests/st/sponge/functions/md_temperature.py b/tests/st/sponge/functions/md_temperature.py
deleted file mode 100644
index f9916328e07..00000000000
--- a/tests/st/sponge/functions/md_temperature.py
+++ /dev/null
@@ -1,70 +0,0 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ============================================================================
-'''md temperature'''
-import mindspore.numpy as np
-from mindspore.ops import functional as F
-
-
-constant_kb = np.array(0.00198716, np.float64)
-
-
-def md_temperature_dense(mask, atom_vel_f, atom_mass):
-    """
-    Calculate the MD temperature without sparse operations.
-    """
-    residue_numbers = mask.shape[0]
-    res = atom_vel_f * atom_mass.reshape(1, -1, 1)
-    res = np.tile(res, (residue_numbers, 1, 1))
-    momentum = np.sum(np.expand_dims(mask, -1) * res, 1)
-    res_mass = np.sum(mask * atom_mass.reshape(1, -1), -1)
-    ek = 2. * np.sum(momentum * momentum, -1) / res_mass * 0.5 / 3. / constant_kb / residue_numbers
-    return ek.astype(np.float32)
-
-
-def md_temperature_sparse(mask, atom_vel_f, atom_mass):
-    """
-    Calculate the MD temperature with CSR sparse operations.
-    """
-    residue_numbers = mask.shape[0]
-    res = atom_vel_f * atom_mass.reshape(-1, 1)
-    res_x, res_y, res_z = np.split(res, 3, axis=1)
-    momentum_x = mask * res_x.reshape(1, -1)
-    momentum_y = mask * res_y.reshape(1, -1)
-    momentum_z = mask * res_z.reshape(1, -1)
-    momentum_x = F.csr_reduce_sum(momentum_x, 1)
-    momentum_y = F.csr_reduce_sum(momentum_y, 1)
-    momentum_z = F.csr_reduce_sum(momentum_z, 1)
-    momentum = momentum_x * momentum_x + momentum_y * momentum_y + momentum_z * momentum_z
-
-    res_mass = mask * atom_mass.reshape(1, -1)
-    res_mass = F.csr_reduce_sum(res_mass, 1)
-    n = 3. * residue_numbers
-    ek = momentum / res_mass / n / constant_kb
-
-    return ek.astype(np.float32)
-
-
-def md_temperature(mask, atom_vel_f, atom_mass, sparse=True):
-    """
-    Calculate the MD temperature of each residue, dispatching to the sparse
-    (CSR) or dense implementation according to `sparse`.
- - Supported Platforms: - ``GPU`` - """ - if sparse: - return md_temperature_sparse(mask, atom_vel_f, atom_mass) - return md_temperature_dense(mask, atom_vel_f, atom_mass) diff --git a/tests/st/sponge/functions/neighbor_list_update.py b/tests/st/sponge/functions/neighbor_list_update.py deleted file mode 100644 index 5dee186177c..00000000000 --- a/tests/st/sponge/functions/neighbor_list_update.py +++ /dev/null @@ -1,197 +0,0 @@ -# Copyright 2021 Huawei Technologies Co., Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================ -'''neighbor list update''' -from mindspore import numpy as np -from mindspore import ops, Tensor -from .common import get_periodic_displacement, get_range_tensor, get_neighbour_index -from .crd_to_uint_crd import crd_to_uint_crd - -step = Tensor(1, np.int32) - - -def not_excluded_mask(atom_numbers, excluded_list_start, excluded_list, excluded_numbers): - not_excluded = np.full((atom_numbers, atom_numbers), True, np.bool_) - for i, v in enumerate(excluded_list_start): - if excluded_numbers[i] > 0: - excluded_serial = excluded_list[ops.tensor_range(v, v + excluded_numbers[i], step)] - not_excluded[i, excluded_serial] = False - return not_excluded - - -def find_atom_neighbors(atom_numbers, uint_crd, uint_dr_to_dr_cof, cutoff_skin_square): - dr = get_periodic_displacement(uint_crd, np.expand_dims(uint_crd, -2), uint_dr_to_dr_cof) - dr2 = np.sum(dr ** 2, -1) - atom_idx = get_range_tensor(atom_numbers) - nl_mask = np.logical_and(atom_idx.reshape(-1, 1) < atom_idx, dr2 < cutoff_skin_square) - return nl_mask - - -def delete_excluded_atoms_serial_in_neighbor_list( - atom_numbers, max_neighbor_numbers, nl_mask, not_excluded): - mask = np.logical_and(nl_mask, not_excluded) - serial_idx = get_neighbour_index(atom_numbers, atom_numbers) - nl_serial = np.where(mask, serial_idx, atom_numbers) - nl_serial = np.sort(nl_serial, -1)[:, : max_neighbor_numbers] - nl_numbers = np.sum(mask, -1) - return nl_numbers, nl_serial - - -def crd_periodic_map(crd, box_length): - crd = np.where(crd < 0, crd + box_length, crd) - crd = np.where(crd > box_length, crd - box_length, crd) - return crd - - -def find_atom_in_grid_serial(grid_length_inverse, crd, grid_n, nxy, atom_in_grid_serial): - grid_idx = (crd * grid_length_inverse).astype(np.int32) - grid_idx = np.where(grid_idx < grid_n, grid_idx, 0) - atom_in_grid_serial = grid_idx[..., 2] * nxy + grid_idx[..., 1] * grid_n[0] + grid_idx[..., 0] - return atom_in_grid_serial - - -def neighbor_list_update( - grid_numbers, atom_numbers, not_first_time, nxy, excluded_atom_numbers, - cutoff_square, half_skin_square, cutoff_with_skin, half_cutoff_with_skin, cutoff_with_skin_square, - refresh_interval, cutoff, skin, max_atom_in_grid_numbers, max_neighbor_numbers, - atom_numbers_in_grid_bucket, bucket, crd, box_length, grid_n, grid_length_inverse, atom_in_grid_serial, - old_crd, crd_to_uint_crd_cof, uint_crd, gpointer, nl_atom_numbers, nl_atom_serial, uint_dr_to_dr_cof, - not_excluded, 
need_refresh_flag, refresh_count):
-    """
-    Update (or construct, if for the first time) the Verlet neighbor list for the
-    calculation of short-ranged force. Assume the number of atoms is n,
-    the number of grids divided is G, the maximum number of atoms in one
-    grid is m, the maximum number of atoms in a single atom's neighbor list
-    is L, and the total number of atoms in the excluded list is E.
-
-    Args:
-        grid_numbers (int32): the total number of grids divided.
-        not_first_time (int32): whether to construct the neighbor
-            list for the first time or not.
-        nxy (int32): the total number of grids divided in the xy plane.
-        excluded_atom_numbers (int32): the total atom numbers in the excluded list.
-        cutoff (float32): the cutoff distance for short-range force calculation. Default: 10.0.
-        skin (float32): the overflow value of cutoff to maintain a neighbor list. Default: 2.0.
-        cutoff_square (float32): the square value of cutoff.
-        half_skin_square (float32): skin*skin/4, indicates the maximum
-            squared distance an atom is allowed to move between two updates.
-        cutoff_with_skin (float32): cutoff + skin, indicates the
-            radius of the neighbor list for each atom.
-        half_cutoff_with_skin (float32): cutoff_with_skin/2.
-        cutoff_with_skin_square (float32): the square value of cutoff_with_skin.
-        refresh_interval (int32): the number of iteration steps between two updates of the neighbor
-            list. Default: 20.
-        max_atom_in_grid_numbers (int32): the maximum number of atoms in one grid. Default: 64.
-        max_neighbor_numbers (int32): the maximum number of neighbors. Default: 800.
-        atom_numbers_in_grid_bucket (Tensor, int32): [G,], the number of atoms in each grid bucket.
-        bucket (Tensor, int32): [G, m], the atom indices in each grid bucket.
-        crd (Tensor, float32): [n, 3], the coordinates of each atom.
-        box_length (Tensor, float32): [3,], the length of 3 dimensions of the simulation box.
-        grid_n (Tensor, int32): [3,], the number of grids divided of 3 dimensions of the
-            simulation box.
-        grid_length_inverse (float32): the inverse value of grid length.
-        atom_in_grid_serial (Tensor, int32): [n,], the grid index for each atom.
-        old_crd (Tensor, float32): [n, 3], the coordinates before update of each atom.
-        crd_to_uint_crd_cof (Tensor, float32): [3,], the scale factor
-            between the unsigned int value and the real space coordinates.
-        uint_crd (Tensor, uint32): [n, 3], the unsigned int coordinates value of each atom.
-        gpointer (Tensor, int32): [G, 125], the 125 nearest neighbor grids (including self) of each
-            grid.
-        nl_atom_numbers (Tensor, int32): [n,], the number of atoms in the neighbor list of each atom.
-        nl_atom_serial (Tensor, int32): [n, L], the indices of atoms in the neighbor list of each atom.
-        uint_dr_to_dr_cof (Tensor, float32): [3,], the scale factor between
-            the real space coordinates and the unsigned int value.
-        not_excluded (Tensor, bool): [n, n], marking the excluded atoms for each atom, where each
-            element ij indicates whether atom j is not excluded for atom i.
-        need_refresh_flag (Tensor, int32): [n,], whether the neighbor list of each atom needs
-            updating or not.
-        refresh_count (Tensor, int32): [1,], counts how many iteration steps have passed since the last
-            update.
- - Outputs: - nl_atom_numbers (Tensor, int32) - [n,], the number of atoms in neighbor list of each atom. - nl_atom_serial (Tensor, int32) - [n, L], the indices of atoms in neighbor list of each atom. - crd (Tensor, float32) - [n,], the coordinates of each atom. - old_crd (Tensor, float32) - [n, 3], the coordinates before update of each atom. - need_refresh_flag (Tensor, int32) - [n,], whether the neighbor list of each atom need update - or not. - refresh_count (Tensor, int32) - [1,], count how many iteration steps have passed since last - update. - - Supported Platforms: - ``GPU`` - """ - half_crd_to_uint_crd_cof = 0.5 * crd_to_uint_crd_cof - if not_first_time: - if refresh_interval > 0: - refresh_cond = (refresh_count % refresh_interval) == 0 - trans_vec = np.full(3, -skin, np.float32) - crd = np.where(refresh_cond, crd + trans_vec, crd) - crd = np.where(refresh_cond, crd_periodic_map(crd, box_length), crd) - crd = np.where(refresh_cond, crd - trans_vec, crd) - old_crd = np.where(refresh_cond, crd, old_crd) - - uint_crd = np.where(refresh_cond, - crd_to_uint_crd(half_crd_to_uint_crd_cof, crd).astype(np.int32), - uint_crd.astype(np.int32)).astype(np.uint32) - - nl_mask = find_atom_neighbors( - atom_numbers, uint_crd, uint_dr_to_dr_cof, cutoff_square) - - nl_atom_numbers_updated, nl_atom_serial_updated = delete_excluded_atoms_serial_in_neighbor_list( - atom_numbers, max_neighbor_numbers, nl_mask, not_excluded) - nl_atom_numbers = np.where(refresh_cond, nl_atom_numbers_updated, nl_atom_numbers) - nl_atom_serial = np.where(refresh_cond, nl_atom_serial_updated, nl_atom_serial) - - refresh_count += 1 - else: - r1 = crd - old_crd - r1_2 = np.sum(r1, -1) - if (r1_2 > half_skin_square).any(): - trans_vec = np.full(3, skin, np.float32) - crd += trans_vec - crd = crd_periodic_map(crd, box_length) - crd -= trans_vec - old_crd[...] = crd - - uint_crd = crd_to_uint_crd(half_crd_to_uint_crd_cof, crd) - - nl_mask = find_atom_neighbors( - atom_numbers, uint_crd, uint_dr_to_dr_cof, cutoff_with_skin_square) - - nl_atom_numbers, nl_atom_serial = delete_excluded_atoms_serial_in_neighbor_list( - atom_numbers, max_neighbor_numbers, nl_mask, not_excluded) - - need_refresh_flag[0] = 0 - else: - trans_vec = np.full(3, skin, np.float32) - crd = crd_periodic_map(crd, box_length) - crd += trans_vec - old_crd[...] = crd - - uint_crd = crd_to_uint_crd(half_crd_to_uint_crd_cof, crd) - - nl_mask = find_atom_neighbors( - atom_numbers, uint_crd, uint_dr_to_dr_cof, cutoff_with_skin_square) - - nl_atom_numbers, nl_atom_serial = delete_excluded_atoms_serial_in_neighbor_list( - atom_numbers, max_neighbor_numbers, nl_mask, not_excluded) - - res = (nl_atom_numbers, nl_atom_serial, crd, old_crd, need_refresh_flag, refresh_count) - - return res diff --git a/tests/st/sponge/functions/pme_common.py b/tests/st/sponge/functions/pme_common.py deleted file mode 100644 index 6a1a934acec..00000000000 --- a/tests/st/sponge/functions/pme_common.py +++ /dev/null @@ -1,171 +0,0 @@ -# Copyright 2021 Huawei Technologies Co., Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================ -'''pme common''' -import mindspore.numpy as mnp -from mindspore import ops -from mindspore.ops import functional as F -from mindspore.ops import constexpr - -from .common import get_neighbour_index, get_periodic_displacement - -PERIODIC_FACTOR_INVERSE = 2.32830643e-10 -pme_ma = mnp.array([1.0 / 6.0, -0.5, 0.5, -1.0 / 6.0]) -pme_mb = mnp.array([0, 0.5, -1, 0.5]) -pme_mc = mnp.array([0, 0.5, 0, -0.5]) -pme_md = mnp.array([0, 1.0 / 6.0, 4.0 / 6.0, 1.0 / 6.0]) -fft3d = ops.FFT3D() -ifft3d = ops.IFFT3D() -real = ops.Real() -conj = ops.Conj() - - -@constexpr -def to_tensor(args): - return mnp.array(args) - - -def scale_list(element_numbers, tensor, scaler): - """Scale values in `tensor`.""" - if tensor.ndim > 0 and len(tensor) > element_numbers: - tensor = tensor[:element_numbers] - return tensor * scaler - - -# pylint: disable=too-many-function-args -def pme_a_near(uint_crd, pme_atom_near, pme_nin, periodic_factor_inverse_x, - periodic_factor_inverse_y, periodic_factor_inverse_z, atom_numbers, - fftx, ffty, fftz, pme_kxyz, pme_uxyz): - '''pme atom near''' - periodic_factor_inverse_xyz = to_tensor( - (periodic_factor_inverse_x, periodic_factor_inverse_y, periodic_factor_inverse_z)) - tempf = uint_crd.astype('float32') * periodic_factor_inverse_xyz - tempu = tempf.astype('int32') - tempu = ops.depend(tempu, tempu) - pme_frxyz = tempf - tempu - - cond = mnp.not_equal(pme_uxyz.astype(mnp.int32), tempu).any(1, True) - pme_uxyz = mnp.where(cond, tempu, pme_uxyz) - - tempu = tempu.reshape(atom_numbers, 1, 3) - kxyz = tempu - pme_kxyz.astype(mnp.int32) - kxyz_plus = kxyz + mnp.array([fftx, ffty, fftz]) - kxyz = ops.select(kxyz < 0, kxyz_plus, kxyz) - - kxyz = kxyz * to_tensor((pme_nin, fftz, 1)).reshape(1, 1, 3) - temp_near = mnp.sum(kxyz.astype(mnp.float32), -1).astype(mnp.int32) - pme_atom_near = mnp.where(cond, temp_near, pme_atom_near) - - return pme_frxyz, pme_uxyz, pme_atom_near - - -def pme_q_spread(pme_atom_near, charge, pme_frxyz, pme_q, pme_kxyz, atom_numbers): - '''pme q spread''' - pme_kxyz = pme_kxyz.astype(mnp.int32) - pme_ma_new = pme_ma[pme_kxyz] - pme_mb_new = pme_mb[pme_kxyz] - pme_mc_new = pme_mc[pme_kxyz] - pme_md_new = pme_md[pme_kxyz] - - tempf = pme_frxyz.reshape(atom_numbers, 1, 3) # (N, 1, 3) - tempf2 = tempf * tempf # (N, 1, 3) - temp_charge = charge.reshape(atom_numbers, 1) # (N, 1) - - tempf = pme_ma_new * tempf * tempf2 + pme_mb_new * tempf2 + pme_mc_new * tempf + pme_md_new # (N, 64, 3) - - tempq = temp_charge * tempf[..., 0] * tempf[..., 1] * tempf[..., 2] # (N, 64) - index = pme_atom_near.ravel() # (N * 64,) - tempq = tempq.ravel() # (N * 64,) - pme_q = ops.tensor_scatter_add(pme_q, mnp.expand_dims(index, -1), tempq) - - return pme_q - - -# pylint: disable=too-many-arguments -def pme_direct_energy(atom_numbers, nl_numbers, nl_serial, uint_crd, boxlength, charge, beta, cutoff_square): - '''pme direct energy''' - r2 = uint_crd[nl_serial] - - dr_xyz = get_periodic_displacement(r2, mnp.expand_dims(uint_crd, 1), boxlength) - dr2 = mnp.sum(dr_xyz * dr_xyz, -1) - - dr_abs = mnp.sqrt(dr2) - charge_i = charge.reshape(-1, 1) - charge_j = charge[nl_serial] - - ene_temp = charge_i * charge_j * ops.erfc(beta * dr_abs) - where_zeros = dr_abs == 0. - dr_abs[where_zeros] = 1. 
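-    # Invalid entries of nl_serial (those beyond nl_numbers) can coincide with
-    # the central atom, giving dr_abs == 0; resetting those distances to 1
-    # keeps the division below finite, and the cutoff/neighbor-count mask
-    # computed afterwards removes their contribution from the summed energy.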
- ene_temp = ene_temp / dr_abs - - idx = get_neighbour_index(atom_numbers, nl_serial.shape[1]) - mask = mnp.logical_and(dr2 < cutoff_square, idx < mnp.expand_dims(nl_numbers, -1)) - - ene_lin = mnp.sum(ene_temp * mask) - return ene_lin - - -@constexpr -def get_pme_kxyz(): - k = mnp.arange(4) - x = mnp.repeat(k, 16).reshape(64, 1) - y = F.tile(mnp.repeat(k, 4), (4, 1)).reshape(64, 1) - z = F.tile(k, (16, 1)).reshape(64, 1) - pme_kxyz = mnp.column_stack((x, y, z)).astype('uint32') - return pme_kxyz - - -def pme_energy_reciprocal(pme_fq, bc): - return mnp.sum(real(conj(pme_fq) * pme_fq) * bc) - - -def pme_energy_product(tensor1, tensor2): - return mnp.sum(tensor1 * tensor2) - - -def pme_excluded_energy_correction_dense(uint_crd, scaler, charge, pme_beta, excluded_matrix): - '''pme excluded energy correction''' - mask = (excluded_matrix > -1) - excluded_crd = uint_crd[excluded_matrix] - - dr_xyz = get_periodic_displacement(excluded_crd, mnp.expand_dims(uint_crd, 1), scaler) - dr2 = mnp.sum(dr_xyz * dr_xyz, -1) - dr_abs = mnp.sqrt(dr2) - beta_dr = pme_beta * dr_abs - - excluded_charge = charge[excluded_matrix] - charge_mul = mnp.expand_dims(charge, 1) * excluded_charge - - ene_lin = charge_mul * ops.erf(beta_dr) - where_zeros = dr_abs == 0. - dr_abs[where_zeros] = 1. - ene_lin = ene_lin / dr_abs * mask - - return 0. - mnp.sum(ene_lin * mask) - - -def pme_excluded_energy_correction_sparse(uint_crd, scaler, charge, pme_beta, excluded_list, atom_excluded_index): - '''pme excluded energy correction''' - charge_mul = charge[atom_excluded_index] * charge[excluded_list] - - dr = get_periodic_displacement(uint_crd[excluded_list], uint_crd[atom_excluded_index], scaler) - dr_2 = mnp.sum(mnp.square(dr), -1) - - dr_abs = mnp.sqrt(dr_2) - beta_dr = pme_beta * dr_abs - - ene_lin = charge_mul * ops.erf(beta_dr) / dr_abs - ene = mnp.sum(ene_lin) - - return 0. - ene diff --git a/tests/st/sponge/functions/pme_energy.py b/tests/st/sponge/functions/pme_energy.py deleted file mode 100644 index 13b09da9a88..00000000000 --- a/tests/st/sponge/functions/pme_energy.py +++ /dev/null @@ -1,98 +0,0 @@ -# Copyright 2021 Huawei Technologies Co., Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================ -'''pme energy''' -import mindspore.numpy as mnp -from .common import PI, get_zero_tensor, get_full_tensor -from .pme_common import scale_list, pme_a_near, pme_q_spread, pme_direct_energy, pme_energy_reciprocal, \ - pme_excluded_energy_correction_sparse, pme_excluded_energy_correction_dense, pme_energy_product,\ - get_pme_kxyz, PERIODIC_FACTOR_INVERSE, fft3d - -CUT_OFF = 10.0 - - -def pme_energy(atom_numbers, beta, fftx, ffty, fftz, pme_bc, uint_crd, charge, - nl_numbers, nl_serial, scaler, **kargs): - """ - Calculate the Coulumb energy of the system using PME method. - - .. math:: - - E = sum_{ij} q_iq_j/r_{ij} - - Args: - atom_numbers (int): the number of atoms, N. 
-        beta (float): the PME beta parameter, determined by the
-            non-bond cutoff value and simulation precision tolerance.
-        fftx (int): the number of points for Fourier transform in dimension X.
-        ffty (int): the number of points for Fourier transform in dimension Y.
-        fftz (int): the number of points for Fourier transform in dimension Z.
-        pme_bc (Tensor, float32): [fftx * ffty * (fftz // 2 + 1),], the precomputed B*C
-            coefficients used in the reciprocal-space energy sum.
-        uint_crd (Tensor, uint32): [N, 3], the unsigned int coordinates value of each atom.
-        charge (Tensor, float32): [N,], the charge carried by each atom.
-        nl_numbers (Tensor, int32): [N,], the number of atoms in the neighbor list of each atom.
-        nl_serial (Tensor, int32): [N, 800], the neighbor list of each atom, the max number is 800.
-        scaler (Tensor, float32): [3,], the scale factor between real space
-            coordinates and its unsigned int value.
-
-    Keyword Arguments:
-        excluded_matrix (Tensor, int32): [N, k] containing the excluded atoms for each atom, where k
-            is the maximum number of excluded atoms for all atoms.
-        excluded_list (Tensor, int32): [E,] containing the CSR format column indices of excluded atoms.
-            E is the number of excluded atoms.
-        atom_excluded_index (Tensor, int32): [E,] containing the row indices corresponding to excluded_list,
-            i.e. each CSR format row index is replicated to map to one CSR format column index.
-
-    Outputs:
-        reciprocal_ene (float) - the reciprocal term of PME energy.
-        self_ene (float) - the self term of PME energy.
-        direct_ene (float) - the direct term of PME energy.
-        correction_ene (float) - the correction term of PME energy.
-
-    Supported Platforms:
-        ``GPU``
-    """
-    pme_nin = ffty * fftz
-    pme_nall = fftx * ffty * fftz
-
-    pme_kxyz = get_pme_kxyz()  # (64, 3)
-
-    pme_uxyz = get_full_tensor((atom_numbers, 3), 2 ** 30, mnp.uint32)
-    pme_atom_near = get_zero_tensor((atom_numbers, 64), mnp.int32)
-    pme_frxyz, pme_uxyz, pme_atom_near = pme_a_near(uint_crd, pme_atom_near, pme_nin,
-                                                    PERIODIC_FACTOR_INVERSE * fftx,
-                                                    PERIODIC_FACTOR_INVERSE * ffty,
-                                                    PERIODIC_FACTOR_INVERSE * fftz, atom_numbers,
-                                                    fftx, ffty, fftz, pme_kxyz, pme_uxyz)
-
-    pme_q = get_full_tensor(pme_nall, 0, mnp.float32)
-    pme_q = pme_q_spread(pme_atom_near, charge, pme_frxyz, pme_q, pme_kxyz, atom_numbers)
-
-    pme_q = pme_q.reshape(fftx, ffty, fftz).astype('float32')
-    pme_fq = fft3d(pme_q)
-
-    reciprocal_ene = pme_energy_reciprocal(pme_fq, pme_bc.reshape((fftx, ffty, fftz // 2 + 1)))
-
-    self_ene = pme_energy_product(charge, charge)
-    self_ene = scale_list(1, self_ene, -beta / mnp.sqrt(PI))
-
-    direct_ene = pme_direct_energy(atom_numbers, nl_numbers, nl_serial, uint_crd, scaler, charge, beta,
-                                   CUT_OFF * CUT_OFF)
-
-    if "excluded_matrix" in kargs.keys():
-        correction_ene = pme_excluded_energy_correction_dense(uint_crd, scaler, charge, beta, kargs["excluded_matrix"])
-    else:
-        correction_ene = pme_excluded_energy_correction_sparse(uint_crd, scaler, charge, beta,
-                                                               kargs["excluded_list"], kargs["atom_excluded_index"])
-    res = (mnp.atleast_1d(reciprocal_ene), mnp.atleast_1d(self_ene), \
-           mnp.atleast_1d(direct_ene), mnp.atleast_1d(correction_ene))
-    return res
diff --git a/tests/st/sponge/functions/pme_energy_update.py b/tests/st/sponge/functions/pme_energy_update.py
deleted file mode 100644
index 240c62ed0db..00000000000
--- a/tests/st/sponge/functions/pme_energy_update.py
+++ /dev/null
@@ -1,66 +0,0 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================ -'''pme energy update''' -import mindspore.numpy as mnp -from .pme_energy import pme_energy - - -def pme_energy_update(atom_numbers, beta, fftx, ffty, fftz, pme_bc, uint_crd, charge, - nl_numbers, nl_serial, scaler, excluded_list, atom_excluded_index, neutralizing_factor): - """ - Calculate the Coulumb energy of the system using PME method. - - .. math:: - - E = sum_{ij} q_iq_j/r_{ij} - - Args: - atom_numbers (int): the number of atoms, N. - beta (float): the PME beta parameter, determined by the - non-bond cutoff value and simulation precision tolerance. - fftx (int): the number of points for Fourier transform in dimension X. - ffty (int): the number of points for Fourier transform in dimension Y. - fftz (int): the number of points for Fourier transform in dimension Z. - uint_crd (Tensor, uint32): [N, 3], the unsigned int coordinates value of each atom. - charge (Tensor, float32): [N,], the charge carried by each atom. - nl_numbers - (Tensor, int32): [N,], the each atom. - nl_serial - (Tensor, int32): [N, 800], the neighbor list of each atom, the max number is 800. - scaler (Tensor, float32): [3,], the scale factor between real space - coordinates and its unsigned int value. - excluded_list (Tensor, int32):[E,] containing the CSR format column indices of excluded atom. - E is the number of excluded atoms. - atom_excluded_index (Tensor, int32):[E,] containing the row indices corresponding excluded_list, - i.e. each CSR format row index is replicated to map to one CSR format column index. - neutralizing_factor -(Tensor, float32): [1, ] the factor parameter to be updated in pressure calculation. - - Outputs: - reciprocal_ene (float) - the reciprocal term of PME energy. - self_ene (float) - the self term of PME energy. - direct_ene (float) - the direct term of PME energy. - correction_ene (float) - the correction term of PME energy. - - Supported Platforms: - ``GPU`` - """ - reciprocal_ene, self_ene, direct_ene, correction_ene = pme_energy(atom_numbers, beta, fftx, ffty, fftz, - pme_bc, uint_crd, charge, nl_numbers, - nl_serial, scaler, excluded_list=excluded_list, - atom_excluded_index=atom_excluded_index) - - d_beta = mnp.sum(charge, -1) - self_ene += neutralizing_factor * d_beta * d_beta - res = (mnp.atleast_1d(reciprocal_ene), mnp.atleast_1d(self_ene), \ - mnp.atleast_1d(direct_ene), mnp.atleast_1d(correction_ene)) - return res diff --git a/tests/st/sponge/functions/pme_excluded_force.py b/tests/st/sponge/functions/pme_excluded_force.py deleted file mode 100644 index 4a9a842f326..00000000000 --- a/tests/st/sponge/functions/pme_excluded_force.py +++ /dev/null @@ -1,116 +0,0 @@ -# Copyright 2021 Huawei Technologies Co., Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ============================================================================
-'''pme excluded force'''
-import math
-import mindspore.numpy as np
-import mindspore.ops as ops
-from mindspore.common import CSRTensor
-from mindspore.ops import functional as F
-
-from .common import get_periodic_displacement
-
-sqrt_pi = 2/math.sqrt(3.141592654)  # 2 / sqrt(pi), the prefactor of the erf derivative
-zero_tensor = np.array(0, dtype=np.float32)
-
-
-def pme_excluded_force_sparse(atom_numbers, beta, uint_crd, scaler, charge, excluded_csr, excluded_row):
    """
-    Calculate the excluded part of the long-range Coulomb force using the
-    PME (Particle Mesh Ewald) method.
-    """
-    excluded_crd = uint_crd[excluded_csr.values]
-    excluded_crd_row = uint_crd[excluded_row]
-    crd_d = get_periodic_displacement(excluded_crd, excluded_crd_row, scaler)
-    crd_2 = crd_d ** 2
-    crd_sum = np.sum(crd_2, -1)
-    crd_abs = np.sqrt(crd_sum)
-    crd_beta = crd_abs * beta
-    frc_abs = crd_beta * sqrt_pi * np.exp(-crd_beta ** 2) + ops.erfc(crd_beta)
-    frc_abs = (frc_abs - 1.) / crd_sum / crd_abs
-    excluded_charge = charge[excluded_csr.values]
-    excluded_charge_row = charge[excluded_row]
-    charge_mul = excluded_charge * excluded_charge_row
-    frc_abs = -charge_mul * frc_abs
-    frc_lin = np.expand_dims(frc_abs, -1) * crd_d
-    indptr = excluded_csr.indptr.astype(np.int32)
-    indices = excluded_csr.indices.astype(np.int32)
-    shape = (atom_numbers, excluded_row.shape[0])
-    x, y, z = np.split(frc_lin, 3, -1)
-    frc_lin_x = F.csr_reduce_sum(CSRTensor(indptr, indices, x.ravel(), shape), 1)
-    frc_lin_y = F.csr_reduce_sum(CSRTensor(indptr, indices, y.ravel(), shape), 1)
-    frc_lin_z = F.csr_reduce_sum(CSRTensor(indptr, indices, z.ravel(), shape), 1)
-    frc_outer = np.concatenate((frc_lin_x, frc_lin_y, frc_lin_z), -1)
-    res = ops.tensor_scatter_add(frc_outer, excluded_csr.values.reshape(-1, 1), -frc_lin)
-    return res
-
-
-def pme_excluded_force_dense(beta, uint_crd, scaler, charge, excluded_matrix):
-    """
-    Calculate the excluded part of the long-range Coulomb force using the
-    PME (Particle Mesh Ewald) method.
-    """
-    mask = (excluded_matrix > -1)
-    excluded_crd = uint_crd[excluded_matrix]
-    crd_d = get_periodic_displacement(excluded_crd, np.expand_dims(uint_crd, 1), scaler)
-    crd_2 = crd_d ** 2
-    crd_sum = np.sum(crd_2, -1)
-    crd_abs = np.sqrt(crd_sum)
-    crd_beta = crd_abs * beta
-    frc_abs = crd_beta * sqrt_pi * np.exp(-crd_beta ** 2) + ops.erfc(crd_beta)
-    frc_abs = (frc_abs - 1.) / crd_sum / crd_abs
-    frc_abs = np.where(mask, frc_abs, zero_tensor)
-    excluded_charge = charge[excluded_matrix]
-    charge_mul = np.expand_dims(charge, 1) * excluded_charge
-    frc_abs = -charge_mul * frc_abs
-    frc_lin = np.expand_dims(frc_abs, 2) * crd_d
-    frc_outer = np.sum(frc_lin, axis=1)
-    frc_inner = -frc_lin.reshape(-1, 3)
-    excluded_list = excluded_matrix.reshape(-1, 1)
-    res = ops.tensor_scatter_add(frc_outer, excluded_list, frc_inner)
-    return res
-
-
-def pme_excluded_force(atom_numbers, beta, uint_crd, scaler, charge, **kargs):
-    """
-    Calculate the excluded part of the long-range Coulomb force using the
-    PME (Particle Mesh Ewald) method. Assume the number of atoms is
-    N, and the length of the excluded list is E.
- - Args: - atom_numbers (int): the number of atoms, N. - beta (float): the PME beta parameter, determined by the - non-bond cutoff value and simulation precision tolerance. - uint_crd (Tensor, uint32): [N, 3], the unsigned int coordinates value of each atom. - scaler (Tensor, float32): [3,], the scale factor between real space - coordinates and its unsigned int value. - charge (Tensor, float32): [N,], the charge carried by each atom. - Keyword Arguments: - excluded_matrix (Tensor, int32): [N, k] containing the excluded atoms for each atom, where k - is the maximum number of excluded atoms for all atoms. - excluded_csr: (CSRTensor, int32): [N, E] containing the excluded atoms for each atom, has the - same meaning as excluded_matrix, but in sparse csr format. - excluded_row: (Tensor, int32): [E,] contains the row indices for each element in the excluded - atoms list (i.e., excluded_csr.values). - - Outputs: - force (Tensor, float32): [N, 3], the force felt by each atom. - - Supported Platforms: - ``GPU`` - """ - if "excluded_matrix" in kargs.keys(): - return pme_excluded_force_dense(beta, uint_crd, scaler, charge, kargs["excluded_matrix"]) - return pme_excluded_force_sparse(atom_numbers, beta, uint_crd, scaler,\ - charge, kargs["excluded_csr"], kargs["excluded_row"]) diff --git a/tests/st/sponge/functions/pme_reciprocal_force.py b/tests/st/sponge/functions/pme_reciprocal_force.py deleted file mode 100644 index 4191c78ba2a..00000000000 --- a/tests/st/sponge/functions/pme_reciprocal_force.py +++ /dev/null @@ -1,102 +0,0 @@ -# Copyright 2021 Huawei Technologies Co., Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ============================================================================ -'''pme reciprocal force''' -from mindspore import numpy as np -from mindspore import dtype as mstype -from .common import get_full_tensor -from .pme_common import get_pme_kxyz, pme_a_near, pme_q_spread, to_tensor, \ -PERIODIC_FACTOR_INVERSE, pme_ma, pme_mb, pme_mc, pme_md, fft3d, ifft3d - - -MAXINT = 1073741824 -pme_dma = np.array([0.5, -1.5, 1.5, -0.5], np.float32) -pme_dmb = np.array([0, 1, -2, 1], np.float32) -pme_dmc = np.array([0, 0.5, 0, -0.5], np.float32) - - -def pme_final(pme_atom_near, charge, pme_q, pme_frxyz, pme_kxyz, pme_inverse_box_vector): - '''pme final''' - dqf = -pme_q[pme_atom_near] * np.expand_dims(charge, -1) # N * 64 - - fxyz = np.expand_dims(pme_frxyz, -2) - fxyz_2 = fxyz ** 2 - - # N * 64 * 3 - pme_kxyz = pme_kxyz.astype(np.int32) - xyz = (pme_ma[pme_kxyz] * fxyz * fxyz_2 + pme_mb[pme_kxyz] * fxyz_2 + - pme_mc[pme_kxyz] * fxyz + pme_md[pme_kxyz]) - dxyz = pme_dma[pme_kxyz] * fxyz_2 + pme_dmb[pme_kxyz] * fxyz + pme_dmc[pme_kxyz] - qxyz = dxyz * pme_inverse_box_vector - - x, y, z = xyz[..., 0], xyz[..., 1], xyz[..., 2] - qx, qy, qz = qxyz[..., 0], qxyz[..., 1], qxyz[..., 2] - - qx *= y * z * dqf - qy *= x * z * dqf - qz *= x * y * dqf - - force = np.stack((qx, qy, qz), axis=-1) - return np.sum(force, axis=1) - - -def pme_reciprocal_force(atom_numbers, fftx, ffty, fftz, box_length_0, box_length_1, - box_length_2, pme_bc, uint_crd, charge): - """ - Calculate the reciprocal part of long-range Coulumb force using - PME(Particle Meshed Ewald) method. Assume the number of atoms is N. - - The detailed calculation formula of PME(Particle Meshed Ewald) method - can be found in this paper: A Smooth Particle Mesh Ewald Method. DOI: - 10.1063/1.470117. - - Args: - atom_numbers (int): the number of atoms, N. - fftx (int): the number of points for Fourier transform in dimension X. - ffty (int): the number of points for Fourier transform in dimension Y. - fftz (int): the number of points for Fourier transform in dimension Z. - box_length_0 (float): the value of boxlength idx 0 - box_length_1 (float): the value of boxlength idx 1 - box_length_2 (float): the value of boxlength idx 2 - uint_crd (Tensor, uint32): [N, 3], the unsigned int coordinates value of each atom. - charge (Tensor, float32): [N,], the charge carried by each atom. - - Outputs: - force (Tensor, float32): [N, 3], the force felt by each atom. 
- - Supported Platforms: - ``GPU`` - """ - pme_nall = fftx * ffty * fftz - pme_nin = ffty * fftz - pme_atom_near = get_full_tensor((atom_numbers, 64), 0, np.int32) - pme_uxyz = get_full_tensor((atom_numbers, 3), MAXINT, np.uint32) - pme_kxyz = get_pme_kxyz() - pme_inverse_box_vector = to_tensor((fftx / box_length_0, ffty / box_length_1, fftz / box_length_2)) - - pme_frxyz, pme_uxyz, pme_atom_near = pme_a_near( - uint_crd, pme_atom_near, pme_nin, PERIODIC_FACTOR_INVERSE * fftx, PERIODIC_FACTOR_INVERSE * ffty, - PERIODIC_FACTOR_INVERSE * fftz, atom_numbers, fftx, ffty, fftz, pme_kxyz, pme_uxyz) - - pme_q = get_full_tensor(pme_nall, 0, np.float32) - pme_q = pme_q_spread(pme_atom_near, charge, pme_frxyz, pme_q, pme_kxyz, atom_numbers) - - pme_q = pme_q.reshape(fftx, ffty, fftz) - pme_bc = pme_bc.reshape(fftx, ffty, fftz // 2 + 1) - pme_fq = fft3d(pme_q) - pme_fq *= pme_bc.astype(mstype.complex64) - pme_q = ifft3d(pme_fq) - pme_q = pme_q.ravel() - - return pme_final(pme_atom_near, charge, pme_q, pme_frxyz, pme_kxyz, pme_inverse_box_vector) diff --git a/tests/st/sponge/functions/refresh_boxmap_times.py b/tests/st/sponge/functions/refresh_boxmap_times.py deleted file mode 100644 index eebb38ae48b..00000000000 --- a/tests/st/sponge/functions/refresh_boxmap_times.py +++ /dev/null @@ -1,40 +0,0 @@ -# Copyright 2021 Huawei Technologies Co., Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================ -'''refresh boxmap times''' - -import mindspore.numpy as np - - -def refresh_boxmap_times(crd, old_crd, box_length_inverse, box_map_times): - """ - Refresh the box-crossing times of each atom. - Args: - atom_numbers (int): the number of atoms N. - crd (Tensor, float32): [N, 3], the coordinate of each atom. - old_crd (Tensor, float32): [N, 3], the coordinate of each atom at last update. - box_length_inverse (Tensor, float32): [3,], the inverse value of box length in 3 dimensions. - box_map_times (Tensor, int32): [N, 3], the number of times each atom has crossed the box - - Outputs: - box_map_times(Tensor, int32): [N, 3], the number of times each atom has crossed the box after updating. - old_crd (Tensor, float32): [N, 3], the coordinate of each atom at last update. - - Supported Platforms: - ``GPU`` - """ - box_map_times += np.floor((old_crd - crd) * box_length_inverse + 0.5).astype("int32") - old_crd = crd - - return box_map_times, old_crd diff --git a/tests/st/sponge/functions/refresh_crd_vel.py b/tests/st/sponge/functions/refresh_crd_vel.py deleted file mode 100644 index 6f4331432ef..00000000000 --- a/tests/st/sponge/functions/refresh_crd_vel.py +++ /dev/null @@ -1,45 +0,0 @@ -# Copyright 2021 Huawei Technologies Co., Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================ -'''refresh crd vel''' - -import mindspore.numpy as np - - -def refresh_crd_vel(dt_inverse, exp_gamma, half_exp_gamma_plus_half, crd, vel, test_frc, - mass_inverse): - """ - Refresh the coordinate and velocity of each constrained atom after all iterations have ended. - - Args: - dt_inverse (float32): the inverse value of simulation time step. - exp_gamma (float32): constant value exp(gamma * dt). - half_exp_gamma_plus_half (float32): constant value 1.0 + exp(gamma)/2 - crd (Tensor, float32): [N, 3], the coordinate of each atom. - vel (Tensor, float32): [N, 3], the velocity of each atom. - test_frc(Tensor, float32): [N, 3], the constraint force. - mass_inverse(Tensor, float32): [N, ], the inverse value of mass of each atom. - - Returns: - crd_lin (Tensor, float32): [N, 3], the coordinate of each atom after updating. - vel_lin (Tensor, float32): [N, 3], the velocity of each atom after updating. - - Supported Platforms: - ``GPU`` - """ - frc_lin = test_frc * np.expand_dims(mass_inverse, -1) - crd_lin = crd + half_exp_gamma_plus_half * frc_lin - vel_lin = vel + exp_gamma * frc_lin * dt_inverse - - return crd_lin, vel_lin diff --git a/tests/st/sponge/functions/refresh_uint_crd.py b/tests/st/sponge/functions/refresh_uint_crd.py deleted file mode 100644 index 418a55cd906..00000000000 --- a/tests/st/sponge/functions/refresh_uint_crd.py +++ /dev/null @@ -1,47 +0,0 @@ -# Copyright 2021 Huawei Technologies Co., Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================ -'''refresh uint crd''' - -import mindspore.numpy as np - -int32_four = np.array(4, dtype=np.int32) - - -def refresh_uint_crd(half_exp_gamma_plus_half, crd, quarter_cof, test_frc, - mass_inverse): - """ - Refresh the unsigned coordinate of each constrained atom in each constrain iteration. - - Args: - half_exp_gamma_plus_half (float32): constant value (1.0 + exp(gamma * dt)) if Langvin-Liu thermostat is used, - where gamma is friction coefficient and dt is the simulation time step, 1.0 otherwise. - crd(Tensor, float32): [N, 3], the coordinate of each atom. - quarter_cof (Tensor, float32): [3,], the coefficient mapping coordinates to uint coordinates. - test_frc(Tensor, float32): [N, 3], the constraint force. - mass_inverse(Tensor, float32): [N, ], the inverse value of mass of each atom. - - Outputs: - uint_crd(Tensor, uint32): [N, 3], the unsigned int coordinate value of each atom. 
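-
-    Examples:
-        A minimal sketch with hand-made values; the tensors below are
-        illustrative assumptions, not data from the original test suite:
-
-        >>> import mindspore.numpy as np
-        >>> crd = np.ones((2, 3), np.float32)
-        >>> quarter_cof = np.full(3, 0.25, np.float32)
-        >>> test_frc = np.zeros((2, 3), np.float32)
-        >>> mass_inverse = np.ones(2, np.float32)
-        >>> refresh_uint_crd(1.0, crd, quarter_cof, test_frc, mass_inverse).shape
-        (2, 3)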
diff --git a/tests/st/sponge/functions/refresh_uint_crd.py b/tests/st/sponge/functions/refresh_uint_crd.py
deleted file mode 100644
index 418a55cd906..00000000000
--- a/tests/st/sponge/functions/refresh_uint_crd.py
+++ /dev/null
@@ -1,47 +0,0 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ============================================================================
-'''refresh uint crd'''
-
-import mindspore.numpy as np
-
-int32_four = np.array(4, dtype=np.int32)
-
-
-def refresh_uint_crd(half_exp_gamma_plus_half, crd, quarter_cof, test_frc,
-                     mass_inverse):
-    """
-    Refresh the unsigned coordinate of each constrained atom in each constraint iteration.
-
-    Args:
-        half_exp_gamma_plus_half (float32): constant value (1.0 + exp(-gamma * dt)) / 2 if the Langevin-Liu
-            thermostat is used, where gamma is the friction coefficient and dt is the simulation time step,
-            1.0 otherwise.
-        crd (Tensor, float32): [N, 3], the coordinate of each atom.
-        quarter_cof (Tensor, float32): [3,], the coefficient mapping coordinates to uint coordinates.
-        test_frc (Tensor, float32): [N, 3], the constraint force.
-        mass_inverse (Tensor, float32): [N,], the inverse value of mass of each atom.
-
-    Outputs:
-        uint_crd (Tensor, uint32): [N, 3], the unsigned int coordinate value of each atom.
-
-    Supported Platforms:
-        ``GPU``
-    """
-
-    mass_inverse_mul = np.expand_dims(mass_inverse, -1)
-    crd_lin = (crd + half_exp_gamma_plus_half * test_frc * mass_inverse_mul).astype("float32")
-    tempi = (crd_lin * quarter_cof).astype("int32")
-    uint_crd = (tempi * int32_four).astype("uint32")
-
-    return uint_crd
diff --git a/tests/st/sponge/simulation_np.py b/tests/st/sponge/simulation_np.py
deleted file mode 100644
index 0547b12e65b..00000000000
--- a/tests/st/sponge/simulation_np.py
+++ /dev/null
@@ -1,404 +0,0 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ============================================================================
-'''Simulation'''
-import numpy as np
-
-import mindspore.common.dtype as mstype
-from mindspore import Tensor
-from mindspore import nn
-from mindspore.common.parameter import Parameter
-from mindspore.ops import functional as F
-from mindspore.ops import operations as P
-from tests.st.sponge.sponge_cuda.angle import Angle
-from tests.st.sponge.sponge_cuda.bond import Bond
-from tests.st.sponge.sponge_cuda.dihedral import Dihedral
-from tests.st.sponge.sponge_cuda.langevin_liujian_md import LangevinLiujian
-from tests.st.sponge.sponge_cuda.lennard_jones import LennardJonesInformation
-from tests.st.sponge.sponge_cuda.md_information import MdInformation
-from tests.st.sponge.sponge_cuda.nb14 import NonBond14
-from tests.st.sponge.sponge_cuda.neighbor_list import NeighborList
-from tests.st.sponge.sponge_cuda.particle_mesh_ewald import ParticleMeshEwald
-from tests.st.sponge.functions.dihedral_14_lj_energy import nb14_lj_energy
-from tests.st.sponge.functions.dihedral_14_cf_energy import nb14_cf_energy
-from tests.st.sponge.functions.common import reform_excluded_list, reform_residual_list, get_pme_bc
-from tests.st.sponge.functions.angle_energy import angle_energy
-from tests.st.sponge.functions.angle_force_with_atom_energy import angle_force_with_atom_energy
-from tests.st.sponge.functions.bond_force_with_atom_energy import bond_force_with_atom_energy
-from tests.st.sponge.functions.crd_to_uint_crd import crd_to_uint_crd
-from tests.st.sponge.functions.dihedral_14_ljcf_force_with_atom_energy import dihedral_14_ljcf_force_with_atom_energy
-from tests.st.sponge.functions.lj_energy import lj_energy
-from tests.st.sponge.functions.md_iteration_leap_frog_liujian import md_iteration_leap_frog_liujian
-from tests.st.sponge.functions.pme_excluded_force import pme_excluded_force
-from tests.st.sponge.functions.lj_force_pme_direct_force import lj_force_pme_direct_force
-from tests.st.sponge.functions.dihedral_force_with_atom_energy import dihedral_force_with_atom_energy
-from tests.st.sponge.functions.bond_energy import bond_energy
-from tests.st.sponge.functions.md_temperature import md_temperature
-from tests.st.sponge.functions.dihedral_energy import dihedral_energy
-from tests.st.sponge.functions.pme_energy import pme_energy
-from tests.st.sponge.functions.pme_reciprocal_force import pme_reciprocal_force
-
-
-class 
Controller: - '''controller''' - - def __init__(self): - self.input_file = "./data/NVT_290_10ns.in" - self.amber_parm = "./data/WATER_ALA.parm7" - self.initial_coordinates_file = "./data/WATER_ALA_350_cool_290.rst7" - self.command_set = {} - self.md_task = None - self.commands_from_in_file() - - def commands_from_in_file(self): - '''command from in file''' - file = open(self.input_file, 'r') - context = file.readlines() - file.close() - self.md_task = context[0].strip() - for val in context: - if "=" in val: - assert len(val.strip().split("=")) == 2 - flag, value = val.strip().split("=") - value = value.replace(",", '') - flag = flag.replace(" ", "") - if flag not in self.command_set: - self.command_set[flag] = value - else: - print("ERROR COMMAND FILE") - - -class Simulation(nn.Cell): - '''simulation''' - - def __init__(self): - super(Simulation, self).__init__() - self.control = Controller() - self.md_info = MdInformation(self.control) - self.bond = Bond(self.control) - self.angle = Angle(self.control) - self.dihedral = Dihedral(self.control) - self.nb14 = NonBond14(self.control, self.dihedral, self.md_info.atom_numbers) - self.nb_info = NeighborList(self.control, self.md_info.atom_numbers, self.md_info.box_length) - self.lj_info = LennardJonesInformation(self.control, self.md_info.nb.cutoff, self.md_info.sys.box_length) - self.liujian_info = LangevinLiujian(self.control, self.md_info.atom_numbers) - self.pme_method = ParticleMeshEwald(self.control, self.md_info) - self.bond_energy_sum = Tensor(0, mstype.int32) - self.angle_energy_sum = Tensor(0, mstype.int32) - self.dihedral_energy_sum = Tensor(0, mstype.int32) - self.nb14_lj_energy_sum = Tensor(0, mstype.int32) - self.nb14_cf_energy_sum = Tensor(0, mstype.int32) - self.lj_energy_sum = Tensor(0, mstype.int32) - self.ee_ene = Tensor(0, mstype.int32) - self.total_energy = Tensor(0, mstype.int32) - # Init scalar - self.ntwx = self.md_info.ntwx - self.atom_numbers = self.md_info.atom_numbers - self.residue_numbers = self.md_info.residue_numbers - self.bond_numbers = self.bond.bond_numbers - self.angle_numbers = self.angle.angle_numbers - self.dihedral_numbers = self.dihedral.dihedral_numbers - self.nb14_numbers = self.nb14.nb14_numbers - self.nxy = self.nb_info.nxy - self.grid_numbers = self.nb_info.grid_numbers - self.max_atom_in_grid_numbers = self.nb_info.max_atom_in_grid_numbers - self.max_neighbor_numbers = self.nb_info.max_neighbor_numbers - self.excluded_atom_numbers = self.md_info.nb.excluded_atom_numbers - self.refresh_count = Parameter(Tensor(self.nb_info.refresh_count, mstype.int32), requires_grad=False) - self.refresh_interval = self.nb_info.refresh_interval - self.skin = self.nb_info.skin - self.cutoff = self.nb_info.cutoff - self.cutoff_square = self.nb_info.cutoff_square - self.cutoff_with_skin = self.nb_info.cutoff_with_skin - self.half_cutoff_with_skin = self.nb_info.half_cutoff_with_skin - self.cutoff_with_skin_square = self.nb_info.cutoff_with_skin_square - self.half_skin_square = self.nb_info.half_skin_square - self.beta = self.pme_method.beta - self.fftx = self.pme_method.fftx - self.ffty = self.pme_method.ffty - self.fftz = self.pme_method.fftz - self.box_length_0 = self.md_info.box_length[0] - self.box_length_1 = self.md_info.box_length[1] - self.box_length_2 = self.md_info.box_length[2] - self.random_seed = self.liujian_info.random_seed - self.dt = self.liujian_info.dt - self.half_dt = self.liujian_info.half_dt - self.exp_gamma = self.liujian_info.exp_gamma - self.init_tensor() - self.op_define() - self.update = 
False - - def init_tensor(self): - '''init tensor''' - self.crd = Parameter( - Tensor(np.float32(np.asarray(self.md_info.coordinate).reshape([self.atom_numbers, 3])), mstype.float32), - requires_grad=False) - self.crd_to_uint_crd_cof = Tensor(np.asarray(self.md_info.pbc.crd_to_uint_crd_cof, np.float32), mstype.float32) - self.uint_dr_to_dr_cof = Parameter( - Tensor(np.asarray(self.md_info.pbc.uint_dr_to_dr_cof, np.float32), mstype.float32), requires_grad=False) - self.box_length = Tensor(self.md_info.box_length, mstype.float32) - self.charge = Parameter(Tensor(np.asarray(self.md_info.h_charge, dtype=np.float32), mstype.float32), - requires_grad=False) - self.old_crd = Parameter(Tensor(np.zeros([self.atom_numbers, 3], dtype=np.float32), mstype.float32), - requires_grad=False) - self.last_crd = Parameter(Tensor(np.zeros([self.atom_numbers, 3], dtype=np.float32), mstype.float32), - requires_grad=False) - self.uint_crd = Parameter(Tensor(np.zeros([self.atom_numbers, 3], dtype=np.uint32), mstype.uint32), - requires_grad=False) - self.mass_inverse = Tensor(self.md_info.h_mass_inverse, mstype.float32) - res_start = np.asarray(self.md_info.h_res_start, np.int32) - res_end = np.asarray(self.md_info.h_res_end, np.int32) - self.res_start = Tensor(res_start) - self.res_end = Tensor(res_end) - self.mass = Tensor(self.md_info.h_mass, mstype.float32) - self.velocity = Parameter(Tensor(self.md_info.velocity, mstype.float32), requires_grad=False) - self.acc = Parameter(Tensor(np.zeros([self.atom_numbers, 3], np.float32), mstype.float32), requires_grad=False) - self.bond_atom_a = Tensor(np.asarray(self.bond.h_atom_a, np.int32), mstype.int32) - self.bond_atom_b = Tensor(np.asarray(self.bond.h_atom_b, np.int32), mstype.int32) - self.bond_k = Tensor(np.asarray(self.bond.h_k, np.float32), mstype.float32) - self.bond_r0 = Tensor(np.asarray(self.bond.h_r0, np.float32), mstype.float32) - self.angle_atom_a = Tensor(np.asarray(self.angle.h_atom_a, np.int32), mstype.int32) - self.angle_atom_b = Tensor(np.asarray(self.angle.h_atom_b, np.int32), mstype.int32) - self.angle_atom_c = Tensor(np.asarray(self.angle.h_atom_c, np.int32), mstype.int32) - self.angle_k = Tensor(np.asarray(self.angle.h_angle_k, np.float32), mstype.float32) - self.angle_theta0 = Tensor(np.asarray(self.angle.h_angle_theta0, np.float32), mstype.float32) - self.dihedral_atom_a = Tensor(np.asarray(self.dihedral.h_atom_a, np.int32), mstype.int32) - self.dihedral_atom_b = Tensor(np.asarray(self.dihedral.h_atom_b, np.int32), mstype.int32) - self.dihedral_atom_c = Tensor(np.asarray(self.dihedral.h_atom_c, np.int32), mstype.int32) - self.dihedral_atom_d = Tensor(np.asarray(self.dihedral.h_atom_d, np.int32), mstype.int32) - self.pk = Tensor(np.asarray(self.dihedral.h_pk, np.float32), mstype.float32) - self.gamc = Tensor(np.asarray(self.dihedral.h_gamc, np.float32), mstype.float32) - self.gams = Tensor(np.asarray(self.dihedral.h_gams, np.float32), mstype.float32) - self.pn = Tensor(np.asarray(self.dihedral.h_pn, np.float32), mstype.float32) - self.ipn = Tensor(np.asarray(self.dihedral.h_ipn, np.int32), mstype.int32) - self.nb14_atom_a = Tensor(np.asarray(self.nb14.h_atom_a, np.int32), mstype.int32) - self.nb14_atom_b = Tensor(np.asarray(self.nb14.h_atom_b, np.int32), mstype.int32) - self.lj_scale_factor = Tensor(np.asarray(self.nb14.h_lj_scale_factor, np.float32), mstype.float32) - self.cf_scale_factor = Tensor(np.asarray(self.nb14.h_cf_scale_factor, np.float32), mstype.float32) - self.grid_n = Tensor(self.nb_info.grid_n, mstype.int32) - 
self.grid_length_inverse = Tensor(self.nb_info.grid_length_inverse, mstype.float32) - self.bucket = Parameter(Tensor( - np.asarray(self.nb_info.bucket, np.int32).reshape([self.grid_numbers, self.max_atom_in_grid_numbers]), - mstype.int32), requires_grad=False) - self.atom_numbers_in_grid_bucket = Parameter(Tensor(self.nb_info.atom_numbers_in_grid_bucket, mstype.int32), - requires_grad=False) - self.atom_in_grid_serial = Parameter(Tensor(np.zeros([self.nb_info.atom_numbers], np.int32), mstype.int32), - requires_grad=False) - self.pointer = Parameter( - Tensor(np.asarray(self.nb_info.pointer, np.int32).reshape([self.grid_numbers, 125]), mstype.int32), - requires_grad=False) - self.nl_atom_numbers = Parameter(Tensor(np.zeros([self.atom_numbers], np.int32), mstype.int32), - requires_grad=False) - self.nl_atom_serial = Parameter( - Tensor(np.zeros([self.atom_numbers, self.max_neighbor_numbers], np.int32), mstype.int32), - requires_grad=False) - excluded_list_start = np.asarray(self.md_info.nb.h_excluded_list_start, np.int32) - excluded_list = np.asarray(self.md_info.nb.h_excluded_list, np.int32) - excluded_numbers = np.asarray(self.md_info.nb.h_excluded_numbers, np.int32) - self.excluded_list_start = Tensor(excluded_list_start) - self.excluded_list = Tensor(excluded_list) - self.excluded_numbers = Tensor(excluded_numbers) - self.excluded_matrix = Tensor(reform_excluded_list(excluded_list, excluded_list_start, excluded_numbers)) - self.residual_matrix = Tensor(reform_residual_list(self.atom_numbers, res_start, res_end)) - box = (self.box_length_0, self.box_length_1, self.box_length_2) - self.pme_bc = Tensor(get_pme_bc(self.fftx, self.ffty, self.fftz, box, self.beta), mstype.float32) - self.need_refresh_flag = Tensor(np.asarray([0], np.int32), mstype.int32) - self.atom_lj_type = Tensor(self.lj_info.atom_lj_type, mstype.int32) - self.lj_a = Tensor(self.lj_info.h_lj_a, mstype.float32) - self.lj_b = Tensor(self.lj_info.h_lj_b, mstype.float32) - self.sqrt_mass = Tensor(self.liujian_info.h_sqrt_mass, mstype.float32) - self.rand_state = Parameter(Tensor(self.liujian_info.rand_state, mstype.float32)) - self.zero_fp_tensor = Tensor(np.asarray([0], np.float32)) - - def op_define(self): - '''op define''' - self.neighbor_list_update_init = P.NeighborListUpdate(grid_numbers=self.grid_numbers, - atom_numbers=self.atom_numbers, not_first_time=0, - nxy=self.nxy, - excluded_atom_numbers=self.excluded_atom_numbers, - cutoff_square=self.cutoff_square, - half_skin_square=self.half_skin_square, - cutoff_with_skin=self.cutoff_with_skin, - half_cutoff_with_skin=self.half_cutoff_with_skin, - cutoff_with_skin_square=self.cutoff_with_skin_square, - refresh_interval=self.refresh_interval, - cutoff=self.cutoff, skin=self.skin, - max_atom_in_grid_numbers=self.max_atom_in_grid_numbers, - max_neighbor_numbers=self.max_neighbor_numbers) - self.neighbor_list_update = P.NeighborListUpdate(grid_numbers=self.grid_numbers, atom_numbers=self.atom_numbers, - not_first_time=1, nxy=self.nxy, - excluded_atom_numbers=self.excluded_atom_numbers, - cutoff_square=self.cutoff_square, - half_skin_square=self.half_skin_square, - cutoff_with_skin=self.cutoff_with_skin, - half_cutoff_with_skin=self.half_cutoff_with_skin, - cutoff_with_skin_square=self.cutoff_with_skin_square, - refresh_interval=self.refresh_interval, cutoff=self.cutoff, - skin=self.skin, - max_atom_in_grid_numbers=self.max_atom_in_grid_numbers, - max_neighbor_numbers=self.max_neighbor_numbers) - - def simulation_beforce_caculate_force(self): - '''simulation before calculate 
force''' - crd_to_uint_crd_cof = 0.5 * self.crd_to_uint_crd_cof - uint_crd = crd_to_uint_crd(crd_to_uint_crd_cof, self.crd) - return uint_crd - - def simulation_caculate_force(self, uint_crd, scaler, nl_atom_numbers, nl_atom_serial): - '''simulation calculate force''' - bond_f, _ = bond_force_with_atom_energy(self.atom_numbers, self.bond_numbers, - uint_crd, scaler, self.bond_atom_a, - self.bond_atom_b, self.bond_k, - self.bond_r0) - - angle_f, _ = angle_force_with_atom_energy(self.angle_numbers, uint_crd, - scaler, self.angle_atom_a, - self.angle_atom_b, self.angle_atom_c, - self.angle_k, self.angle_theta0) - - dihedral_f, _ = dihedral_force_with_atom_energy(self.dihedral_numbers, - uint_crd, scaler, - self.dihedral_atom_a, - self.dihedral_atom_b, - self.dihedral_atom_c, - self.dihedral_atom_d, self.ipn, - self.pk, self.gamc, self.gams, - self.pn) - - nb14_f, _ = dihedral_14_ljcf_force_with_atom_energy(self.atom_numbers, uint_crd, - self.atom_lj_type, self.charge, - scaler, self.nb14_atom_a, self.nb14_atom_b, - self.lj_scale_factor, self.cf_scale_factor, - self.lj_a, self.lj_b) - - lj_f = lj_force_pme_direct_force(self.atom_numbers, self.cutoff_square, self.beta, - uint_crd, self.atom_lj_type, self.charge, scaler, - nl_atom_numbers, nl_atom_serial, self.lj_a, self.lj_b) - - pme_excluded_f = pme_excluded_force(self.atom_numbers, self.beta, uint_crd, scaler, - self.charge, excluded_matrix=self.excluded_matrix) - - pme_reciprocal_f = pme_reciprocal_force(self.atom_numbers, self.fftx, self.ffty, - self.fftz, self.box_length_0, self.box_length_1, - self.box_length_2, self.pme_bc, uint_crd, self.charge) - force = bond_f + angle_f + dihedral_f + nb14_f + lj_f + pme_excluded_f + pme_reciprocal_f - return force - - def simulation_caculate_energy(self, uint_crd, uint_dr_to_dr_cof): - '''simulation calculate energy''' - bond_e = bond_energy(self.atom_numbers, self.bond_numbers, uint_crd, uint_dr_to_dr_cof, - self.bond_atom_a, self.bond_atom_b, self.bond_k, self.bond_r0) - bond_energy_sum = bond_e.sum(keepdims=True) - - angle_e = angle_energy(self.angle_numbers, uint_crd, uint_dr_to_dr_cof, - self.angle_atom_a, self.angle_atom_b, self.angle_atom_c, - self.angle_k, self.angle_theta0) - angle_energy_sum = angle_e.sum(keepdims=True) - - dihedral_e = dihedral_energy(self.dihedral_numbers, uint_crd, uint_dr_to_dr_cof, - self.dihedral_atom_a, self.dihedral_atom_b, - self.dihedral_atom_c, self.dihedral_atom_d, - self.ipn, self.pk, self.gamc, self.gams, - self.pn) - dihedral_energy_sum = dihedral_e.sum(keepdims=True) - - nb14_lj_e = nb14_lj_energy(self.nb14_numbers, self.atom_numbers, uint_crd, - self.atom_lj_type, self.charge, uint_dr_to_dr_cof, - self.nb14_atom_a, self.nb14_atom_b, self.lj_scale_factor, self.lj_a, - self.lj_b) - nb14_cf_e = nb14_cf_energy(self.nb14_numbers, self.atom_numbers, uint_crd, - self.atom_lj_type, self.charge, uint_dr_to_dr_cof, - self.nb14_atom_a, self.nb14_atom_b, self.cf_scale_factor) - nb14_lj_energy_sum = nb14_lj_e.sum(keepdims=True) - nb14_cf_energy_sum = nb14_cf_e.sum(keepdims=True) - - lj_e = lj_energy(self.atom_numbers, self.cutoff_square, uint_crd, - self.atom_lj_type, uint_dr_to_dr_cof, - self.nl_atom_numbers, self.nl_atom_serial, self.lj_a, - self.lj_b) - lj_energy_sum = lj_e.sum(keepdims=True) - reciprocal_e, self_e, direct_e, correction_e = pme_energy(self.atom_numbers, - self.beta, self.fftx, - self.ffty, self.fftz, - self.pme_bc, - uint_crd, self.charge, - self.nl_atom_numbers, - self.nl_atom_serial, - uint_dr_to_dr_cof, - excluded_matrix=self.excluded_matrix) - - 
ee_ene = reciprocal_e + self_e + direct_e + correction_e - total_energy = bond_energy_sum + angle_energy_sum + dihedral_energy_sum + \ - nb14_lj_energy_sum + nb14_cf_energy_sum + lj_energy_sum + ee_ene - res_cpt_ene = (bond_energy_sum, angle_energy_sum, dihedral_energy_sum, nb14_lj_energy_sum, nb14_cf_energy_sum, \ - lj_energy_sum, ee_ene, total_energy) - return res_cpt_ene - - def simulation_temperature(self): - '''caculate temperature''' - res_ek_energy = md_temperature(self.residual_matrix, self.velocity, self.mass, sparse=False) - temperature = res_ek_energy.sum() - return temperature - - def simulation_md_iteration_leap_frog_liujian(self, inverse_mass, sqrt_mass_inverse, crd, frc): - '''simulation leap frog iteration liujian''' - return md_iteration_leap_frog_liujian(self.atom_numbers, self.half_dt, - self.dt, self.exp_gamma, inverse_mass, - sqrt_mass_inverse, self.velocity, - crd, frc, self.acc) - - def construct(self): - '''construct''' - self.last_crd = self.crd - res = self.neighbor_list_update(self.atom_numbers_in_grid_bucket, - self.bucket, - self.crd, - self.box_length, - self.grid_n, - self.grid_length_inverse, - self.atom_in_grid_serial, - self.old_crd, - self.crd_to_uint_crd_cof, - self.uint_crd, - self.pointer, - self.nl_atom_numbers, - self.nl_atom_serial, - self.uint_dr_to_dr_cof, - self.excluded_list_start, - self.excluded_list, - self.excluded_numbers, - self.need_refresh_flag, - self.refresh_count) - self.nl_atom_numbers = F.depend(self.nl_atom_numbers, res) - self.nl_atom_serial = F.depend(self.nl_atom_serial, res) - self.uint_dr_to_dr_cof = F.depend(self.uint_dr_to_dr_cof, res) - self.old_crd = F.depend(self.old_crd, res) - self.atom_numbers_in_grid_bucket = F.depend(self.atom_numbers_in_grid_bucket, res) - self.bucket = F.depend(self.bucket, res) - self.atom_in_grid_serial = F.depend(self.atom_in_grid_serial, res) - self.pointer = F.depend(self.pointer, res) - uint_crd = self.simulation_beforce_caculate_force() - force = self.simulation_caculate_force(uint_crd, self.uint_dr_to_dr_cof, self.nl_atom_numbers, - self.nl_atom_serial) - - bond_energy_sum, angle_energy_sum, dihedral_energy_sum, nb14_lj_energy_sum, nb14_cf_energy_sum, \ - lj_energy_sum, ee_ene, total_energy = self.simulation_caculate_energy(uint_crd, self.uint_dr_to_dr_cof) - - temperature = self.simulation_temperature() - self.velocity = F.depend(self.velocity, temperature) - self.velocity, self.crd, _ = self.simulation_md_iteration_leap_frog_liujian(self.mass_inverse, - self.sqrt_mass, self.crd, force) - res_main = (temperature, total_energy, bond_energy_sum, angle_energy_sum, dihedral_energy_sum, \ - nb14_lj_energy_sum, nb14_cf_energy_sum, lj_energy_sum, ee_ene, res) - return res_main diff --git a/tests/st/sponge/sponge_cuda/__init__.py b/tests/st/sponge/sponge_cuda/__init__.py deleted file mode 100644 index 1a9a6172bd1..00000000000 --- a/tests/st/sponge/sponge_cuda/__init__.py +++ /dev/null @@ -1,14 +0,0 @@ -# Copyright 2020-2021 Huawei Technologies Co., Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
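The construct() method above mutates its neighbor-list buffers in place and then threads every later read through F.depend, which is how the graph keeps the update ordered ahead of the force and energy computations. A minimal toy cell showing the same idiom (not part of the deleted sources; ops.assign/ops.depend as in current MindSpore):

    import mindspore as ms
    from mindspore import nn, ops

    class Toy(nn.Cell):
        '''toy cell: force a side effect to run before its result is read'''

        def __init__(self):
            super().__init__()
            self.buf = ms.Parameter(ms.Tensor(0.0, ms.float32), requires_grad=False)

        def construct(self, x):
            res = ops.assign(self.buf, x.sum())   # in-place update, like the neighbor-list op
            buf = ops.depend(self.buf, res)       # read of buf is now ordered after the assign
            return buf * 2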
-# ============================================================================ diff --git a/tests/st/sponge/sponge_cuda/angle.py b/tests/st/sponge/sponge_cuda/angle.py deleted file mode 100644 index 51f0f3f3046..00000000000 --- a/tests/st/sponge/sponge_cuda/angle.py +++ /dev/null @@ -1,175 +0,0 @@ -# Copyright 2021 Huawei Technologies Co., Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================ -'''Angle''' - - -class Angle: - '''Angle''' - - def __init__(self, controller): - self.module_name = "angle" - self.h_atom_a = [] - self.h_atom_b = [] - self.h_atom_c = [] - self.h_angle_k = [] - self.h_angle_theta0 = [] - self.type_theta0 = [] - self.type_k = [] - self.angle_numbers = 0 - if controller.amber_parm is not None: - file_path = controller.amber_parm - self.read_information_from_amberfile(file_path) - self.is_initialized = 1 - else: - self.read_in_file(controller) - - def read_in_file(self, controller): - """read_in_file""" - print("START INITIALIZING ANGLE:") - name = self.module_name + "_in_file" - if name in controller.command_set: - path = controller.command_set[name] - file = open(path, 'r') - context = file.readlines() - self.angle_numbers = int(context[0].strip()) - print(" angle_numbers is ", self.angle_numbers) - for i in range(self.angle_numbers): - val = list(map(float, context[i + 1].strip().split())) - self.h_atom_a.append(int(val[0])) - self.h_atom_b.append(int(val[1])) - self.h_atom_c.append(int(val[2])) - self.h_angle_k.append(val[3]) - self.h_angle_theta0.append(val[4]) - self.is_initialized = 1 - file.close() - print("END INITIALIZING ANGLE") - - def read_information_from_amberfile(self, file_path): - '''read amber file''' - file = open(file_path, 'r') - context = file.readlines() - file.close() - for idx, val in enumerate(context): - if idx < len(context) - 1: - if "%FLAG POINTERS" in val + context[idx + 1] and "%FORMAT(10I8)" in val + context[idx + 1]: - start_idx = idx + 2 - count = 0 - value = list(map(int, context[start_idx].strip().split())) - self.angle_with_h_numbers = value[4] - self.angle_without_h_numbers = value[5] - self.angle_numbers = self.angle_with_h_numbers + self.angle_without_h_numbers - information = [] - information.extend(value) - while count < 15: - start_idx += 1 - value = list(map(int, context[start_idx].strip().split())) - information.extend(value) - count += len(value) - self.angle_type_numbers = information[16] - print("angle type numbers ", self.angle_type_numbers) - break - - self.h_atom_a = [0] * self.angle_numbers - self.h_atom_b = [0] * self.angle_numbers - self.h_atom_c = [0] * self.angle_numbers - self.h_type = [0] * self.angle_numbers - angle_count = 0 - for idx, val in enumerate(context): - if "%FLAG ANGLES_INC_HYDROGEN" in val: - count = 0 - start_idx = idx - information = [] - while count < 4 * self.angle_with_h_numbers: - start_idx += 1 - if "%FORMAT" in context[start_idx]: - continue - else: - value = list(map(int, context[start_idx].strip().split())) - 
information.extend(value) - count += len(value) - for _ in range(self.angle_with_h_numbers): - self.h_atom_a[angle_count] = int(information[angle_count * 4 + 0] / 3) - self.h_atom_b[angle_count] = int(information[angle_count * 4 + 1] / 3) - self.h_atom_c[angle_count] = int(information[angle_count * 4 + 2] / 3) - self.h_type[angle_count] = information[angle_count * 4 + 3] - 1 - angle_count += 1 - - break - - for idx, val in enumerate(context): - if "%FLAG ANGLES_WITHOUT_HYDROGEN" in val: - count = 0 - start_idx = idx - information = [] - while count < 4 * self.angle_without_h_numbers: - start_idx += 1 - if "%FORMAT" in context[start_idx]: - continue - else: - value = list(map(int, context[start_idx].strip().split())) - information.extend(value) - count += len(value) - for _ in range(self.angle_without_h_numbers): - self.h_atom_a[angle_count] = int(information[(angle_count - self.angle_with_h_numbers) * 4 + 0] / 3) - self.h_atom_b[angle_count] = int(information[(angle_count - self.angle_with_h_numbers) * 4 + 1] / 3) - self.h_atom_c[angle_count] = int(information[(angle_count - self.angle_with_h_numbers) * 4 + 2] / 3) - self.h_type[angle_count] = information[(angle_count - self.angle_with_h_numbers) * 4 + 3] - 1 - angle_count += 1 - break - self.processor(context, angle_count) - - def processor(self, context, angle_count): - ''' processor ''' - self.type_k = [0] * self.angle_type_numbers - for idx, val in enumerate(context): - if "%FLAG ANGLE_FORCE_CONSTANT" in val: - count = 0 - start_idx = idx - information = [] - while count < self.angle_type_numbers: - start_idx += 1 - if "%FORMAT" in context[start_idx]: - continue - else: - value = list(map(float, context[start_idx].strip().split())) - information.extend(value) - count += len(value) - self.type_k = information[:self.angle_type_numbers] - break - - self.type_theta0 = [0] * self.angle_type_numbers - for idx, val in enumerate(context): - if "%FLAG ANGLE_EQUIL_VALUE" in val: - count = 0 - start_idx = idx - information = [] - while count < self.angle_type_numbers: - start_idx += 1 - if "%FORMAT" in context[start_idx]: - continue - else: - value = list(map(float, context[start_idx].strip().split())) - information.extend(value) - count += len(value) - self.type_theta0 = information[:self.angle_type_numbers] - break - if self.angle_numbers != angle_count: - print("angle count %d != angle_number %d ", angle_count, self.angle_numbers) - - self.h_angle_k = [] - self.h_angle_theta0 = [] - for i in range(self.angle_numbers): - self.h_angle_k.append(self.type_k[self.h_type[i]]) - self.h_angle_theta0.append(self.type_theta0[self.h_type[i]]) diff --git a/tests/st/sponge/sponge_cuda/bond.py b/tests/st/sponge/sponge_cuda/bond.py deleted file mode 100644 index 143c6918a01..00000000000 --- a/tests/st/sponge/sponge_cuda/bond.py +++ /dev/null @@ -1,164 +0,0 @@ -# Copyright 2021 Huawei Technologies Co., Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
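The Angle reader above, and the Bond, Dihedral, Lennard-Jones and mass readers that follow, all repeat one scanning pattern: locate a %FLAG header, skip %FORMAT lines, and accumulate whitespace-separated values until the expected count is reached. A hypothetical helper (the deleted code inlines this loop each time) distilling that pattern:

    # Hypothetical helper, not in the deleted sources: read one %FLAG section
    # of an AMBER parm7 file that has already been split into lines.
    def read_flag_section(context, flag, expected, cast=float):
        for idx, val in enumerate(context):
            if flag in val:
                information = []
                start_idx = idx
                while len(information) < expected:
                    start_idx += 1
                    if "%FORMAT" in context[start_idx]:
                        continue
                    information.extend(map(cast, context[start_idx].strip().split()))
                return information[:expected]
        return []

    # e.g. type_k = read_flag_section(context, "%FLAG ANGLE_FORCE_CONSTANT", angle_type_numbers)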
-# ============================================================================ -'''Bond''' - - -class Bond: - '''Bond''' - - def __init__(self, controller): - self.module_name = "bond" - self.h_atom_a = [] - self.h_atom_b = [] - self.h_k = [] - self.h_r0 = [] - self.bond_numbers = 0 - self.is_initialized = 0 - if controller.amber_parm is not None: - file_path = controller.amber_parm - self.read_information_from_amberfile(file_path) - self.is_initialized = 1 - else: - self.read_in_file(controller) - - def read_in_file(self, controller): - """read_in_file""" - print("START INITIALIZING BOND:") - name = self.module_name + "_in_file" - if name in controller.command_set: - path = controller.command_set[name] - file = open(path, 'r') - context = file.readlines() - self.bond_numbers = int(context[0].strip()) - print(" bond_numbers is ", self.bond_numbers) - for i in range(self.bond_numbers): - val = list(map(float, context[i + 1].strip().split())) - self.h_atom_a.append(int(val[0])) - self.h_atom_b.append(int(val[1])) - self.h_k.append(val[2]) - self.h_r0.append(val[3]) - self.is_initialized = 1 - file.close() - print("END INITIALIZING BOND") - - def read_information_from_amberfile(self, file_path): - '''read amber file''' - file = open(file_path, 'r') - context = file.readlines() - file.close() - for idx, val in enumerate(context): - if idx < len(context) - 1: - if "%FLAG POINTERS" in val + context[idx + 1] and "%FORMAT(10I8)" in val + context[idx + 1]: - start_idx = idx + 2 - count = 0 - value = list(map(int, context[start_idx].strip().split())) - self.bond_with_hydrogen = value[2] - self.bond_numbers = value[3] - self.bond_numbers += self.bond_with_hydrogen - information = [] - information.extend(value) - while count < 16: - start_idx += 1 - value = list(map(int, context[start_idx].strip().split())) - information.extend(value) - count += len(value) - self.bond_type_numbers = information[15] - print("bond type numbers ", self.bond_type_numbers) - break - - for idx, val in enumerate(context): - if "%FLAG BOND_FORCE_CONSTANT" in val: - count = 0 - start_idx = idx - information = [] - while count < self.bond_type_numbers: - start_idx += 1 - if "%FORMAT" in context[start_idx]: - continue - else: - value = list(map(float, context[start_idx].strip().split())) - information.extend(value) - count += len(value) - self.bond_type_k = information[:self.bond_type_numbers] - break - - for idx, val in enumerate(context): - if "%FLAG BOND_EQUIL_VALUE" in val: - count = 0 - start_idx = idx - information = [] - while count < self.bond_type_numbers: - start_idx += 1 - if "%FORMAT" in context[start_idx]: - continue - else: - value = list(map(float, context[start_idx].strip().split())) - information.extend(value) - count += len(value) - self.bond_type_r = information[:self.bond_type_numbers] - break - self.processor(context) - - def processor(self, context): - '''processor''' - for idx, val in enumerate(context): - if "%FLAG BONDS_INC_HYDROGEN" in val: - self.h_atom_a = [0] * self.bond_numbers - self.h_atom_b = [0] * self.bond_numbers - self.h_k = [0] * self.bond_numbers - self.h_r0 = [0] * self.bond_numbers - - count = 0 - start_idx = idx - information = [] - while count < 3 * self.bond_with_hydrogen: - start_idx += 1 - if "%FORMAT" in context[start_idx]: - continue - else: - value = list(map(int, context[start_idx].strip().split())) - information.extend(value) - count += len(value) - - for i in range(self.bond_with_hydrogen): - self.h_atom_a[i] = int(information[3 * i + 0] / 3) - self.h_atom_b[i] = 
int(information[3 * i + 1] / 3) - tmpi = information[3 * i + 2] - 1 - self.h_k[i] = self.bond_type_k[tmpi] - self.h_r0[i] = self.bond_type_r[tmpi] - break - - for idx, val in enumerate(context): - if "%FLAG BONDS_WITHOUT_HYDROGEN" in val: - - count = 0 - start_idx = idx - information = [] - while count < 3 * (self.bond_numbers - self.bond_with_hydrogen): - start_idx += 1 - if "%FORMAT" in context[start_idx]: - continue - else: - value = list(map(int, context[start_idx].strip().split())) - information.extend(value) - count += len(value) - - for i in range(self.bond_with_hydrogen, self.bond_numbers): - self.h_atom_a[i] = int(information[3 * (i - self.bond_with_hydrogen) + 0] / 3) - self.h_atom_b[i] = int(information[3 * (i - self.bond_with_hydrogen) + 1] / 3) - tmpi = information[3 * (i - self.bond_with_hydrogen) + 2] - 1 - self.h_k[i] = self.bond_type_k[tmpi] - self.h_r0[i] = self.bond_type_r[tmpi] - break diff --git a/tests/st/sponge/sponge_cuda/dihedral.py b/tests/st/sponge/sponge_cuda/dihedral.py deleted file mode 100644 index 11b360e47f9..00000000000 --- a/tests/st/sponge/sponge_cuda/dihedral.py +++ /dev/null @@ -1,227 +0,0 @@ -# Copyright 2021 Huawei Technologies Co., Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================ -'''Dihedral''' -import math - - -class Dihedral: - '''Dihedral''' - - def __init__(self, controller): - self.constant_pi = 3.1415926535897932 - self.module_name = "dihedral" - self.h_atom_a = [] - self.h_atom_b = [] - self.h_atom_c = [] - self.h_atom_d = [] - self.h_ipn = [] - self.h_pn = [] - self.h_pk = [] - self.h_gamc = [] - self.h_gams = [] - self.dihedral_numbers = 0 - if controller.amber_parm is not None: - file_path = controller.amber_parm - self.read_information_from_amberfile(file_path) - self.is_initialized = 1 - else: - self.read_in_file(controller) - - def read_in_file(self, controller): - """read_in_file""" - print("START INITIALIZING DIHEDRAL:") - name = self.module_name + "_in_file" - if name in controller.command_set: - path = controller.command_set[name] - file = open(path, 'r') - context = file.readlines() - self.dihedral_numbers = int(context[0].strip()) - print(" dihedral_numbers is ", self.dihedral_numbers) - for i in range(self.dihedral_numbers): - val = list(map(float, context[i + 1].strip().split())) - self.h_atom_a.append(int(val[0])) - self.h_atom_b.append(int(val[1])) - self.h_atom_c.append(int(val[2])) - self.h_atom_d.append(int(val[3])) - self.h_ipn.append(val[4]) - self.h_pn.append(val[4]) - self.h_pk.append(val[5]) - self.h_gamc.append(math.cos(val[6]) * val[5]) - self.h_gams.append(math.sin(val[6]) * val[5]) - - self.is_initialized = 1 - file.close() - print("END INITIALIZING DIHEDRAL") - - def read_information_from_amberfile(self, file_path): - '''read amber file''' - file = open(file_path, 'r') - context = file.readlines() - file.close() - for idx, val in enumerate(context): - if idx < len(context) - 1: - if "%FLAG POINTERS" in val + context[idx + 1] and 
"%FORMAT(10I8)" in val + context[idx + 1]: - start_idx = idx + 2 - count = 0 - value = list(map(int, context[start_idx].strip().split())) - self.dihedral_with_hydrogen = value[6] - self.dihedral_numbers = value[7] - self.dihedral_numbers += self.dihedral_with_hydrogen - information = [] - information.extend(value) - while count < 15: - start_idx += 1 - value = list(map(int, context[start_idx].strip().split())) - information.extend(value) - count += len(value) - self.dihedral_type_numbers = information[17] - print("dihedral type numbers ", self.dihedral_type_numbers) - break - - self.phase_type = [0] * self.dihedral_type_numbers - self.pk_type = [0] * self.dihedral_type_numbers - self.pn_type = [0] * self.dihedral_type_numbers - - for idx, val in enumerate(context): - if "%FLAG DIHEDRAL_FORCE_CONSTANT" in val: - count = 0 - start_idx = idx - information = [] - while count < self.dihedral_type_numbers: - start_idx += 1 - if "%FORMAT" in context[start_idx]: - continue - else: - value = list(map(float, context[start_idx].strip().split())) - information.extend(value) - count += len(value) - self.pk_type = information[:self.dihedral_type_numbers] - break - - for idx, val in enumerate(context): - if "%FLAG DIHEDRAL_PHASE" in val: - count = 0 - start_idx = idx - information = [] - while count < self.dihedral_type_numbers: - start_idx += 1 - if "%FORMAT" in context[start_idx]: - continue - else: - value = list(map(float, context[start_idx].strip().split())) - information.extend(value) - count += len(value) - self.phase_type = information[:self.dihedral_type_numbers] - break - - for idx, val in enumerate(context): - if "%FLAG DIHEDRAL_PERIODICITY" in val: - count = 0 - start_idx = idx - information = [] - while count < self.dihedral_type_numbers: - start_idx += 1 - if "%FORMAT" in context[start_idx]: - continue - else: - value = list(map(float, context[start_idx].strip().split())) - information.extend(value) - count += len(value) - self.pn_type = information[:self.dihedral_type_numbers] - break - self.processor(context) - - def processor(self, context): - '''processor''' - self.h_atom_a = [0] * self.dihedral_numbers - self.h_atom_b = [0] * self.dihedral_numbers - self.h_atom_c = [0] * self.dihedral_numbers - self.h_atom_d = [0] * self.dihedral_numbers - self.h_pk = [] - self.h_gamc = [] - self.h_gams = [] - self.h_pn = [] - self.h_ipn = [] - for idx, val in enumerate(context): - if "%FLAG DIHEDRALS_INC_HYDROGEN" in val: - count = 0 - start_idx = idx - information = [] - while count < 5 * self.dihedral_with_hydrogen: - start_idx += 1 - if "%FORMAT" in context[start_idx]: - continue - else: - value = list(map(int, context[start_idx].strip().split())) - information.extend(value) - count += len(value) - for i in range(self.dihedral_with_hydrogen): - self.h_atom_a[i] = information[i * 5 + 0] / 3 - self.h_atom_b[i] = information[i * 5 + 1] / 3 - self.h_atom_c[i] = information[i * 5 + 2] / 3 - self.h_atom_d[i] = abs(information[i * 5 + 3] / 3) - tmpi = information[i * 5 + 4] - 1 - self.h_pk.append(self.pk_type[tmpi]) - tmpf = self.phase_type[tmpi] - if abs(tmpf - self.constant_pi) <= 0.001: - tmpf = self.constant_pi - tmpf2 = math.cos(tmpf) - if abs(tmpf2) < 1e-6: - tmpf2 = 0 - self.h_gamc.append(tmpf2 * self.h_pk[i]) - tmpf2 = math.sin(tmpf) - if abs(tmpf2) < 1e-6: - tmpf2 = 0 - self.h_gams.append(tmpf2 * self.h_pk[i]) - self.h_pn.append(abs(self.pn_type[tmpi])) - self.h_ipn.append(int(self.h_pn[i] + 0.001)) - break - for idx, val in enumerate(context): - if "%FLAG DIHEDRALS_WITHOUT_HYDROGEN" in val: - 
count = 0 - start_idx = idx - information = [] - while count < 5 * (self.dihedral_numbers - self.dihedral_with_hydrogen): - start_idx += 1 - if "%FORMAT" in context[start_idx]: - continue - else: - value = list(map(int, context[start_idx].strip().split())) - information.extend(value) - count += len(value) - for i in range(self.dihedral_with_hydrogen, self.dihedral_numbers): - self.h_atom_a[i] = information[(i - self.dihedral_with_hydrogen) * 5 + 0] / 3 - self.h_atom_b[i] = information[(i - self.dihedral_with_hydrogen) * 5 + 1] / 3 - self.h_atom_c[i] = information[(i - self.dihedral_with_hydrogen) * 5 + 2] / 3 - self.h_atom_d[i] = abs(information[(i - self.dihedral_with_hydrogen) * 5 + 3] / 3) - tmpi = information[(i - self.dihedral_with_hydrogen) * 5 + 4] - 1 - self.h_pk.append(self.pk_type[tmpi]) - tmpf = self.phase_type[tmpi] - if abs(tmpf - self.constant_pi) <= 0.001: - tmpf = self.constant_pi - tmpf2 = math.cos(tmpf) - if abs(tmpf2) < 1e-6: - tmpf2 = 0 - self.h_gamc.append(tmpf2 * self.h_pk[i]) - tmpf2 = math.sin(tmpf) - if abs(tmpf2) < 1e-6: - tmpf2 = 0 - self.h_gams.append(tmpf2 * self.h_pk[i]) - self.h_pn.append(abs(self.pn_type[tmpi])) - self.h_ipn.append(int(self.h_pn[i] + 0.001)) - break - for i in range(self.dihedral_numbers): - if self.h_atom_c[i] < 0: - self.h_atom_c[i] *= -1 diff --git a/tests/st/sponge/sponge_cuda/langevin_liujian_md.py b/tests/st/sponge/sponge_cuda/langevin_liujian_md.py deleted file mode 100644 index df45543a800..00000000000 --- a/tests/st/sponge/sponge_cuda/langevin_liujian_md.py +++ /dev/null @@ -1,112 +0,0 @@ -# Copyright 2021 Huawei Technologies Co., Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
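The gamc/gams values built in the Dihedral processor above fold the phase into the torsion term once, so the kernels never evaluate cos(n*phi - phase) directly: pk*(1 + cos(n*phi - phase)) = pk + gamc*cos(n*phi) + gams*sin(n*phi), with gamc = cos(phase)*pk and gams = sin(phase)*pk. A small numerical check of that identity (demo values assumed):

    # Verify the phase-folding identity used by the dihedral kernels.
    import math

    pk, phase, pn = 2.0, math.pi, 2.0     # force constant, phase, periodicity
    gamc = math.cos(phase) * pk
    gams = math.sin(phase) * pk
    if abs(gamc) < 1e-6:
        gamc = 0.0
    if abs(gams) < 1e-6:
        gams = 0.0

    phi = 0.7
    lhs = pk * (1.0 + math.cos(pn * phi - phase))
    rhs = pk + gamc * math.cos(pn * phi) + gams * math.sin(pn * phi)
    assert abs(lhs - rhs) < 1e-9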
-# ============================================================================ -'''LagevinLiuJian''' -import math - -import numpy as np - - -class LangevinLiujian: - '''LagevinLiuJian''' - - def __init__(self, controller, atom_numbers): - self.module_name = "langevin_liu" - self.atom_numbers = atom_numbers - self.h_mass = [] - print("START INITIALIZING LANGEVIN_LIU DYNAMICS:") - if controller.amber_parm is not None: - file_path = controller.amber_parm - self.read_information_from_amberfile(file_path) - else: - self.read_mass_file(controller) - self.constant_time_convertion = 20.455 - self.constant_kb = 0.00198716 - - self.target_temperature = 300.0 if "target_temperature" not in controller.command_set else float( - controller.command_set["target_temperature"]) - self.gamma_ln = 1.0 - if "gamma" in controller.command_set: - self.gamma_ln = float(controller.command_set["gamma"]) - if "langevin_liu_gamma" in controller.command_set: - self.gamma_ln = float(controller.command_set["langevin_liu_gamma"]) - print(" langevin_liu_gamma is ", self.gamma_ln) - - self.random_seed = 1 if "seed" not in controller.command_set else int( - controller.command_set["seed"]) - - print(" target temperature is {} K".format(self.target_temperature)) - print(" friction coefficient is {} ps^-1".format(self.gamma_ln)) - print(" random seed is ", self.random_seed) - self.dt = 0.001 if "dt" not in controller.command_set else float( - controller.command_set["dt"]) * self.constant_time_convertion - self.half_dt = 0.5 * self.dt - - self.float4_numbers = math.ceil(3 * self.atom_numbers / 4.0) - self.rand_state = np.float32(np.zeros([self.float4_numbers * 16])) - self.gamma_ln = self.gamma_ln / self.constant_time_convertion - self.exp_gamma = math.exp(-1 * self.gamma_ln * self.dt) - self.sqrt_gamma = math.sqrt((1. - self.exp_gamma * self.exp_gamma) * self.target_temperature * self.constant_kb) - self.h_sqrt_mass = [0] * self.atom_numbers - for i in range(self.atom_numbers): - self.h_sqrt_mass[i] = self.sqrt_gamma * math.sqrt(1. / self.h_mass[i]) if self.h_mass[i] != 0 else 0 - - self.max_velocity = 0.0 - if "velocity_max" in controller.command_set: - self.max_velocity = float(controller.command_set["velocity_max"]) - if "langevin_liu_velocity_max" in controller.command_set: - self.max_velocity = float(controller.command_set["langevin_liu_velocity_max"]) - print(" max velocity is ", self.max_velocity) - - self.h_mass_inverse = [0] * self.atom_numbers - for i in range(self.atom_numbers): - self.h_mass_inverse[i] = 1. 
/ self.h_mass[i] if self.h_mass[i] != 0 else 0 - - self.is_initialized = 1 - - print("END INITIALIZING LANGEVIN_LIU DYNAMICS") - - def read_mass_file(self, controller): - if "mass_in_file" in controller.command_set: - path = controller.command_set["mass_in_file"] - file = open(path, 'r') - context = file.readlines() - for idx, val in enumerate(context): - if idx > 0: - self.h_mass.append(float(val.strip())) - file.close() - - def read_information_from_amberfile(self, file_path): - '''read amber file''' - file = open(file_path, 'r') - context = file.readlines() - file.close() - self.h_mass = [0] * self.atom_numbers - for idx, val in enumerate(context): - if "%FLAG MASS" in val: - count = 0 - start_idx = idx - information = [] - while count < self.atom_numbers: - start_idx += 1 - if "%FORMAT" in context[start_idx]: - continue - else: - value = list(map(float, context[start_idx].strip().split())) - information.extend(value) - count += len(value) - - for i in range(self.atom_numbers): - self.h_mass[i] = information[i] - break diff --git a/tests/st/sponge/sponge_cuda/lennard_jones.py b/tests/st/sponge/sponge_cuda/lennard_jones.py deleted file mode 100644 index b47da4b731a..00000000000 --- a/tests/st/sponge/sponge_cuda/lennard_jones.py +++ /dev/null @@ -1,170 +0,0 @@ -# Copyright 2021 Huawei Technologies Co., Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
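The constants precomputed in LangevinLiujian above implement the Liu-style Langevin update: velocities are damped by exp_gamma = exp(-gamma*dt), and h_sqrt_mass stores the per-atom amplitude of the thermal kick, sqrt((1 - exp_gamma^2) * kB * T / m). A sketch of that precomputation with assumed demo values:

    # Same formulas as the class above, for one carbon atom (demo values).
    import math

    CONSTANT_TIME_CONVERTION = 20.455
    CONSTANT_KB = 0.00198716

    dt = 0.001 * CONSTANT_TIME_CONVERTION
    gamma_ln = 1.0 / CONSTANT_TIME_CONVERTION     # 1.0 ps^-1 in internal units
    target_temperature = 300.0

    exp_gamma = math.exp(-gamma_ln * dt)
    sqrt_gamma = math.sqrt((1.0 - exp_gamma * exp_gamma) * target_temperature * CONSTANT_KB)

    mass = 12.011
    sqrt_mass_term = sqrt_gamma * math.sqrt(1.0 / mass) if mass != 0 else 0.0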
-# ============================================================================ -'''Lennard Jones''' -import mindspore.common.dtype as mstype -from mindspore import Tensor -from mindspore.ops import operations as P - - -class LennardJonesInformation: - '''Lennard Jones''' - - def __init__(self, controller, cutoff, box_length): - self.module_name = "LJ" - self.is_initialized = 0 - self.constant_unit_max_float = 4294967296.0 - self.constant_pi = 3.1415926535897932 - self.cutoff = cutoff - self.box_length = box_length - - if controller.amber_parm is not None: - file_path = controller.amber_parm - self.read_information_from_amberfile(file_path) - self.is_initialized = 1 - else: - self.read_in_file(controller) - - if self.is_initialized: - self.totalc6get = P.Totalc6get(self.atom_numbers) - self.read_information() - - def read_in_file(self, controller): - """read_in_file""" - print("START INITIALIZING LENNADR JONES INFORMATION:") - name = self.module_name + "_in_file" - if name in controller.command_set: - path = controller.command_set[name] - file = open(path, 'r') - context = file.readlines() - self.atom_numbers, self.atom_type_numbers = map(int, context[0].strip().split()) - print(" atom_numbers is ", self.atom_numbers) - print(" atom_LJ_type_number is ", self.atom_type_numbers) - self.pair_type_numbers = self.atom_type_numbers * (self.atom_type_numbers + 1) / 2 - self.h_lj_a = [] - self.h_lj_b = [] - self.h_atom_lj_type = [] - startidx = 1 - count = 0 - print(startidx) - while count < self.atom_type_numbers: - if context[startidx].strip(): - val = list(map(float, context[startidx].strip().split())) - count += 1 - self.h_lj_a.extend(val) - startidx += 1 - assert len(self.h_lj_a) == self.pair_type_numbers - self.h_lj_a = [x * 12.0 for x in self.h_lj_a] - - count = 0 - print(startidx) - while count < self.atom_type_numbers: - if context[startidx].strip(): - val = list(map(float, context[startidx].strip().split())) - count += 1 - self.h_lj_b.extend(val) - startidx += 1 - assert len(self.h_lj_b) == self.pair_type_numbers - self.h_lj_b = [x * 6.0 for x in self.h_lj_b] - for idx, val in enumerate(context): - if idx > startidx: - self.h_atom_lj_type.append(int(val.strip())) - file.close() - self.is_initialized = 1 - print("END INITIALIZING LENNADR JONES INFORMATION") - - def read_information(self): - """read_information""" - self.uint_dr_to_dr_cof = [1.0 / self.constant_unit_max_float * self.box_length[0], - 1.0 / self.constant_unit_max_float * self.box_length[1], - 1.0 / self.constant_unit_max_float * self.box_length[2]] - print("copy lj type to new crd") - self.atom_lj_type = Tensor(self.h_atom_lj_type, mstype.int32) - self.lj_b = Tensor(self.h_lj_b, mstype.float32) - self.factor = self.totalc6get(self.atom_lj_type, self.lj_b) - print(" factor is: ", self.factor) - self.long_range_factor = float(self.factor.asnumpy()) - self.long_range_factor *= -2.0 / 3.0 * self.constant_pi / self.cutoff / self.cutoff / self.cutoff / 6.0 - self.volume = self.box_length[0] * self.box_length[1] * self.box_length[2] - print(" long range correction factor is: ", self.long_range_factor) - print(" End initializing long range LJ correction") - - def read_information_from_amberfile(self, file_path): - '''read amber file''' - file = open(file_path, 'r') - context = file.readlines() - file.close() - - for idx, val in enumerate(context): - if idx < len(context) - 1: - if "%FLAG POINTERS" in val + context[idx + 1] and "%FORMAT(10I8)" in val + context[idx + 1]: - start_idx = idx + 2 - count = 0 - value = list(map(int, 
context[start_idx].strip().split())) - self.atom_numbers = value[0] - self.atom_type_numbers = value[1] - self.pair_type_numbers = int( - self.atom_type_numbers * (self.atom_type_numbers + 1) / 2) - break - self.h_atom_lj_type = [0] * self.atom_numbers - for idx, val in enumerate(context): - if "%FLAG ATOM_TYPE_INDEX" in val: - count = 0 - start_idx = idx - information = [] - while count < self.atom_numbers: - start_idx += 1 - if "%FORMAT" in context[start_idx]: - continue - else: - value = list(map(int, context[start_idx].strip().split())) - information.extend(value) - count += len(value) - for i in range(self.atom_numbers): - self.h_atom_lj_type[i] = information[i] - 1 - break - self.h_lj_a = [0] * self.pair_type_numbers - for idx, val in enumerate(context): - if "%FLAG LENNARD_JONES_ACOEF" in val: - count = 0 - start_idx = idx - information = [] - while count < self.pair_type_numbers: - start_idx += 1 - if "%FORMAT" in context[start_idx]: - continue - else: - value = list(map(float, context[start_idx].strip().split())) - information.extend(value) - count += len(value) - for i in range(self.pair_type_numbers): - self.h_lj_a[i] = 12.0 * information[i] - break - self.h_lj_b = [0] * self.pair_type_numbers - for idx, val in enumerate(context): - if "%FLAG LENNARD_JONES_BCOEF" in val: - count = 0 - start_idx = idx - information = [] - while count < self.pair_type_numbers: - start_idx += 1 - if "%FORMAT" in context[start_idx]: - continue - else: - value = list(map(float, context[start_idx].strip().split())) - information.extend(value) - count += len(value) - for i in range(self.pair_type_numbers): - self.h_lj_b[i] = 6.0 * information[i] - break diff --git a/tests/st/sponge/sponge_cuda/md_information.py b/tests/st/sponge/sponge_cuda/md_information.py deleted file mode 100644 index e83755f2fee..00000000000 --- a/tests/st/sponge/sponge_cuda/md_information.py +++ /dev/null @@ -1,330 +0,0 @@ -# Copyright 2021 Huawei Technologies Co., Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
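The LENNARD_JONES_ACOEF/BCOEF tables read above hold only the upper triangle of the symmetric type-pair matrix, n*(n+1)/2 entries for n LJ types, pre-scaled by 12 and 6 so the kernels can plug them straight into the derivative of A/r^12 - B/r^6. One common triangular indexing convention, shown here purely as an assumption for illustration:

    # Hypothetical pair lookup into a triangular A/B coefficient table.
    def pair_index(type_i, type_j):
        lo, hi = sorted((type_i, type_j))
        return hi * (hi + 1) // 2 + lo

    n_types = 3
    table_len = n_types * (n_types + 1) // 2      # 6 entries for 3 LJ types
    assert pair_index(2, 1) == pair_index(1, 2) == 4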
-# ============================================================================ -'''MD Information''' -import numpy as np -from .system_information import (PeriodicBoxConditionInformation, SystemInformation, - NonBondInformation, NveIteration, ResidueInformation, TrajectoryOutput) - - -class MdInformation: - '''MD Information''' - - def __init__(self, controller): - constant_time_convertion = 20.455 - - self.md_task = controller.md_task - - self.netfrc = 0 if "net_force" not in controller.command_set else int(controller.command_set["net_force"]) - self.ntwx = 1000 if "write_information_interval" not in controller.command_set else int( - controller.command_set["write_information_interval"]) - self.atom_numbers = 0 - self.residue_numbers = 0 - self.density = 0.0 - self.lin_serial = [] - self.h_res_start = [] - self.h_res_end = [] - - self.h_charge = [] - self.h_mass = [] - self.h_mass_inverse = [] - self.h_charge = [] - self.coordinate = [] - self.box_length = [] - self.vel = [] - self.crd = [] - self.velocity = [] - - self.mode = self.read_mode(controller) - # read dt - self.dt = 0.001 * constant_time_convertion if "dt" not in controller.command_set else float( - controller.command_set["dt"]) * constant_time_convertion - self.dt_in_ps = 0.001 if "dt" not in controller.command_set else float(controller.command_set["dt"]) - - if controller.amber_parm is not None: - self.read_basic_system_information_from_amber_file(controller.amber_parm) - if controller.initial_coordinates_file is not None: - self.read_basic_system_information_from_rst7(controller.initial_coordinates_file) - else: - self.read_coordinate_and_velocity(controller) - self.read_mass(controller) - self.read_charge(controller) - self.crd = self.coordinate - - self.sys = SystemInformation(controller, self) - self.nb = NonBondInformation(controller, self) - self.output = TrajectoryOutput(controller) - self.nve = NveIteration(controller) - self.res = ResidueInformation(controller, self) - self.pbc = PeriodicBoxConditionInformation(self.box_length) - - if not self.h_res_start: - self.h_res_start = self.res.h_res_start - self.h_res_end = self.res.h_res_end - self.residue_numbers = self.res.residue_numbers - - # Atom_Information_Initial - self.acc = np.zeros([self.atom_numbers, 3]) - self.frc = np.zeros([self.atom_numbers, 3]) - self.sys.freedom = 3 * self.atom_numbers - self.is_initialized = 1 - - self.velocity = np.reshape(np.asarray(self.velocity, np.float32), [self.atom_numbers, 3]) - self.step_limit = self.sys.step_limit - - @staticmethod - def read_mode(controller): - """read_mode""" - if "mode" in controller.command_set: - if controller.command_set["mode"] in ["NVT", "nvt", "1"]: - print(" Mode set to NVT\n") - mode = 1 - elif controller.command_set["mode"] in ["NPT", "npt", "2"]: - print(" Mode set to NPT\n") - mode = 2 - elif controller.command_set["mode"] in ["Minimization", "minimization", "-1"]: - print(" Mode set to Energy Minimization\n") - mode = -1 - elif controller.command_set["mode"] in ["NVE", "nve", "0"]: - print(" Mode set to NVE\n") - mode = 0 - else: - print( - " Warning: Mode {} is not match. 
Set to NVE as default\n".format(controller.command_set["mode"]))
-                mode = 0
-        else:
-            print("    Mode set to NVE as default\n")
-            mode = 0
-        return mode
-
-    def read_coordinate_in_file(self, path):
-        '''read coordinates file'''
-        file = open(path, 'r')
-        print("    Start reading coordinate_in_file:\n")
-        context = file.readlines()
-        atom_numbers = int(context[0].strip())
-        if self.atom_numbers != 0:
-            if self.atom_numbers != atom_numbers:
-                print("        Error: atom_numbers is not equal: ", atom_numbers, self.atom_numbers)
-                exit(1)
-        else:
-            self.atom_numbers = atom_numbers
-        print("        atom_numbers is ", self.atom_numbers)
-
-        for idx in range(self.atom_numbers):
-            coord = list(map(float, context[idx + 1].strip().split()))
-            self.coordinate.append(coord)
-
-        self.box_length = list(map(float, context[-1].strip().split()))[:3]
-        print("        box_length is: x: {}, y: {}, z: {}".format(
-            self.box_length[0], self.box_length[1], self.box_length[2]))
-        self.crd = self.coordinate
-        file.close()
-
-    def read_velocity_in_file(self, path):
-        '''read velocity file'''
-        file = open(path, 'r')
-        print("    Start reading velocity_in_file:\n")
-        context = file.readlines()
-        for idx, val in enumerate(context):
-            if idx == 0:
-                atom_numbers = int(val.strip())
-                if self.atom_numbers > 0 and atom_numbers != self.atom_numbers:
-                    print("        Error: atom_numbers is not equal: ", atom_numbers, self.atom_numbers)
-                    exit(1)
-                else:
-                    self.atom_numbers = atom_numbers
-            else:
-                vel = list(map(float, val.strip().split()))
-                self.velocity.append(vel)
-        self.vel = self.velocity
-        file.close()
-
-    def read_coordinate_and_velocity(self, controller):
-        """read_coordinate_and_velocity"""
-        if "coordinate_in_file" in controller.command_set:
-            self.read_coordinate_in_file(controller.command_set["coordinate_in_file"])
-            if "velocity_in_file" in controller.command_set:
-                self.read_velocity_in_file(controller.command_set["velocity_in_file"])
-            else:
-                print("    Velocity is set to zero as default\n")
-                self.velocity = [0] * 3 * self.atom_numbers
-
-    def read_mass(self, controller):
-        """read_mass"""
-        print("    Start reading mass:")
-        if "mass_in_file" in controller.command_set:
-            path = controller.command_set["mass_in_file"]
-            file = open(path, 'r')
-            self.total_mass = 0
-            context = file.readlines()
-            for idx, val in enumerate(context):
-                if idx == 0:
-                    atom_numbers = int(val.strip())
-                    if self.atom_numbers > 0 and (atom_numbers != self.atom_numbers):
-                        print("        Error: atom_numbers is not equal: ", atom_numbers, self.atom_numbers)
-                        exit(1)
-                    else:
-                        self.atom_numbers = atom_numbers
-                else:
-                    mass = float(val.strip())
-                    self.h_mass.append(mass)
-                    self.total_mass += mass
-                    if mass == 0:
-                        self.h_mass_inverse.append(0.0)
-                    else:
-                        self.h_mass_inverse.append(1 / mass)
-            file.close()
-        else:
-            print("    mass is set to 20 as default")
-            self.total_mass = 20 * self.atom_numbers
-            self.h_mass = [20] * self.atom_numbers
-            self.h_mass_inverse = [1 / 20] * self.atom_numbers
-
-        print("    End reading mass")
-
-    def read_charge(self, controller):
-        """read_charge"""
-        if "charge_in_file" in controller.command_set:
-            print("    Start reading charge:")
-            path = controller.command_set["charge_in_file"]
-            file = open(path, 'r')
-            context = file.readlines()
-            for idx, val in enumerate(context):
-                if idx == 0:
-                    atom_numbers = int(val.strip())
-                    if self.atom_numbers > 0 and (atom_numbers != self.atom_numbers):
-                        print("        Error: atom_numbers is not equal: ", atom_numbers, self.atom_numbers)
-                        exit(1)
-                    else:
-                        self.atom_numbers = atom_numbers
-                else:
-    def read_charge(self, controller):
-        """read_charge"""
-        if "charge_in_file" in controller.command_set:
-            print("    Start reading charge:")
-            path = controller.command_set["charge_in_file"]
-            file = open(path, 'r')
-            context = file.readlines()
-            for idx, val in enumerate(context):
-                if idx == 0:
-                    atom_numbers = int(val.strip())
-                    if self.atom_numbers > 0 and (atom_numbers != self.atom_numbers):
-                        print("        Error: atom_numbers is not equal: ", atom_numbers, self.atom_numbers)
-                        exit(1)
-                    else:
-                        self.atom_numbers = atom_numbers
-                else:
-                    self.h_charge.append(float(val.strip()))
-            file.close()
-        else:
-            self.h_charge = [0.0] * self.atom_numbers
-        print("    End reading charge")
-
-    def read_basic_system_information_from_amber_file(self, path):
-        '''read amber file'''
-        file = open(path, 'r')
-        context = file.readlines()
-        file.close()
-        for idx, val in enumerate(context):
-            if idx < len(context) - 1:
-                if "%FLAG POINTERS" in val + context[idx + 1] and "%FORMAT(10I8)" in val + context[idx + 1]:
-                    start_idx = idx + 2
-                    value = list(map(int, context[start_idx].strip().split()))
-                    self.atom_numbers = value[0]
-                    count = len(value) - 1
-                    while count < 10:
-                        start_idx += 1
-                        value = list(map(int, context[start_idx].strip().split()))
-                        count += len(value)
-                    self.residue_numbers = list(map(int, context[start_idx].strip().split()))[
-                        10 - (count - 10)]  # note: this index arithmetic is fragile and may be wrong
-                    break
-
-        if self.residue_numbers != 0 and self.atom_numbers != 0:
-            for idx, val in enumerate(context):
-                if "%FLAG RESIDUE_POINTER" in val:
-                    count = 0
-                    start_idx = idx
-                    while count != self.residue_numbers:
-                        start_idx += 1
-                        if "%FORMAT" in context[start_idx]:
-                            continue
-                        else:
-                            value = list(map(int, context[start_idx].strip().split()))
-                            self.lin_serial.extend(value)
-                            count += len(value)
-                    for i in range(self.residue_numbers - 1):
-                        self.h_res_start.append(self.lin_serial[i] - 1)
-                        self.h_res_end.append(self.lin_serial[i + 1] - 1)
-                    self.h_res_start.append(self.lin_serial[-1] - 1)
-                    self.h_res_end.append(self.atom_numbers + 1 - 1)
-                    break
-        self.processor(context)
-
-    def processor(self, context):
-        '''processor'''
-        for idx, val in enumerate(context):
-            if "%FLAG MASS" in val:
-                count = 0
-                start_idx = idx
-                while count != self.atom_numbers:
-                    start_idx += 1
-                    if "%FORMAT" in context[start_idx]:
-                        continue
-                    else:
-                        value = list(map(float, context[start_idx].strip().split()))
-                        self.h_mass.extend(value)
-                        count += len(value)
-                for i in range(self.atom_numbers):
-                    if self.h_mass[i] == 0:
-                        self.h_mass_inverse.append(0.0)
-                    else:
-                        self.h_mass_inverse.append(1.0 / self.h_mass[i])
-                    self.density += self.h_mass[i]
-                break
-        for idx, val in enumerate(context):
-            if "%FLAG CHARGE" in val:
-                count = 0
-                start_idx = idx
-                while count != self.atom_numbers:
-                    start_idx += 1
-                    if "%FORMAT" in context[start_idx]:
-                        continue
-                    else:
-                        value = list(map(float, context[start_idx].strip().split()))
-                        self.h_charge.extend(value)
-                        count += len(value)
-                break
-
-    def read_basic_system_information_from_rst7(self, path):
-        '''read rst7 file'''
-        file = open(path, 'r')
-        context = file.readlines()
-        file.close()
-        x = context[1].strip().split()
-        irest = 1 if len(x) > 1 else 0
-        atom_numbers = int(context[1].strip().split()[0])
-        if atom_numbers != self.atom_numbers:
-            print("        Error: atom_numbers in rst7 is not equal: ", atom_numbers, self.atom_numbers)
-        else:
-            print("        atom_numbers check passed")
-        information = []
-        count = 0
-        start_idx = 1
-        if irest == 1:
-            self.simulation_start_time = float(x[1])
-            while count <= 6 * self.atom_numbers + 3:
-                start_idx += 1
-                value = list(map(float, context[start_idx].strip().split()))
-                information.extend(value)
-                count += len(value)
-            self.coordinate = information[: 3 * self.atom_numbers]
-            self.velocity = information[3 * self.atom_numbers: 6 * self.atom_numbers]
-            self.box_length = information[6 * self.atom_numbers:6 * self.atom_numbers + 3]
-        else:
-            while count <= 3 * self.atom_numbers + 3:
-                start_idx += 1
-                value = list(map(float, context[start_idx].strip().split()))
-                information.extend(value)
-                count += len(value)
-            self.coordinate = information[: 3 * self.atom_numbers]
-            self.velocity = [0.0] * (3 * self.atom_numbers)
-
self.box_length = information[3 * self.atom_numbers:3 * self.atom_numbers + 3] - self.coordinate = np.array(self.coordinate).reshape([-1, 3]) - self.velocity = np.array(self.velocity).reshape([-1, 3]) - print("system size is ", self.box_length[0], self.box_length[1], self.box_length[2]) diff --git a/tests/st/sponge/sponge_cuda/nb14.py b/tests/st/sponge/sponge_cuda/nb14.py deleted file mode 100644 index a538765ac07..00000000000 --- a/tests/st/sponge/sponge_cuda/nb14.py +++ /dev/null @@ -1,176 +0,0 @@ -# Copyright 2021 Huawei Technologies Co., Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================ -'''NON BOND''' - - -class NonBond14: - '''NON BOND''' - - def __init__(self, controller, dihedral, atom_numbers): - self.module_name = "nb14" - self.atom_numbers = atom_numbers - self.h_atom_a = [] - self.h_atom_b = [] - self.h_lj_scale_factor = [] - self.h_cf_scale_factor = [] - self.nb14_numbers = 0 - self.is_initialized = 0 - if controller.amber_parm is not None: - self.dihedral_with_hydrogen = dihedral.dihedral_with_hydrogen - self.dihedral_numbers = dihedral.dihedral_numbers - self.dihedral_type_numbers = dihedral.dihedral_type_numbers - file_path = controller.amber_parm - self.read_information_from_amberfile(file_path) - self.h_atom_a = self.h_atom_a[:self.nb14_numbers] - self.h_atom_b = self.h_atom_b[:self.nb14_numbers] - self.h_lj_scale_factor = self.h_lj_scale_factor[:self.nb14_numbers] - self.h_cf_scale_factor = self.h_cf_scale_factor[:self.nb14_numbers] - self.is_initialized = 1 - else: - self.read_in_file(controller) - - def read_in_file(self, controller): - """read_in_file""" - name = self.module_name + "_in_file" - if name in controller.command_set: - path = controller.command_set[name] - file = open(path, 'r') - context = file.readlines() - self.nb14_numbers = int(context[0].strip()) - print(" non-bond 14 numbers is", self.nb14_numbers) - for i in range(self.nb14_numbers): - val = list(map(float, context[i + 1].strip().split())) - self.h_atom_a.append(int(val[0])) - self.h_atom_b.append(int(val[1])) - self.h_lj_scale_factor.append(val[2]) - self.h_cf_scale_factor.append(val[3]) - self.is_initialized = 1 - file.close() - - def read_information_from_amberfile(self, file_path): - '''read amber file''' - file = open(file_path, 'r') - context = file.readlines() - file.close() - - self.cf_scale_type = [0] * self.dihedral_type_numbers - self.lj_scale_type = [0] * self.dihedral_type_numbers - - for idx, val in enumerate(context): - if "%FLAG SCEE_SCALE_FACTOR" in val: - count = 0 - start_idx = idx - information = [] - while count < self.dihedral_type_numbers: - start_idx += 1 - if "%FORMAT" in context[start_idx]: - continue - else: - value = list(map(float, context[start_idx].strip().split())) - information.extend(value) - count += len(value) - self.cf_scale_type = information[:self.dihedral_type_numbers] - break - - for idx, val in enumerate(context): - if "%FLAG SCNB_SCALE_FACTOR" in val: - count = 0 - start_idx = 
idx
-                information = []
-                while count < self.dihedral_type_numbers:
-                    start_idx += 1
-                    if "%FORMAT" in context[start_idx]:
-                        continue
-                    else:
-                        value = list(map(float, context[start_idx].strip().split()))
-                        information.extend(value)
-                        count += len(value)
-                self.lj_scale_type = information[:self.dihedral_type_numbers]
-                break
-        self.processor(context)
-
-    def processor(self, context):
-        '''processor'''
-        self.h_atom_a = [0] * self.dihedral_numbers
-        self.h_atom_b = [0] * self.dihedral_numbers
-        self.h_lj_scale_factor = [0] * self.dihedral_numbers
-        self.h_cf_scale_factor = [0] * self.dihedral_numbers
-        nb14_numbers = 0
-        for idx, val in enumerate(context):
-            if "%FLAG DIHEDRALS_INC_HYDROGEN" in val:
-                count = 0
-                start_idx = idx
-                information = []
-                while count < 5 * self.dihedral_with_hydrogen:
-                    start_idx += 1
-                    if "%FORMAT" in context[start_idx]:
-                        continue
-                    else:
-                        value = list(map(int, context[start_idx].strip().split()))
-                        information.extend(value)
-                        count += len(value)
-                for i in range(self.dihedral_with_hydrogen):
-                    tempa = information[i * 5 + 0]
-                    tempi = information[i * 5 + 1]
-                    tempi2 = information[i * 5 + 2]
-                    tempb = information[i * 5 + 3]
-                    tempi = information[i * 5 + 4]
-
-                    tempi -= 1
-                    if tempi2 > 0:
-                        # parm7 stores 3 * (atom index); integer-divide to recover the index
-                        self.h_atom_a[nb14_numbers] = tempa // 3
-                        self.h_atom_b[nb14_numbers] = abs(tempb) // 3
-                        self.h_lj_scale_factor[nb14_numbers] = self.lj_scale_type[tempi]
-                        if self.h_lj_scale_factor[nb14_numbers] != 0:
-                            self.h_lj_scale_factor[nb14_numbers] = 1.0 / self.h_lj_scale_factor[nb14_numbers]
-                        self.h_cf_scale_factor[nb14_numbers] = self.cf_scale_type[tempi]
-                        if self.h_cf_scale_factor[nb14_numbers] != 0:
-                            self.h_cf_scale_factor[nb14_numbers] = 1.0 / self.h_cf_scale_factor[nb14_numbers]
-                        nb14_numbers += 1
-                break
-        for idx, val in enumerate(context):
-            if "%FLAG DIHEDRALS_WITHOUT_HYDROGEN" in val:
-                count = 0
-                start_idx = idx
-                information = []
-                while count < 5 * (self.dihedral_numbers - self.dihedral_with_hydrogen):
-                    start_idx += 1
-                    if "%FORMAT" in context[start_idx]:
-                        continue
-                    else:
-                        value = list(map(int, context[start_idx].strip().split()))
-                        information.extend(value)
-                        count += len(value)
-                for i in range(self.dihedral_with_hydrogen, self.dihedral_numbers):
-                    tempa = information[(i - self.dihedral_with_hydrogen) * 5 + 0]
-                    tempi = information[(i - self.dihedral_with_hydrogen) * 5 + 1]
-                    tempi2 = information[(i - self.dihedral_with_hydrogen) * 5 + 2]
-                    tempb = information[(i - self.dihedral_with_hydrogen) * 5 + 3]
-                    tempi = information[(i - self.dihedral_with_hydrogen) * 5 + 4]
-
-                    tempi -= 1
-                    if tempi2 > 0:
-                        # same integer-division recovery of atom indices as above
-                        self.h_atom_a[nb14_numbers] = tempa // 3
-                        self.h_atom_b[nb14_numbers] = abs(tempb) // 3
-                        self.h_lj_scale_factor[nb14_numbers] = self.lj_scale_type[tempi]
-                        if self.h_lj_scale_factor[nb14_numbers] != 0:
-                            self.h_lj_scale_factor[nb14_numbers] = 1.0 / self.h_lj_scale_factor[nb14_numbers]
-                        self.h_cf_scale_factor[nb14_numbers] = self.cf_scale_type[tempi]
-                        if self.h_cf_scale_factor[nb14_numbers] != 0:
-                            self.h_cf_scale_factor[nb14_numbers] = 1.0 / self.h_cf_scale_factor[nb14_numbers]
-                        nb14_numbers += 1
-                break
-
-        self.nb14_numbers = nb14_numbers
diff --git a/tests/st/sponge/sponge_cuda/neighbor_list.py b/tests/st/sponge/sponge_cuda/neighbor_list.py
deleted file mode 100644
index 9a0f3312e05..00000000000
--- a/tests/st/sponge/sponge_cuda/neighbor_list.py
+++ /dev/null
@@ -1,182 +0,0 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================ -'''Neighbor List''' - - -class NeighborList: - '''Neighbor List''' - - def __init__(self, controller, atom_numbers, box_length): - self.constant_unit_max_float = 4294967296.0 - print("START INITIALIZING NEIGHBOR LIST:") - self.module_name = "NeighborList" - self.refresh_interval = 20 if "refresh_interval" not in controller.command_set else int( - controller.command_set["refresh_interval"]) - self.max_atom_in_grid_numbers = 64 if "max_atom_in_grid_numbers" not in controller.command_set else int( - controller.command_set["max_atom_in_grid_numbers"]) - self.max_neighbor_numbers = 800 if "max_neighbor_numbers" not in controller.command_set else int( - controller.command_set["max_neighbor_numbers"]) - - self.skin = 2.0 if "skin" not in controller.command_set else float(controller.command_set["skin"]) - self.cutoff = 10.0 if "cutoff" not in controller.command_set else float(controller.command_set["cutoff"]) - self.cutoff_square = self.cutoff * self.cutoff - self.cutoff_with_skin = self.cutoff + self.skin - self.half_cutoff_with_skin = 0.5 * self.cutoff_with_skin - self.cutoff_with_skin_square = self.cutoff_with_skin * self.cutoff_with_skin - self.half_skin_square = 0.25 * self.skin * self.skin - self.atom_numbers = atom_numbers - self.box_length = box_length - self.update_volume() - - self.initial_neighbor_grid() - self.not_first_time = 0 - self.is_initialized = 1 - self.refresh_count = [0] - - if controller.amber_parm is not None: - file_path = controller.amber_parm - self.read_information_from_amberfile(file_path) - - def read_information_from_amberfile(self, file_path): - '''read amber file''' - file = open(file_path, 'r') - context = file.readlines() - file.close() - self.excluded_list_start = [0] * self.atom_numbers - self.excluded_numbers = [0] * self.atom_numbers - - for idx, val in enumerate(context): - if idx < len(context) - 1: - if "%FLAG POINTERS" in val + context[idx + 1] and "%FORMAT(10I8)" in val + context[idx + 1]: - start_idx = idx + 2 - count = 0 - value = list(map(int, context[start_idx].strip().split())) - information = [] - information.extend(value) - while count < 11: - start_idx += 1 - value = list(map(int, context[start_idx].strip().split())) - information.extend(value) - count += len(value) - self.excluded_atom_numbers = information[10] - print("excluded atom numbers ", self.excluded_atom_numbers) - break - for idx, val in enumerate(context): - if "%FLAG NUMBER_EXCLUDED_ATOMS" in val: - count = 0 - start_idx = idx - information = [] - while count < self.atom_numbers: - start_idx += 1 - if "%FORMAT" in context[start_idx]: - continue - else: - value = list(map(int, context[start_idx].strip().split())) - information.extend(value) - count += len(value) - count = 0 - for i in range(self.atom_numbers): - self.excluded_numbers[i] = information[i] - self.excluded_list_start[i] = count - count += information[i] - break - - total_count = sum(self.excluded_numbers) - self.excluded_list = [] - for idx, val in enumerate(context): - if "%FLAG EXCLUDED_ATOMS_LIST" in val: - count = 0 - start_idx = 
idx - information = [] - while count < total_count: - start_idx += 1 - if "%FORMAT" in context[start_idx]: - continue - else: - value = list(map(int, context[start_idx].strip().split())) - information.extend(value) - count += len(value) - - count = 0 - for i in range(self.atom_numbers): - tmp_list = [] - if self.excluded_numbers[i] == 1: - tmp_list.append(information[count] - 1) - if information[count] == 0: - self.excluded_numbers[i] = 0 - count += 1 - else: - for _ in range(self.excluded_numbers[i]): - tmp_list.append(information[count] - 1) - - count += 1 - tmp_list = sorted(tmp_list) - self.excluded_list.extend(tmp_list) - break - - def initial_neighbor_grid(self): - '''init neighbor grid''' - half_cutoff = self.half_cutoff_with_skin - self.nx = int(self.box_length[0] / half_cutoff) - self.ny = int(self.box_length[1] / half_cutoff) - self.nz = int(self.box_length[2] / half_cutoff) - self.grid_n = [self.nx, self.ny, self.nz] - self.grid_length = [self.box_length[0] / self.nx, - self.box_length[1] / self.ny, - self.box_length[2] / self.nz] - self.grid_length_inverse = [1.0 / self.grid_length[0], 1.0 / self.grid_length[1], 1.0 / self.grid_length[2]] - - self.nxy = self.nx * self.ny - self.grid_numbers = self.nz * self.nxy - self.atom_numbers_in_grid_bucket = [0] * self.grid_numbers - self.bucket = [-1] * (self.grid_numbers * self.max_atom_in_grid_numbers) - - self.pointer = [] - temp_grid_serial = [0] * 125 - for i in range(self.grid_numbers): - nz = int(i / self.nxy) - ny = int((i - self.nxy * nz) / self.nx) - nx = i - self.nxy * nz - self.nx * ny - count = 0 - for l in range(-2, 3): - for m in range(-2, 3): - for n in range(-2, 3): - xx = nx + l - if xx < 0: - xx = xx + self.nx - elif xx >= self.nx: - xx = xx - self.nx - yy = ny + m - if yy < 0: - yy = yy + self.ny - elif yy >= self.ny: - yy = yy - self.ny - zz = nz + n - if zz < 0: - zz = zz + self.nz - elif zz >= self.nz: - zz = zz - self.nz - temp_grid_serial[count] = zz * self.nxy + yy * self.nx + xx - count += 1 - temp_grid_serial = sorted(temp_grid_serial) - self.pointer.extend(temp_grid_serial) - - def update_volume(self): - self.quarter_crd_to_uint_crd_cof = [0.25 * self.constant_unit_max_float / self.box_length[0], - 0.25 * self.constant_unit_max_float / self.box_length[1], - 0.25 * self.constant_unit_max_float / self.box_length[2]] - self.uint_dr_to_dr_cof = [1.0 / self.constant_unit_max_float * self.box_length[0], - 1.0 / self.constant_unit_max_float * self.box_length[1], - 1.0 / self.constant_unit_max_float * self.box_length[2]] diff --git a/tests/st/sponge/sponge_cuda/particle_mesh_ewald.py b/tests/st/sponge/sponge_cuda/particle_mesh_ewald.py deleted file mode 100644 index 2b29db16455..00000000000 --- a/tests/st/sponge/sponge_cuda/particle_mesh_ewald.py +++ /dev/null @@ -1,128 +0,0 @@ -# Copyright 2021 Huawei Technologies Co., Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ============================================================================
-'''PME'''
-import math
-
-
-class ParticleMeshEwald():
-    '''PME'''
-    def __init__(self, controller, md_info):
-        self.module_name = "PME"
-        self.constant_pi = 3.1415926535897932
-        self.cutoff = 10.0 if "cutoff" not in controller.command_set else float(controller.command_set["cutoff"])
-        self.tolerance = 0.00001 if "Direct_Tolerance" not in controller.command_set else float(
-            controller.command_set["Direct_Tolerance"])
-        self.fftx = -1 if "fftx" not in controller.command_set else int(controller.command_set["fftx"])
-        self.ffty = -1 if "ffty" not in controller.command_set else int(controller.command_set["ffty"])
-        self.fftz = -1 if "fftz" not in controller.command_set else int(controller.command_set["fftz"])
-        self.atom_numbers = md_info.atom_numbers
-        self.box_length = md_info.box_length
-
-        self.volume = self.box_length[0] * self.box_length[1] * self.box_length[2]
-
-        if self.fftx < 0:
-            self.fftx = self.get_fft_parameter(self.box_length[0])
-        if self.ffty < 0:
-            self.ffty = self.get_fft_parameter(self.box_length[1])
-        if self.fftz < 0:
-            self.fftz = self.get_fft_parameter(self.box_length[2])
-        print("    fftx: ", self.fftx)
-        print("    ffty: ", self.ffty)
-        print("    fftz: ", self.fftz)
-        print("pme cutoff", self.cutoff)
-        print("pme tolerance", self.tolerance)
-        self.pme_nall = self.fftx * self.ffty * self.fftz
-        self.pme_nin = self.ffty * self.fftz
-        self.pme_nfft = self.fftx * self.ffty * (int(self.fftz / 2) + 1)
-        self.pme_inverse_box_vector = [self.fftx / self.box_length[0],
-                                       self.ffty / self.box_length[1],
-                                       self.fftz / self.box_length[2]]
-
-        self.beta = self.get_beta(self.cutoff, self.tolerance)
-        self.neutralizing_factor = -0.5 * self.constant_pi / (self.beta * self.beta * self.volume)
-        self.is_initialized = 1
-
-    @staticmethod
-    def check_2357_factor(number):
-        '''CHECK FACTOR'''
-        while number > 0:
-            if number == 1:
-                return 1
-            tempn = number // 2
-            if tempn * 2 != number:
-                break
-            number = tempn
-        while number > 0:
-            if number == 1:
-                return 1
-            tempn = number // 3  # integer division; '/' would propagate floats in Python 3
-            if tempn * 3 != number:
-                break
-            number = tempn
-        while number > 0:
-            if number == 1:
-                return 1
-            tempn = number // 5
-            if tempn * 5 != number:
-                break
-            number = tempn
-        while number > 0:
-            if number == 1:
-                return 1
-            tempn = number // 7
-            if tempn * 7 != number:
-                break
-            number = tempn
-        return 0
-
-    @staticmethod
-    def get_beta(cutoff, tolerance):
-        '''GET BETA'''
-        high = 1.0
-        ihigh = 1
-        while 1:
-            tempf = math.erfc(high * cutoff) / cutoff
-            if tempf <= tolerance:
-                break
-            high *= 2
-            ihigh += 1
-        ihigh += 50
-        low = 0.0
-        for _ in range(1, ihigh):
-            beta = (low + high) / 2
-            tempf = math.erfc(beta * cutoff) / cutoff
-            if tempf >= tolerance:
-                low = beta
-            else:
-                high = beta
-        return beta
-
-    def get_fft_parameter(self, length):
-        '''GET FFT PARAMETER'''
-        tempi = math.ceil(length + 3) >> 2 << 2
-        if 60 <= tempi <= 68:
-            tempi = 64
-        elif 120 <= tempi <= 136:
-            tempi = 128
-        elif 240 <= tempi <= 272:
-            tempi = 256
-        elif 480 <= tempi <= 544:
-            tempi = 512
-        elif 960 <= tempi <= 1088:
-            tempi = 1024
-        while 1:
-            if self.check_2357_factor(tempi):
-                return tempi
-            tempi += 4
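get_beta brackets the Ewald splitting parameter so that the direct-space error estimate erfc(beta * cutoff) / cutoff just meets the tolerance: it doubles an upper bound until the estimate is small enough, then bisects. A standalone sketch of the same search with the class defaults:

import math

def get_beta(cutoff=10.0, tolerance=1e-5):
    high, ihigh = 1.0, 1
    # Double the upper bound until the error estimate drops below tolerance.
    while math.erfc(high * cutoff) / cutoff > tolerance:
        high *= 2
        ihigh += 1
    low = 0.0
    # Bisect between 0 and high; the extra iterations mirror the +50 refinement above.
    for _ in range(ihigh + 50):
        beta = (low + high) / 2
        if math.erfc(beta * cutoff) / cutoff >= tolerance:
            low = beta
        else:
            high = beta
    return beta

print(get_beta())  # roughly 0.275 for a 10 Angstrom cutoff at 1e-5 tolerance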
diff --git a/tests/st/sponge/sponge_cuda/restrain.py b/tests/st/sponge/sponge_cuda/restrain.py
deleted file mode 100644
index dd500e2ee7a..00000000000
--- a/tests/st/sponge/sponge_cuda/restrain.py
+++ /dev/null
@@ -1,66 +0,0 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ============================================================================
-'''restrain_information'''
-
-
-class RestrainInformation:
-    '''restrain_information'''
-
-    def __init__(self, controller, atom_numbers, crd):
-        self.module_name = "restrain"
-        self.atom_numbers = atom_numbers
-        self.crd = crd
-        self.controller = controller
-        self.weight = 100.0 if "weight" not in controller.command_set else float(
-            controller.command_set["weight"])
-        print("    %s_weight is %.0f\n" % (self.module_name, self.weight))
-        self.is_initialized = 0
-        name = self.module_name + "_in_file"
-        if name in controller.command_set:
-            print("START INITIALIZING RESTRAIN\n")
-            path = controller.command_set[name]
-            self.read_in_file(path)
-            self.read_crd_ref()
-            self.is_initialized = 1
-            print("END INITIALIZING RESTRAIN\n")
-
-    def read_in_file(self, path):
-        """read_in_file"""
-        file = open(path, 'r')
-        context = file.readlines()
-        self.restrain_numbers = 0
-        h_lists = []
-        for val in context:
-            h_lists.append(float(val.strip()))
-            self.restrain_numbers += 1
-        print("    restrain_numbers is %d\n" % self.restrain_numbers)
-        file.close()
-        self.restrain_ene = [0] * self.restrain_numbers
-        self.sum_of_restrain_ene = [0]
-
-    def read_crd_ref(self):
-        """read_crd_ref"""
-        self.crd_ref = []
-        if "coordinate" not in self.controller.command_set:
-            print("    restrain reference coordinate copy from input coordinate")
-            self.crd_ref = self.crd
-        else:
-            print("    reading restrain reference coordinate file")
-            file = open(self.controller.command_set["coordinate"], 'r')
-            context = file.readlines()
-            atom_numbers = int(context[0].strip())
-            print("    atom_numbers is %d" % atom_numbers)
-            for i in range(atom_numbers):
-                self.crd_ref.append(list(map(float, context[i + 1].strip().split())))
-            file.close()
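RestrainInformation only stages the weight and the reference coordinates; the energy itself was computed by the restrain kernels this patch removes. As a rough illustration only, a harmonic positional restraint of the form weight * |r - r_ref|^2 (the exact functional form and prefactor used by the removed kernels are an assumption here):

import numpy as np

def restrain_energy(crd, crd_ref, weight=100.0):
    # Assumed form: weight times the sum of squared displacements from the reference.
    crd = np.asarray(crd, dtype=np.float32)
    crd_ref = np.asarray(crd_ref, dtype=np.float32)
    return weight * np.sum((crd - crd_ref) ** 2)

print(restrain_energy([[1.1, 0.0, 0.0]], [[1.0, 0.0, 0.0]]))  # ~1.0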
diff --git a/tests/st/sponge/sponge_cuda/simple_constrain.py b/tests/st/sponge/sponge_cuda/simple_constrain.py
deleted file mode 100644
index dfe00e9515a..00000000000
--- a/tests/st/sponge/sponge_cuda/simple_constrain.py
+++ /dev/null
@@ -1,206 +0,0 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ============================================================================
-"""SimpleConstrain"""
-
-
-class BondInformation:
-    def __init__(self):
-        self.bond_numbers = 0
-        self.atom_a = []
-        self.atom_b = []
-        self.bond_r = []
-
-
-class Information:
-    """Information"""
-
-    def __init__(self):
-        self.atom_numbers = 0
-        self.dt = 0.001
-        self.uint_dr_to_dr_cof = []
-        self.quarter_crd_to_uint_crd_cof = []
-        self.volume = 0.0
-        self.exp_gamma = 1.0
-        self.step_length = 1.0
-        self.iteration_numbers = 25
-        self.constrain_mass = 3
-
-
-class SimpleConstarin:
-    """SimpleConstrain"""
-
-    def __init__(self, controller, md_info, bond, angle, liujian_info):
-        self.module_name = "simple_constrain"
-        self.constant_unit_max_float = 4294967296.0
-        self.controller = controller
-        self.md_info = md_info
-        self.bond = bond
-        self.angle = angle
-        self.liujian_info = liujian_info
-        self.bond_info = BondInformation()
-        self.info = Information()
-
-        self.atom_numbers = md_info.atom_numbers
-        self.bond_numbers = bond.bond_numbers
-        self.angle_numbers = angle.angle_numbers
-
-        self.is_initialized = 0
-        self.constrain_frc = []
-        self.pair_virial = []
-        self.virial = []
-        self.test_uint_crd = []
-        self.last_pair_dr = []
-        self.dt_inverse = 0
-        self.half_exp_gamma_plus_half = 0
-        self.bond_constrain_pair_numbers = 0
-        self.angle_constrain_pair_numbers = 0
-        # in the real calculation, use constrain pair
-        self.constrain_pair_numbers = 1
-        self.system_freedom = 0
-        self.h_bond_pair = []
-        self.h_constrain_pair = []
-        if "constrain_mode" in self.controller.command_set and \
-                self.controller.command_set["constrain_mode"] == "simple_constrain":
-            print("START INITIALIZING SIMPLE CONSTRAIN:")
-            self.constrain_pair_numbers = 0
-            self.add_hbond_to_constrain_pair(self.bond_numbers, bond.h_atom_a, bond.h_atom_b, bond.h_r0, md_info.h_mass)
-            self.add_hangle_to_constrain_pair()
-            if liujian_info.is_initialized:
-                self.initial_simple_constrain(md_info.atom_numbers, md_info.dt, md_info.sys.box_length,
-                                              liujian_info.exp_gamma, 0, md_info.h_mass, md_info.sys.freedom)
-            else:
-                self.initial_simple_constrain(md_info.atom_numbers, md_info.dt, md_info.sys.box_length,
-                                              1.0, md_info.mode == -1, md_info.h_mass, md_info.sys.freedom)
-            self.is_initialized = 1
-            print("END INITIALIZING SIMPLE CONSTRAIN\n")
-
-    def add_hbond_to_constrain_pair(self, bond_numbers, atom_a, atom_b, bond_r, atom_mass):
-        """ add hbond to constrain pair
-        :param bond_numbers:
-        :param atom_a:
-        :param atom_b:
-        :param bond_r:
-        :param atom_mass:
-        :return: update bond info
-        """
-        self.info.constrain_mass = 3.0
-        name = self.module_name + "_in_file"
-        if name in self.controller.command_set:
-            self.info.constrain_mass = 0
-        if "mass" in self.controller.command_set:
-            self.info.constrain_mass = float(self.controller.command_set["mass"])
-        self.h_bond_pair = []
-        s = 0
-        for i in range(bond_numbers):
-            mass_a = atom_mass[atom_a[i]]
-            mass_b = atom_mass[atom_b[i]]
-            if (float(mass_a) < self.info.constrain_mass and mass_a > 0) or \
-                    (float(mass_b) < self.info.constrain_mass and mass_b > 0):
-                constrain_k = \
-                    atom_mass[atom_a[i]] * atom_mass[atom_b[i]] / (atom_mass[atom_a[i]] + atom_mass[atom_b[i]])
-                self.h_bond_pair.append([atom_a[i], atom_b[i], bond_r[i], constrain_k])
-                s += 1
-        self.bond_constrain_pair_numbers = s
-        self.bond_info.bond_numbers = bond_numbers
-        self.bond_info.atom_a = atom_a
-        self.bond_info.atom_b = atom_b
-        self.bond_info.bond_r = bond_r
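Each accepted pair stores the reduced mass of its two atoms as constrain_k, the same expression used in the loop above. A small worked example:

def reduced_mass(mass_a, mass_b):
    # constrain_k above: m_a * m_b / (m_a + m_b)
    return mass_a * mass_b / (mass_a + mass_b)

# An O-H bond: the light hydrogen dominates the reduced mass.
print(reduced_mass(16.0, 1.008))  # ~0.948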
-    def add_hangle_to_constrain_pair(self):
-        """add hangle to constrain_pair"""
-        self.h_angle_pair = []
-        self.angle_constrain_pair_numbers = 0
-
-    def initial_simple_constrain(self, atom_numbers, dt, box_length, exp_gamma, is_minimization, atom_mass,
-                                 system_freedom):
-        """initial simple constrain
-        :param atom_numbers:
-        :param dt:
-        :param box_length:
-        :param exp_gamma:
-        :param is_minimization:
-        :param atom_mass:
-        :param system_freedom:
-        :return:
-        """
-        self.system_freedom = system_freedom
-        self.atom_mass = atom_mass
-        self.info.atom_numbers = atom_numbers
-        self.info.dt = dt
-        self.info.quarter_crd_to_uint_crd_cof = [0.25 * self.constant_unit_max_float / box_length[0],
-                                                 0.25 * self.constant_unit_max_float / box_length[1],
-                                                 0.25 * self.constant_unit_max_float / box_length[2]]
-        self.info.uint_dr_to_dr_cof = [1 / self.constant_unit_max_float * box_length[0],
-                                       1 / self.constant_unit_max_float * box_length[1],
-                                       1 / self.constant_unit_max_float * box_length[2]]
-        self.info.volume = box_length[0] * box_length[1] * box_length[2]
-        self.info.exp_gamma = exp_gamma
-        self.half_exp_gamma_plus_half = 0.5 * (1. + self.info.exp_gamma)
-        if is_minimization:
-            self.info.exp_gamma = 0.0
-        self.info.iteration_numbers = 25 if "iteration_numbers" not in self.controller.command_set else int(
-            self.controller.command_set["iteration_numbers"])
-        print("    constrain iteration step is ", self.info.iteration_numbers)
-        self.info.step_length = 1 if "step_length" not in self.controller.command_set else float(
-            self.controller.command_set["step_length"])
-        print("    constrain step_length is ", self.info.step_length)
-        self.extra_numbers = 0
-        name = self.module_name + "_in_file"
-        if name in self.controller.command_set:
-            path = self.controller.command_set[name]
-            with open(path, 'r') as pf:
-                context = pf.readlines()
-            self.extra_numbers = int(context[0].strip())
-
-        self.dt_inverse = 1 / self.info.dt
-        self.constrain_pair_numbers = \
-            self.bond_constrain_pair_numbers + self.angle_constrain_pair_numbers + self.extra_numbers
-        self.system_freedom -= self.constrain_pair_numbers
-
-        print("    constrain pair number is ", self.constrain_pair_numbers)
-
-        for i in range(self.bond_constrain_pair_numbers):
-            self.h_constrain_pair.append(self.h_bond_pair[i])
-            self.h_constrain_pair[i][-1] = self.info.step_length / self.half_exp_gamma_plus_half \
-                * self.h_constrain_pair[i][-1]
-        for i in range(self.angle_constrain_pair_numbers):
-            # angle constraint pairs follow the bond pairs in h_constrain_pair
-            self.h_constrain_pair.append(self.h_angle_pair[i])
-            self.h_constrain_pair[i + self.bond_constrain_pair_numbers][-1] = \
-                self.info.step_length / self.half_exp_gamma_plus_half * \
-                self.h_constrain_pair[i + self.bond_constrain_pair_numbers][-1]
-        if name in self.controller.command_set:
-            path = self.controller.command_set[name]
-            self.read_in_file(path)
-
-    def read_in_file(self, path):
-        """ read in_file
-
-        :param path:
-        :return: in_file context
-        """
-        with open(path, 'r') as pf:
-            context = pf.readlines()
-        count = self.bond_constrain_pair_numbers + self.angle_constrain_pair_numbers
-        for i in range(self.extra_numbers):
-            val = list(map(float, context[i + 1].strip().split()))
-            atom_i, atom_j, constant_r = int(val[0]), int(val[1]), float(val[2])
-            constrain_k = self.info.step_length / self.half_exp_gamma_plus_half * self.atom_mass[atom_i] * \
-                self.atom_mass[atom_j] / (self.atom_mass[atom_i] + self.atom_mass[atom_j])
-            self.h_constrain_pair.append([atom_i, atom_j, constant_r, constrain_k])
-            count += 1
diff --git a/tests/st/sponge/sponge_cuda/system_information.py b/tests/st/sponge/sponge_cuda/system_information.py
deleted file mode 100644
index 174e668a57e..00000000000
--- a/tests/st/sponge/sponge_cuda/system_information.py
+++ /dev/null
@@ -1,291 +0,0 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ============================================================================
-'''System information'''
-import numpy as np
-
-
-class PeriodicBoxConditionInformation:
-    """PeriodicBoxConditionInformation"""
-
-    def __init__(self, box_length):
-        constant_unit_max_float = 4294967296.0
-        self.crd_to_uint_crd_cof = np.array([constant_unit_max_float / box_length[0],
-                                             constant_unit_max_float / box_length[1],
-                                             constant_unit_max_float / box_length[2]])
-        self.quarter_crd_to_uint_crd_cof = 0.25 * self.crd_to_uint_crd_cof
-        self.uint_dr_to_dr_cof = 1.0 / self.crd_to_uint_crd_cof
-
-
-class SystemInformation:
-    """SystemInformation"""
-
-    def __init__(self, controller, md_info):
-        constant_pres_conversion_inverse = 0.00001439506089041446
-        self.md_info = md_info
-        self.box_length = self.md_info.box_length
-        self.steps = 0
-        self.step_limit = 1000 if "step_limit" not in controller.command_set else int(
-            controller.command_set["step_limit"])
-        self.target_temperature = 300.0 if "target_temperature" not in controller.command_set else float(
-            controller.command_set["target_temperature"])
-        if md_info.mode == 2 and "target_pressure" in controller.command_set:
-            self.target_pressure = float(controller.command_set["target_pressure"])
-        else:
-            self.target_pressure = 1
-        self.target_pressure *= constant_pres_conversion_inverse
-        self.d_virial = 0
-        self.d_pressure = 0
-        self.d_temperature = 0
-        self.d_potential = 0
-        self.d_sum_of_atom_ek = 0
-        self.freedom = 3 * md_info.atom_numbers
-
-
-class NonBondInformation:
-    """NonBondInformation"""
-
-    def __init__(self, controller, md_info):
-        self.md_info = md_info
-        self.skin = 2.0 if "skin" not in controller.command_set else float(controller.command_set["skin"])
-        print("    skin set to %.2f Angstrom" % (self.skin))
-        self.cutoff = 10.0 if "cutoff" not in controller.command_set else float(controller.command_set["cutoff"])
-        self.atom_numbers = self.md_info.atom_numbers
-        self.excluded_atom_numbers = 0
-        self.h_excluded_list_start = []
-        self.h_excluded_numbers = []
-        self.h_excluded_list = []
-        if controller.amber_parm is not None:
-            file_path = controller.amber_parm
-            self.read_information_from_amberfile(file_path)
-        else:
-            self.read_exclude_file(controller)
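read_exclude_file below assumes a plain-text layout: a header line with atom_numbers and the excluded-atom total, then one line per atom beginning with that atom's exclusion count. A toy example of parsing that layout (the -1 sentinel handling of the real reader is omitted):

import io

toy_exclude_file = io.StringIO("3 3\n2 1 2\n1 2\n0\n")

lines = toy_exclude_file.readlines()
atom_numbers, excluded_atom_numbers = map(int, lines[0].split())
starts, numbers, excluded = [], [], []
count = 0
for line in lines[1:1 + atom_numbers]:
    el = list(map(int, line.split()))
    numbers.append(el[0])   # how many exclusions this atom has
    starts.append(count)    # offset of its slice in the flat list
    excluded.extend(el[1:])
    count += el[0]
print(numbers, starts, excluded)  # [2, 1, 0] [0, 2, 3] [1, 2, 2]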
-    def read_exclude_file(self, controller):
-        """read_exclude_file"""
-        if "exclude_in_file" in controller.command_set:
-            print("    Start reading excluded list:")
-            path = controller.command_set["exclude_in_file"]
-            file = open(path, 'r')
-            context = file.readlines()
-            atom_numbers, self.excluded_atom_numbers = list(map(int, context[0].strip().split()))
-            if self.md_info.atom_numbers > 0 and (atom_numbers != self.md_info.atom_numbers):
-                print("        Error: atom_numbers is not equal: ", atom_numbers, self.md_info.atom_numbers)
-                exit(1)
-            else:
-                self.md_info.atom_numbers = atom_numbers
-            count = 0
-            for idx, val in enumerate(context):
-                if idx > 0:
-                    el = list(map(int, val.strip().split()))
-                    if el[0] == 1 and -1 in el:
-                        self.h_excluded_numbers.append(0)
-                    else:
-                        self.h_excluded_numbers.append(el[0])
-                    self.h_excluded_list_start.append(count)
-                    if el:
-                        self.h_excluded_list.extend(el[1:])
-                    count += el[0]
-            print("    End reading excluded list")
-            file.close()
-        else:
-            print("    Set all atoms to exclude no atoms as default")
-            count = 0
-            for _ in range(self.md_info.atom_numbers):
-                self.h_excluded_numbers.append(0)
-                self.h_excluded_list_start.append(count)
-            print("    End reading excluded list")
-
-    def read_information_from_amberfile(self, file_path):
-        '''read amber file'''
-        file = open(file_path, 'r')
-        context = file.readlines()
-        file.close()
-        self.h_excluded_list_start = [0] * self.atom_numbers
-        self.h_excluded_numbers = [0] * self.atom_numbers
-
-        for idx, val in enumerate(context):
-            if idx < len(context) - 1:
-                if "%FLAG POINTERS" in val + context[idx + 1] and "%FORMAT(10I8)" in val + context[idx + 1]:
-                    start_idx = idx + 2
-                    count = 0
-                    value = list(map(int, context[start_idx].strip().split()))
-                    information = []
-                    information.extend(value)
-                    while count < 11:
-                        start_idx += 1
-                        value = list(map(int, context[start_idx].strip().split()))
-                        information.extend(value)
-                        count += len(value)
-                    self.excluded_atom_numbers = information[10]
-                    print("excluded atom numbers ", self.excluded_atom_numbers)
-                    break
-        for idx, val in enumerate(context):
-            if "%FLAG NUMBER_EXCLUDED_ATOMS" in val:
-                count = 0
-                start_idx = idx
-                information = []
-                while count < self.atom_numbers:
-                    start_idx += 1
-                    if "%FORMAT" in context[start_idx]:
-                        continue
-                    else:
-                        value = list(map(int, context[start_idx].strip().split()))
-                        information.extend(value)
-                        count += len(value)
-                count = 0
-                for i in range(self.atom_numbers):
-                    self.h_excluded_numbers[i] = information[i]
-                    self.h_excluded_list_start[i] = count
-                    count += information[i]
-                break
-
-        total_count = sum(self.h_excluded_numbers)
-        self.h_excluded_list = []
-        for idx, val in enumerate(context):
-            if "%FLAG EXCLUDED_ATOMS_LIST" in val:
-                count = 0
-                start_idx = idx
-                information = []
-                while count < total_count:
-                    start_idx += 1
-                    if "%FORMAT" in context[start_idx]:
-                        continue
-                    else:
-                        value = list(map(int, context[start_idx].strip().split()))
-                        information.extend(value)
-                        count += len(value)
-
-                count = 0
-                for i in range(self.atom_numbers):
-                    tmp_list = []
-                    if self.h_excluded_numbers[i] == 1:
-                        tmp_list.append(information[count] - 1)
-                        if information[count] == 0:
-                            self.h_excluded_numbers[i] = 0
-                        count += 1
-                    else:
-                        for _ in range(self.h_excluded_numbers[i]):
-                            tmp_list.append(information[count] - 1)
-                            count += 1
-                    tmp_list = sorted(tmp_list)
-                    self.h_excluded_list.extend(tmp_list)
-                break
-
-
-class NveIteration:
-    """NveIteration"""
-
-    def __init__(self, controller):
-        self.max_velocity = -1 if "nve_velocity_max" not in controller.command_set else float(
-            controller.command_set["nve_velocity_max"])
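The parm7 readers in this file (and in neighbor_list.py and nb14.py above) all repeat one scan: locate a %FLAG line, skip %FORMAT lines, and accumulate whitespace-separated values until enough entries are read. A hypothetical helper capturing that pattern, not part of the original code, shown only to make the repeated loops easier to follow:

def read_amber_flag(context, flag, n, dtype=float):
    """Collect n values following a %FLAG section, skipping %FORMAT lines."""
    for idx, val in enumerate(context):
        if flag in val:
            information, start_idx = [], idx
            while len(information) < n:
                start_idx += 1
                if "%FORMAT" in context[start_idx]:
                    continue
                information.extend(map(dtype, context[start_idx].strip().split()))
            return information[:n]
    return []

# Usage against an in-memory parm7 fragment:
context = ["%FLAG MASS\n", "%FORMAT(5E16.8)\n", "12.01 1.008\n", "1.008\n"]
print(read_amber_flag(context, "%FLAG MASS", 3))  # [12.01, 1.008, 1.008]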
-class ResidueInformation:
-    """ResidueInformation"""
-
-    def __init__(self, controller, md_info):
-        self.md_info = md_info
-        self.residue_numbers = 0
-        self.h_mass = []
-        self.h_mass_inverse = []
-        self.h_res_start = []
-        self.h_res_end = []
-        self.momentum = []
-        self.center_of_mass = []
-        self.sigma_of_res_ek = 0
-        self.res_ek_energy = 0
-        self.is_initialized = 0
-        print("    Start reading residue list:")
-        if "residue_in_file" in controller.command_set:
-            self.read_residue_file(controller)
-        elif controller.amber_parm is not None:
-            print("amber_parm7 in residue_information")
-            self.residue_numbers = self.md_info.residue_numbers
-            self.h_res_start = md_info.h_res_start
-            self.h_res_end = md_info.h_res_end
-            self.is_initialized = 1
-            self.read_res_mass()
-        else:
-            self.residue_numbers = md_info.atom_numbers
-            self.h_res_start = list(range(self.residue_numbers))
-            self.h_res_end = list(range(1, self.residue_numbers + 1))
-            self.read_res_mass()
-            self.is_initialized = 1
-        print("    End reading residue list")
-
-    def read_res_mass(self):
-        """ Read_AMBER_Parm7 """
-        if self.md_info.h_mass:
-            for i in range(self.residue_numbers):
-                temp_mass = 0
-                for j in range(self.h_res_start[i], self.h_res_end[i]):
-                    temp_mass += self.md_info.h_mass[j]
-                self.h_mass.append(temp_mass)
-                if temp_mass == 0:
-                    self.h_mass_inverse.append(0)
-                else:
-                    self.h_mass_inverse.append(1.0 / temp_mass)
-        else:
-            print("    Error: atom mass should be initialized before residue mass")
-            exit(1)
-
-    def read_residue_file(self, controller):
-        """read_residue_file"""
-        if "residue_in_file" in controller.command_set:
-            path = controller.command_set["residue_in_file"]
-            file = open(path, 'r')
-            context = file.readlines()
-            atom_numbers, self.residue_numbers = list(map(int, context[0].strip().split()))
-            print("    residue_numbers is ", self.residue_numbers)
-            if self.md_info.atom_numbers > 0 and (atom_numbers != self.md_info.atom_numbers):
-                print("        Error: atom_numbers is not equal: ", atom_numbers, self.md_info.atom_numbers)
-                exit(1)
-            else:
-                self.md_info.atom_numbers = atom_numbers
-
-            count = 0
-            for idx, val in enumerate(context):
-                if idx > 0:
-                    self.h_res_start.append(count)
-                    temp = int(val.strip())
-                    count += temp
-                    self.h_res_end.append(count)
-            print("    End reading residue list")
-            file.close()
-            self.is_initialized = 1
-            if self.is_initialized:
-                self.read_res_mass()
-
-
-class TrajectoryOutput:
-    """TrajectoryOutput"""
-
-    def __init__(self, controller):
-        self.current_crd_synchronized_step = 0
-        self.is_molecule_map_output = 0
-        if "molecule_map_output" in controller.command_set:
-            self.is_molecule_map_output = int(controller.command_set["molecule_map_output"])
-        self.amber_irest = -1
-        self.write_trajectory_interval = 1000 if "write_information_interval" not in controller.command_set else int(
-            controller.command_set["write_information_interval"])
-        self.write_restart_file_interval = self.write_trajectory_interval if "write_restart_file_interval" not in \
-            controller.command_set else \
-            int(controller.command_set["write_restart_file_interval"])
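TrajectoryOutput only stores the output intervals; how they gate writing during the MD loop is not shown in this file. A minimal sketch of the usual interval check (the writer call is hypothetical):

def should_write(step, interval):
    # Write on every full interval; a non-positive interval disables output.
    return interval > 0 and step % interval == 0

write_trajectory_interval = 1000
for step in range(1, 3001):
    if should_write(step, write_trajectory_interval):
        print("write trajectory frame at step", step)  # fires at 1000, 2000, 3000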
- """ - - context.set_context(mode=context.GRAPH_MODE, device_target="GPU", save_graphs=False, enable_graph_kernel=True) - context.set_context(graph_kernel_flags="--enable_expand_ops=Gather \ - --enable_cluster_ops=TensorScatterAdd,UnSortedSegmentSum,GatherNd \ - --enable_recompute_fusion=false --enable_parallel_fusion=true") - simulation = Simulation() - for steps in range(1000): - temperature, total_potential_energy, sigma_of_bond_ene, sigma_of_angle_ene, sigma_of_dihedral_ene, \ - nb14_lj_energy_sum, nb14_cf_energy_sum, lj_energy_sum, ee_ene, _ = simulation() - if steps == 1000: - print(temperature, total_potential_energy, sigma_of_bond_ene, sigma_of_angle_ene, \ - sigma_of_dihedral_ene, nb14_lj_energy_sum, nb14_cf_energy_sum, lj_energy_sum, ee_ene) - assert 250 < temperature < 350 - assert -7500 < total_potential_energy < -6500 - assert 800 < sigma_of_bond_ene < 1300 - assert 10 < sigma_of_dihedral_ene < 25 - assert 3 < nb14_lj_energy_sum < 9 - assert 130 < nb14_cf_energy_sum < 220 - assert 1200 < lj_energy_sum < 1800 - assert -12000 < ee_ene < -7000