!42263 Code Warning Clean

Merge pull request !42263 from archer2049/r1.9
i-robot 2022-09-19 01:25:52 +00:00 committed by Gitee
commit e17f1d691f
11 changed files with 47 additions and 19 deletions

View File

@@ -13,6 +13,7 @@
 # limitations under the License.
 # ============================================================================
 """batch_matmul_impl"""
+from __future__ import absolute_import
 from te import tik
 from topi.cce import util
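
The whole change in this file is the added `from __future__ import absolute_import`. On Python 2 it makes bare imports resolve against sys.path instead of the package's own directory; on Python 3 that is already the default, so the line is a no-op there. A minimal sketch of the behavior it rules out (hypothetical package layout, not from this repo):

    # Suppose pkg/json.py exists next to this module inside a package.
    from __future__ import absolute_import
    import json  # now always the standard-library json, never the sibling pkg/json.py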

View File

@@ -13,9 +13,11 @@
 # limitations under the License.
 # ============================================================================
 """CusCholeskyTrsm"""
-from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType
+from __future__ import absolute_import
 from te import tik
 from topi.cce import util
+from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType

 cus_cholesky_trsm_op_info = TBERegOp("CusCholeskyTrsm") \
     .fusion_type("OPAQUE") \
@@ -84,7 +86,10 @@ def cus_cholesky_trsm(input_x, output, kernel_name):
     chol_diag_element_final = tik_instance.Scalar("float32")
     chol_diag_element_final.set_as(input_x_ub[split_dim * split_dim - 1])
     trsm_diag_element = tik_instance.Scalar("float32")
-    trsm_diag_element.set_as(1.0 / chol_diag_element_final)
+    try:
+        trsm_diag_element.set_as(1.0 / chol_diag_element_final)
+    except ZeroDivisionError:
+        assert False, "chol_diag_element_final as a divisor should not be zero."
     temp_ub.__setitem__(split_dim * split_dim - 1, trsm_diag_element)
     with tik_instance.for_range(1, split_dim) as i:
@@ -99,7 +104,10 @@ def cus_cholesky_trsm(input_x, output, kernel_name):
     temp_scalar = tik_instance.Scalar("float32")
     temp_scalar.set_as(input_x_ub[index, index])
     chol_diag_element = tik_instance.Scalar("float32")
-    chol_diag_element.set_as(1.0 / temp_scalar)
+    try:
+        chol_diag_element.set_as(1.0 / temp_scalar)
+    except ZeroDivisionError:
+        assert False, "temp_scalar as a divisor should not be zero."
     tik_instance.vsub(64, temp_ub[index, 0], temp_ub[index, 0], assist_1_ub, vector_repeat_times,
                       1, 1, 1, 8, 8, 8)
     tik_instance.vmuls(64, temp_ub[index, 0], temp_ub[index, 0], chol_diag_element, vector_repeat_times, 1, 1,
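
Both hunks above wrap a reciprocal in the same divisor guard. A minimal standalone sketch of the pattern in plain Python (`reciprocal` is a hypothetical helper, not part of this change):

    def reciprocal(value):
        # Fail loudly instead of letting a ZeroDivisionError propagate.
        try:
            return 1.0 / value
        except ZeroDivisionError:
            assert False, "value as a divisor should not be zero."

    print(reciprocal(4.0))  # 0.25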

View File

@@ -13,6 +13,8 @@
 # limitations under the License.
 # ============================================================================
 """CusFusedAbsMax1"""
+from __future__ import absolute_import
 from te import tik
 from topi.cce import util

View File

@@ -13,9 +13,11 @@
 # limitations under the License.
 # ============================================================================
 """CusImg2ColNC1HWC0"""
-from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType
+from __future__ import absolute_import
 from te import tik
 from topi.cce import util
+from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType

 cus_img2col_info = TBERegOp("CusImg2Col") \
     .fusion_type("OPAQUE") \

View File

@@ -17,14 +17,14 @@ limitations under the License.
 matmul
 """
 from __future__ import absolute_import
-from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType
-from mindspore.ops._op_impl._custom_op._basic import _shape_check, _get_bias, _get_input_shape
 import te.lang.cce
 import te.platform.cce_params as cce
 from te import tik
 from te import tvm
 from topi import generic
 from topi.cce import util
+from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType
+from mindspore.ops._op_impl._custom_op._basic import _shape_check, _get_bias, _get_input_shape

 # General limitation of the size for input shape: 2**31
 SHAPE_SIZE_LIMIT = 2147483648
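
The reorder above moves the project-local mindspore imports below the framework imports, the grouping that pylint's wrong-import-order check expects. Sketched with illustrative modules:

    from __future__ import absolute_import                # future imports first
    import logging                                        # standard library
    from te import tik                                    # third-party / framework
    from mindspore.ops.op_info_register import TBERegOp   # first-party last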

View File

@@ -19,9 +19,9 @@ matmul
 """
 from __future__ import absolute_import
-from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType
 from te import tik
 from topi.cce import util
+from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType

 matmul_cube_dense_right_op_info = TBERegOp("CusMatMulCubeDenseRight") \
     .fusion_type("OPAQUE") \

View File

@@ -17,11 +17,14 @@ limitations under the License.
 matmul
 """
 from __future__ import absolute_import
-from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType
-from mindspore.ops._op_impl._custom_op._basic import _shape_check, _get_bias, _get_input_shape
+import collections
 import te.platform.cce_params as cce
 from te import tik
 from topi.cce import util
+from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType
+from mindspore.ops._op_impl._custom_op._basic import _shape_check, _get_bias, _get_input_shape

 # General limitation of the size for input shape: 2**31
 SHAPE_SIZE_LIMIT = 2147483648
@@ -218,7 +221,8 @@ def get_cus_tile_info(input_x1, input_x2, diag_size):
     if shape_info not in tile_map:
         raise ValueError("shape %s is not supported" % str(shape_info))
     mo_tile, ko_tile, no_tile = tile_map[shape_info]
-    return mo_tile, ko_tile, no_tile, diag_opt
+    cus_tile_info = collections.namedtuple('cus_tile_info', ['mo_tile', 'ko_tile', 'no_tile', 'diag_opt'])
+    return cus_tile_info(mo_tile, ko_tile, no_tile, diag_opt)


 def cus_cube_matmul_cast(tik_instance, input_x1, trans_a, input_x2, trans_b,
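
The hunk above replaces a bare 4-tuple return with a namedtuple, so callers can address fields by name while positional unpacking keeps working. A minimal sketch under illustrative names and values:

    import collections

    TileInfo = collections.namedtuple('TileInfo', ['mo_tile', 'ko_tile', 'no_tile', 'diag_opt'])

    def get_tile_info():
        return TileInfo(mo_tile=2, ko_tile=4, no_tile=8, diag_opt=False)

    info = get_tile_info()
    assert info.mo_tile == 2   # fields addressable by name
    mo, ko, no, diag = info    # positional unpacking still works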

View File

@@ -19,13 +19,17 @@ matmul
 """
 from __future__ import absolute_import
-from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType
+from collections import namedtuple
+import logging
 from te import tik
 from topi.cce import util
+from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType

 # General limitation of the size for input shape: 2**31
 SHAPE_SIZE_LIMIT = 2147483648
 NoneType = type(None)
+logging.basicConfig(level=logging.DEBUG, format="%(asctime)s %(levelname)s %(message)s")

 cus_matmul_cube_fracz_right_mul_op_info = TBERegOp("CusMatMulCubeFraczRightMul") \
     .fusion_type("OPAQUE") \
@@ -134,9 +138,12 @@ def get_cus_tile_info(input_x1, input_x2, input_x3):
             mo_tile_ = mo_tile_ // 2
         else:
             raise ValueError("please add tile config to the tile_map")
-    print("shape: %s, tile: %s" % (input_shape, str((mo_tile_, ko_tile_, no_tile_, core_m_num_, core_n_num_,
-                                                     diag_opt))))
-    return mo_tile_, ko_tile_, no_tile_, core_m_num_, core_n_num_, diag_opt
+    logging.info(
+        "shape: %s, tile: %s", input_shape, str((mo_tile_, ko_tile_, no_tile_, core_m_num_, core_n_num_,
+                                                 diag_opt)))
+    cus_tile_info = namedtuple('cus_tile_info', ['mo_tile_', 'ko_tile_', 'no_tile_', 'core_m_num_',
+                                                 'core_n_num_', 'diag_opt'])
+    return cus_tile_info(mo_tile_, ko_tile_, no_tile_, core_m_num_, core_n_num_, diag_opt)


 def cus_cube_matmul_right_mul(tik_instance, input_x1, input_x2, input_x3,
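
The hunk above swaps print for logging.info with lazy %-style arguments, matching the basicConfig call added at module level. A minimal sketch (standard library only; values are illustrative):

    import logging

    logging.basicConfig(level=logging.DEBUG, format="%(asctime)s %(levelname)s %(message)s")

    input_shape = (128, 128)
    tile = (2, 4, 8, 1, 1, False)
    # logging formats the arguments only if the record is actually emitted
    logging.info("shape: %s, tile: %s", input_shape, str(tile))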

View File

@@ -19,13 +19,13 @@ matmul
 """
 from __future__ import absolute_import
 from impl.matmul_vector import matmul_vector_cce
-from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType
-from mindspore.ops._op_impl._custom_op._basic import _shape_check, _get_bias, _get_input_shape
-import te.lang.cce
 import te.platform.cce_params as cce
+import te.lang.cce
 from te import tvm
 from topi import generic
 from topi.cce import util
+from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType
+from mindspore.ops._op_impl._custom_op._basic import _shape_check, _get_bias, _get_input_shape

 # General limitation of the size for input shape: 2**31
 SHAPE_SIZE_LIMIT = 2147483648

View File

@@ -13,9 +13,11 @@
 # limitations under the License.
 # ============================================================================
 """CusMatrixCombine"""
-from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType
+from __future__ import absolute_import
 from te import tik
 from topi.cce import util
+from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType

 cus_matrix_combine_op_info = TBERegOp("CusMatrixCombine") \
     .fusion_type("OPAQUE") \

View File

@@ -13,9 +13,11 @@
 # limitations under the License.
 # ============================================================================
 """CusTranspose02314"""
-from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType
+from __future__ import absolute_import
 from te import tik
 from topi.cce import util
+from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType

 cus_transpose02314_op_info = TBERegOp("CusTranspose02314") \
     .fusion_type("OPAQUE") \