diff --git a/mindspore/ops/_grad/grad_nn_ops.py b/mindspore/ops/_grad/grad_nn_ops.py
index 9e6a8791afb..246e4a63632 100755
--- a/mindspore/ops/_grad/grad_nn_ops.py
+++ b/mindspore/ops/_grad/grad_nn_ops.py
@@ -678,16 +678,13 @@ def get_bprop_top_kv2(self):
 
     def bprop(input_x, k, out, dout):
-        # (n1, n2, ...., n_p), in_lastdim = n_p
         in_shape = shape_op(input_x)
         in_lastdim = in_shape[-1]
 
-        # (n_1, ... n_(p-1), k), ind_lastdim = k
         indices = out[1]
         ind_shape = shape_op(indices)
         ind_lastdim = ind_shape[-1]
 
-        # (n_1*n_2..*n_(p-1), k), outerdim = n_1*n_2..*n_(p-1)
         ind_2d = reshape_op(indices, (-1, ind_lastdim))
         outerdim = shape_op(ind_2d)[0]
diff --git a/mindspore/ops/_op_impl/_custom_op/correction_mul.py b/mindspore/ops/_op_impl/_custom_op/correction_mul.py
index 49cd35cc111..1d2332ecc22 100644
--- a/mindspore/ops/_op_impl/_custom_op/correction_mul.py
+++ b/mindspore/ops/_op_impl/_custom_op/correction_mul.py
@@ -71,7 +71,6 @@ def correction_mul(x, batch_std, running_std, y, channel, kernel_name="correctio
     if not inp_dtype in check_list:
         raise RuntimeError("Dtype of input only support float16, float32")
 
-    # shape = util.shape_refine(shape)
     x_t = tvm.placeholder(shape, name="x", dtype=inp_dtype)
     shape_c = [1] * len(shape)
     shape_c[channel] = batch_std.get("ori_shape")[0]
diff --git a/mindspore/ops/_op_impl/_custom_op/fake_quant_perchannel.py b/mindspore/ops/_op_impl/_custom_op/fake_quant_perchannel.py
index dae2d7058dd..e6ee62f6ec4 100644
--- a/mindspore/ops/_op_impl/_custom_op/fake_quant_perchannel.py
+++ b/mindspore/ops/_op_impl/_custom_op/fake_quant_perchannel.py
@@ -60,7 +60,6 @@ def fake_quant_perchannel_compute(x, min_val, max_val, y, quant_min, quant_max,
     quant_min = te.lang.cce.broadcast(quant_min, minmax_shape, x.dtype)
     quant_max = te.lang.cce.broadcast(quant_max, minmax_shape, x.dtype)
 
-    # CalNudge(NudgeMinMax)
     scale = te.lang.cce.vdiv(te.lang.cce.vsub(
         max_val, min_val), te.lang.cce.vsub(quant_max, quant_min))
     zp_from_min = te.lang.cce.vsub(quant_min, te.lang.cce.vdiv(min_val, scale))
diff --git a/mindspore/ops/_op_impl/_custom_op/fake_quant_perchannel_grad.py b/mindspore/ops/_op_impl/_custom_op/fake_quant_perchannel_grad.py
index 795aab52a3d..399d4f0139a 100644
--- a/mindspore/ops/_op_impl/_custom_op/fake_quant_perchannel_grad.py
+++ b/mindspore/ops/_op_impl/_custom_op/fake_quant_perchannel_grad.py
@@ -87,7 +87,6 @@ def fake_quant_perchannel_grad_compute(dout, x, min_val, max_val, quant_min, qua
     quant_min = te.lang.cce.broadcast(quant_min, minmax_shape, x.dtype)
     quant_max = te.lang.cce.broadcast(quant_max, minmax_shape, x.dtype)
 
-    # CalNudge(NudgeMinMax)
     scale = te.lang.cce.vdiv(te.lang.cce.vsub(
         max_val, min_val), te.lang.cce.vsub(quant_max, quant_min))
     zp_from_min = te.lang.cce.vsub(quant_min, te.lang.cce.vdiv(min_val, scale))
diff --git a/mindspore/ops/_op_impl/_custom_op/fake_quant_perlayer.py b/mindspore/ops/_op_impl/_custom_op/fake_quant_perlayer.py
index 3e75e9e0a59..1390b31e182 100644
--- a/mindspore/ops/_op_impl/_custom_op/fake_quant_perlayer.py
+++ b/mindspore/ops/_op_impl/_custom_op/fake_quant_perlayer.py
@@ -61,7 +61,6 @@ def fake_quant_per_layer_compute(x, min_val, max_val, y, quant_min, quant_max, s
     max_val = te.lang.cce.vmax(te.lang.cce.vmuls(min_val, -1.), max_val)
     min_val = te.lang.cce.vmuls(max_val, -1.)
 
-    # CalNudge(NudgeMinMax)
     scale = te.lang.cce.vdiv(te.lang.cce.vsub(
         max_val, min_val), te.lang.cce.vsub(quant_max, quant_min))
     zp_from_min = te.lang.cce.vsub(quant_min, te.lang.cce.vdiv(min_val, scale))
diff --git a/mindspore/ops/_op_impl/_custom_op/fake_quant_perlayer_grad.py b/mindspore/ops/_op_impl/_custom_op/fake_quant_perlayer_grad.py
index a78effcc4fe..9109c3ec970 100644
--- a/mindspore/ops/_op_impl/_custom_op/fake_quant_perlayer_grad.py
+++ b/mindspore/ops/_op_impl/_custom_op/fake_quant_perlayer_grad.py
@@ -92,7 +92,6 @@ def fake_quant_per_layer_grad_compute(dout, x, min_val, max_val, quant_min, quan
     max_val = te.lang.cce.vmax(te.lang.cce.vmuls(min_val, -1.), max_val)
     min_val = te.lang.cce.vmuls(max_val, -1.)
 
-    # CalNudge(NudgeMinMax)
     scale = te.lang.cce.vdiv(te.lang.cce.vsub(
         max_val, min_val), te.lang.cce.vsub(quant_max, quant_min))
     zp_from_min = te.lang.cce.vsub(quant_min, te.lang.cce.vdiv(min_val, scale))
diff --git a/mindspore/ops/composite/multitype_ops/_constexpr_utils.py b/mindspore/ops/composite/multitype_ops/_constexpr_utils.py
index dec427601d1..903c111bc7b 100644
--- a/mindspore/ops/composite/multitype_ops/_constexpr_utils.py
+++ b/mindspore/ops/composite/multitype_ops/_constexpr_utils.py
@@ -91,20 +91,16 @@ def check_tensor_setitem_index(index, element_type=None):
     """Checks tuple index type of tensor assignment."""
     if index is None:
         raise IndexError("Tensor's index cannot be None.")
-    # eg. Tensor[Slice] = u
     if isinstance(index, slice):
         return True
-    # eg. Tensor[tuple] = u
     if isinstance(index, tuple):
         if not index:
             raise IndexError("Tensor's index cannot be empty.")
-        # eg. Tensor[tuple(Slice,...)] = u
         for item in index:
             if not isinstance(item, (slice, type(...), int)):
                 raise IndexError(
                     "Index of type '{}' is not supported yet.".format(type(item)))
         return True
-    # eg. Tensor[Tensor[dtype=bool]] = u
     if isinstance(index, mstype.tensor_type):
         if element_type is None or element_type != mstype.bool_:
             raise TypeError(
diff --git a/mindspore/ops/composite/multitype_ops/setitem_impl.py b/mindspore/ops/composite/multitype_ops/setitem_impl.py
index 53a5ba5b865..e6c7e28c95a 100644
--- a/mindspore/ops/composite/multitype_ops/setitem_impl.py
+++ b/mindspore/ops/composite/multitype_ops/setitem_impl.py
@@ -305,23 +305,19 @@ def _tensor_setitem_with_slice_v1(data, input_slice, value):
 
 @setitem.register("Tensor", "Number", "Number")
 def _tensor_setitem_with_int_v1(data, index, value):
-    """Syntax: A[1] = 3"""
     return compile_utils.tensor_setitem_by_number_with_number(data, index, value)
 
 
 @setitem.register("Tensor", "Number", "Tensor")
 def _tensor_setitem_with_int_v2(data, index, value):
-    """Syntax: A[1] = Tensor"""
     return compile_utils.tensor_setitem_by_number_with_tensor(data, index, value)
 
 
 @setitem.register("Tensor", "Ellipsis", "Number")
 def _tensor_setitem_with_ellipsis_v1(data, index, value):
-    """Syntax: A[...] = number."""
     return compile_utils.tensor_setitem_by_ellipsis_with_number(data, index, value)
 
 
 @setitem.register("Tensor", "Ellipsis", "Tensor")
 def _tensor_setitem_with_ellipsis_v2(data, index, value):
-    """Syntax: A[...] = Tensor."""
     return compile_utils.tensor_setitem_by_ellipsis_with_tensor(data, index, value)
diff --git a/mindspore/ops/operations/_thor_ops.py b/mindspore/ops/operations/_thor_ops.py
index a4f2335c9bc..a8f336841c3 100644
--- a/mindspore/ops/operations/_thor_ops.py
+++ b/mindspore/ops/operations/_thor_ops.py
@@ -320,8 +320,6 @@ class CusMatMulCube(PrimitiveWithInfer):
         from mindspore.ops._op_impl._custom_op.matmul_cube_impl import CusMatMulCube
 
     def infer_shape(self, data1_shape, data2_shape):
-        # shape = [1, data1_shape[1], data2_shape[2], 16, 16]
-        # return shape
         if self.transpose_a:
             k1, m = data1_shape
         else:
diff --git a/mindspore/ops/operations/array_ops.py b/mindspore/ops/operations/array_ops.py
index 5b7a645f630..638d74f364b 100644
--- a/mindspore/ops/operations/array_ops.py
+++ b/mindspore/ops/operations/array_ops.py
@@ -2068,7 +2068,6 @@ def _compute_slicing_length(begin, end, stride, x_shape, i):
         if 0 <= begin < x_dim:
             begin += -x_dim
         if begin >= x_dim:
-            # When slicing backward, if begin >= x_dim, set begin = -1, which means start from the last element.
             begin = -1
         if 0 <= end < x_dim:
             end += -x_dim
diff --git a/mindspore/ops/operations/nn_ops.py b/mindspore/ops/operations/nn_ops.py
index 305a5899b17..4dbe750aad8 100644
--- a/mindspore/ops/operations/nn_ops.py
+++ b/mindspore/ops/operations/nn_ops.py
@@ -745,7 +745,6 @@ class BNTrainingUpdate(PrimitiveWithInfer):
     def __init__(self, isRef=True, epsilon=1e-5, factor=0.1):
         self.init_prim_io_names(inputs=['x', 'sum', 'square_sum', 'scale', 'b', 'mean', 'variance'],
                                 outputs=['y', 'running_mean', 'running_variance', 'save_mean',
                                          'save_inv_variance'])
-        #self.isRef = validator.check_integer('isRef', isRef, [0, 1], Rel.IN)
         self.epsilon = validator.check_number_range('epsilon', epsilon, 0, 1, Rel.INC_RIGHT, 'BNTrainingUpdate')
         self.factor = validator.check_number_range('factor', factor, 0, 1, Rel.INC_BOTH, 'BNTrainingUpdate')