diff --git a/mindspore/nn/layer/basic.py b/mindspore/nn/layer/basic.py
index d31efd1ee26..f597f0a7697 100644
--- a/mindspore/nn/layer/basic.py
+++ b/mindspore/nn/layer/basic.py
@@ -591,7 +591,7 @@ class MatrixDiagPart(Cell):
         Tensor, same type as input `x`. The shape should be x.shape[:-2] + [min(x.shape[-2:])].
 
     Examples:
-        >>> x = Tensor([[[-1, 0], [0, 1]], [-1, 0], [0, 1]], [[-1, 0], [0, 1]]], mindspore.float32)
+        >>> x = Tensor([[[-1, 0], [0, 1]], [[-1, 0], [0, 1]], [[-1, 0], [0, 1]]], mindspore.float32)
         >>> matrix_diag_part = nn.MatrixDiagPart()
         >>> result = matrix_diag_part(x)
         [[-1., 1.], [-1., 1.], [-1., 1.]]
@@ -622,11 +622,11 @@ class MatrixSetDiag(Cell):
         Tensor, same type as input `x`. Its shape is the same as that of `x`.
 
     Examples:
-        >>> x = Tensor([[[-1, 0], [0, 1]], [-1, 0], [0, 1]], [[-1, 0], [0, 1]]], mindspore.float32)
+        >>> x = Tensor([[[-1, 0], [0, 1]], [[-1, 0], [0, 1]], [[-1, 0], [0, 1]]], mindspore.float32)
         >>> diagonal = Tensor([[-1., 2.], [-1., 1.], [-1., 1.]], mindspore.float32)
         >>> matrix_set_diag = nn.MatrixSetDiag()
         >>> result = matrix_set_diag(x, diagonal)
-        [[[-1, 0], [0, 2]], [-1, 0], [0, 1]], [[-1, 0], [0, 1]]]
+        [[[-1, 0], [0, 2]], [[-1, 0], [0, 1]], [[-1, 0], [0, 1]]]
     """
     def __init__(self):
         super(MatrixSetDiag, self).__init__()
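For reference, both corrected examples operate on a batch of three 2x2 matrices. A minimal NumPy sketch of the intended semantics (a stand-in for the MindSpore ops, purely illustrative):

```python
# Sketch only: NumPy stand-ins for MatrixDiagPart / MatrixSetDiag on the
# corrected example data (a batch of three 2x2 matrices).
import numpy as np

x = np.array([[[-1, 0], [0, 1]],
              [[-1, 0], [0, 1]],
              [[-1, 0], [0, 1]]], dtype=np.float32)

# MatrixDiagPart: extract the main diagonal of each batch matrix.
diag_part = np.stack([np.diagonal(m) for m in x])
print(diag_part)  # [[-1. 1.] [-1. 1.] [-1. 1.]]

# MatrixSetDiag: overwrite the main diagonal of each batch matrix.
diagonal = np.array([[-1., 2.], [-1., 1.], [-1., 1.]], dtype=np.float32)
result = x.copy()
for i in range(result.shape[0]):
    np.fill_diagonal(result[i], diagonal[i])
print(result[0])  # [[-1. 0.] [0. 2.]]
```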
diff --git a/mindspore/nn/loss/loss.py b/mindspore/nn/loss/loss.py
index 3f97fbf83c8..5f17baf64ac 100644
--- a/mindspore/nn/loss/loss.py
+++ b/mindspore/nn/loss/loss.py
@@ -218,7 +218,8 @@ class SoftmaxCrossEntropyWithLogits(_Loss):
         sparse (bool): Specifies whether labels use sparse format or not. Default: False.
         reduction (Union[str, None]): Type of reduction to apply to loss. Supports 'sum' or 'mean'. If None,
             no reduction is applied. Default: None.
-        smooth_factor (float): Label smoothing factor. It is a optional input. Default: 0.
+        smooth_factor (float): Label smoothing factor. It is an optional input, which should be in the
+            range [0, 1]. Default: 0.
         num_classes (int): The number of classes in the task. It is an optional input. Default: 2.
 
     Inputs:
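The [0, 1] bound follows from what label smoothing computes: the one-hot target is mixed toward a uniform distribution. A common convention (the exact on/off values inside this loss may differ) is on_value = 1 - smooth_factor and off_value = smooth_factor / (num_classes - 1); a factor outside [0, 1] would make one of them negative. A short sketch:

```python
# Sketch of label smoothing under the common on/off-value convention;
# the actual loss implementation may differ in detail.
import numpy as np

def smooth_labels(labels, num_classes, smooth_factor):
    """Map integer labels to smoothed one-hot targets."""
    on_value = 1.0 - smooth_factor
    off_value = smooth_factor / (num_classes - 1)
    out = np.full((len(labels), num_classes), off_value, dtype=np.float32)
    out[np.arange(len(labels)), labels] = on_value
    return out

print(smooth_labels([0, 2], num_classes=3, smooth_factor=0.1))
# [[0.9  0.05 0.05]
#  [0.05 0.05 0.9 ]]
```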
diff --git a/mindspore/ops/_grad/grad_math_ops.py b/mindspore/ops/_grad/grad_math_ops.py
index 3e5949df058..c7d39c6aa01 100755
--- a/mindspore/ops/_grad/grad_math_ops.py
+++ b/mindspore/ops/_grad/grad_math_ops.py
@@ -284,14 +284,9 @@ def get_bprop_ceil(self):
 @bprop_getters.register(P.FloorDiv)
 def get_bprop_floordiv(self):
     """Grad definition for `FloorDiv` operation."""
-    div_op = P.FloorDiv()
-    neg = P.Neg()
-    mul_op = P.Mul()
 
     def bprop(x, y, out, dout):
-        bc_x = div_op(dout, y)
-        bc_y = neg(mul_op(bc_x, out))
-        return binop_grad_common(x, y, bc_x, bc_y)
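+        # floor(x / y) is piecewise constant, so its gradient is zero almost everywhere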
+        return zeros_like(x), zeros_like(y)
 
     return bprop
 
@@ -311,14 +306,9 @@ def get_bprop_floormod(self):
 @bprop_getters.register(P.TruncateDiv)
 def get_bprop_truncate_div(self):
     """Grad definition for `TruncateDiv` operation."""
-    div_op = P.TruncateDiv()
-    neg = P.Neg()
-    mul_op = P.Mul()
 
     def bprop(x, y, out, dout):
-        bc_x = div_op(dout, y)
-        bc_y = neg(mul_op(bc_x, out))
-        return binop_grad_common(x, y, bc_x, bc_y)
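+        # truncated division is also piecewise constant; gradient is zero almost everywhere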
+        return zeros_like(x), zeros_like(y)
 
     return bprop
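Both replacements are mathematically sound: the removed bprops reused the gradient of ordinary division (dout / y), but floor and truncate division are step functions, flat everywhere except at their jump points, so the correct gradient is zero almost everywhere. A finite-difference sketch (plain NumPy, names illustrative):

```python
# Sketch: central differences show floor division is locally flat,
# unlike ordinary division whose gradient in x is 1 / y.
import numpy as np

x, y, eps = 7.3, 2.0, 1e-4
d_floor = (np.floor((x + eps) / y) - np.floor((x - eps) / y)) / (2 * eps)
d_div = (((x + eps) / y) - ((x - eps) / y)) / (2 * eps)
print(d_floor)  # 0.0 -- piecewise constant in x
print(d_div)    # 0.5 -- equals 1 / y
```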
 
diff --git a/mindspore/ops/_grad/grad_nn_ops.py b/mindspore/ops/_grad/grad_nn_ops.py
index 52d61dafb5d..75fd099f99a 100755
--- a/mindspore/ops/_grad/grad_nn_ops.py
+++ b/mindspore/ops/_grad/grad_nn_ops.py
@@ -14,7 +14,6 @@
 # ============================================================================
 
 """Define the grad rules of neural network related operations."""
-import math
 import numpy as np
 from mindspore.ops import _selected_grad_ops as SG
 from mindspore.ops.primitive import constexpr
@@ -632,11 +631,8 @@ def get_bprop_onehot(self):
 @constexpr
 def _range_op(start, limit, delta, dtype):
     """helper function for Grad TopK"""
-    range_op = inner.Range(float(start), float(limit), float(delta))
-    length_input = math.ceil((limit - start) / delta)
-    input_tensor = Tensor(list(range(length_input)), dtype)
-    range_out = range_op(input_tensor)
-    return range_out
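+    # @constexpr runs at compile time with Python ints, so a host-side range suffices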
+    output_tensor = Tensor(list(range(start, limit, delta)), dtype)
+    return output_tensor
 
 @constexpr
 def _get_1d_shape(in_shape):
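For integer arguments the simplification is exact: Python's range yields the same element count as the removed math.ceil((limit - start) / delta) calculation, without constructing and running an inner.Range op at compile time. A small illustrative check:

```python
# Sketch: len(range(start, limit, delta)) matches the ceil-based length
# the removed code computed, for integer arguments.
import math

for start, limit, delta in [(0, 10, 1), (0, 10, 3), (2, 11, 4)]:
    assert math.ceil((limit - start) / delta) == len(range(start, limit, delta))
    print(list(range(start, limit, delta)))
# [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
# [0, 3, 6, 9]
# [2, 6, 10]
```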