!15070 fix code review alarms
From: @shibeiji  Reviewed-by: @wuxuejian, @liangchenghui  Signed-off-by: @wuxuejian
Commit e2112c55f9
@@ -83,10 +83,11 @@ bool MinimumGradCPUKernel::Launch(const std::vector<kernel::AddressPtr> &inputs,
 }
 
 template <typename T>
-void MinimumGradRecTask(T *x, T *y, T *dout, T *dx, T *dy, size_t dim, size_t x_index, size_t y_index,
-                        size_t dout_index, const std::vector<size_t> &x_cargo, const std::vector<size_t> &y_cargo,
-                        const std::vector<size_t> &dout_cargo, const std::vector<size_t> &x_shape,
-                        const std::vector<size_t> &y_shape, const std::vector<size_t> &dout_shape) {
+void MinimumGradRecTask(const T *x, const T *y, const T *dout, T *dx, T *dy, const size_t dim, const size_t x_index,
+                        const size_t y_index, const size_t dout_index, const std::vector<size_t> &x_cargo,
+                        const std::vector<size_t> &y_cargo, const std::vector<size_t> &dout_cargo,
+                        const std::vector<size_t> &x_shape, const std::vector<size_t> &y_shape,
+                        const std::vector<size_t> &dout_shape) {
   for (size_t i = 0; i < dout_shape[dim]; i++) {
     size_t x_i = x_shape[dim] == dout_shape[dim] ? i * x_cargo[dim] : 0;
     size_t y_i = y_shape[dim] == dout_shape[dim] ? i * y_cargo[dim] : 0;
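The kernel-side change above is a const-correctness cleanup: the read-only inputs x, y, and dout become const T *, and the by-value index/dimension parameters are marked const, which is the usual fix for this class of code-review alarm. As a rough illustration of the pattern (a minimal sketch, not the actual MindSpore kernel; the gradient routing and the innermost-dimension handling are assumptions inferred from the visible context lines):

#include <cstddef>
#include <vector>

// Illustrative sketch of the const-qualified recursive task signature.
// Not the MindSpore implementation: the tie-breaking rule (ties go to x)
// and the recursion body are assumptions for demonstration only.
template <typename T>
void MinimumGradRecSketch(const T *x, const T *y, const T *dout, T *dx, T *dy, const size_t dim,
                          const size_t x_index, const size_t y_index, const size_t dout_index,
                          const std::vector<size_t> &x_cargo, const std::vector<size_t> &y_cargo,
                          const std::vector<size_t> &dout_cargo, const std::vector<size_t> &x_shape,
                          const std::vector<size_t> &y_shape, const std::vector<size_t> &dout_shape) {
  for (size_t i = 0; i < dout_shape[dim]; i++) {
    // A broadcast axis (input extent 1) keeps a zero offset, so every output
    // element along this axis maps back to the same input element.
    const size_t x_i = x_shape[dim] == dout_shape[dim] ? i * x_cargo[dim] : 0;
    const size_t y_i = y_shape[dim] == dout_shape[dim] ? i * y_cargo[dim] : 0;
    const size_t d_i = i * dout_cargo[dim];
    if (dim + 1 == dout_shape.size()) {
      // Innermost axis: accumulate the incoming gradient into whichever input
      // supplied the minimum.
      if (x[x_index + x_i] <= y[y_index + y_i]) {
        dx[x_index + x_i] += dout[dout_index + d_i];
      } else {
        dy[y_index + y_i] += dout[dout_index + d_i];
      }
    } else {
      MinimumGradRecSketch(x, y, dout, dx, dy, dim + 1, x_index + x_i, y_index + y_i, dout_index + d_i,
                           x_cargo, y_cargo, dout_cargo, x_shape, y_shape, dout_shape);
    }
  }
}

With x, y, and dout const-qualified, the compiler rejects any accidental write to the forward inputs inside the recursion; the const on the by-value scalars is purely a lint/style requirement and does not change the generated code.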
@@ -64,7 +64,7 @@ def gen_data(inputA_np, inputB_np, grad_=None):
 
 
 @pytest.mark.level0
-@pytest.mark.platform_x86_cpu_training
+@pytest.mark.platform_x86_cpu
 @pytest.mark.env_onecard
 def test_min_tensor_grad_4d():
     inputA_np = np.random.randn(1, 3, 2, 2).astype(np.float32)
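The remaining hunks below apply the same one-line change to each Minimum/MinimumGrad CPU test: the platform_x86_cpu_training marker is replaced with platform_x86_cpu. Pytest markers like these only attach metadata; they take effect when tests are selected with a marker expression (for example, pytest -m "platform_x86_cpu and env_onecard"), so the rename presumably brings these cases in line with the marker name the rest of the CPU suite is filtered on.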
@@ -76,7 +76,7 @@ def test_min_tensor_grad_4d():
 
 
 @pytest.mark.level0
-@pytest.mark.platform_x86_cpu_training
+@pytest.mark.platform_x86_cpu
 @pytest.mark.env_onecard
 def test_min_tensor_grad_result():
     inputA = np.array([[[[0.659578], [0.49113268], [0.75909054], [0.71681815], [0.30421826]]],
@@ -42,7 +42,7 @@ class TwoTensorsMinimum(Cell):
 
 
 @pytest.mark.level0
-@pytest.mark.platform_x86_cpu_training
+@pytest.mark.platform_x86_cpu
 @pytest.mark.env_onecard
 def test_minimum_constScalar_tensor_int():
     x = Tensor(np.array([[2, 3, 4], [100, 200, 300]]).astype(np.int32))
@@ -55,7 +55,7 @@ def test_minimum_constScalar_tensor_int():
 
 
 @pytest.mark.level0
-@pytest.mark.platform_x86_cpu_training
+@pytest.mark.platform_x86_cpu
 @pytest.mark.env_onecard
 def test_minimum_two_tensors_Not_Broadcast_int():
     prop = 100 if np.random.random() > 0.5 else -100
@@ -70,7 +70,7 @@ def test_minimum_two_tensors_Not_Broadcast_int():
 
 
 @pytest.mark.level0
-@pytest.mark.platform_x86_cpu_training
+@pytest.mark.platform_x86_cpu
 @pytest.mark.env_onecard
 def test_minimum_two_tensors_Broadcast_int():
     prop = 100 if np.random.random() > 0.5 else -100
@@ -85,7 +85,7 @@ def test_minimum_two_tensors_Broadcast_int():
 
 
 @pytest.mark.level0
-@pytest.mark.platform_x86_cpu_training
+@pytest.mark.platform_x86_cpu
 @pytest.mark.env_onecard
 def test_minimum_two_tensors_Broadcast_oneDimension_int():
     prop = 100 if np.random.random() > 0.5 else -100
@@ -100,7 +100,7 @@ def test_minimum_two_tensors_Broadcast_oneDimension_int():
 
 
 @pytest.mark.level0
-@pytest.mark.platform_x86_cpu_training
+@pytest.mark.platform_x86_cpu
 @pytest.mark.env_onecard
 def test_minimum_two_tensors_notBroadcast_all_oneDimension_int():
     x = Tensor(np.array([[2]]).astype(np.int32))
@@ -114,7 +114,7 @@ def test_minimum_two_tensors_notBroadcast_all_oneDimension_int():
 
 
 @pytest.mark.level0
-@pytest.mark.platform_x86_cpu_training
+@pytest.mark.platform_x86_cpu
 @pytest.mark.env_onecard
 def test_minimum_two_tensors_notBroadcast_float32():
     prop = 100 if np.random.random() > 0.5 else -100
@@ -132,7 +132,7 @@ def test_minimum_two_tensors_notBroadcast_float32():
 
 
 @pytest.mark.level0
-@pytest.mark.platform_x86_cpu_training
+@pytest.mark.platform_x86_cpu
 @pytest.mark.env_onecard
 def test_minimum_two_tensors_notBroadcast_float16():
     prop = 100 if np.random.random() > 0.5 else -100
@@ -150,7 +150,7 @@ def test_minimum_two_tensors_notBroadcast_float16():
 
 
 @pytest.mark.level0
-@pytest.mark.platform_x86_cpu_training
+@pytest.mark.platform_x86_cpu
 @pytest.mark.env_onecard
 def test_minimum_two_tensors_Broadcast_float16():
     prop = 100 if np.random.random() > 0.5 else -100
@@ -168,7 +168,7 @@ def test_minimum_two_tensors_Broadcast_float16():
 
 
 @pytest.mark.level0
-@pytest.mark.platform_x86_cpu_training
+@pytest.mark.platform_x86_cpu
 @pytest.mark.env_onecard
 def test_minimum_two_tensors_notBroadcast_float64():
     prop = 100 if np.random.random() > 0.5 else -100