diff --git a/mindspore/ccsrc/pipeline/pipeline.cc b/mindspore/ccsrc/pipeline/pipeline.cc
index 7df5c6fea06..f4c41fec1cb 100644
--- a/mindspore/ccsrc/pipeline/pipeline.cc
+++ b/mindspore/ccsrc/pipeline/pipeline.cc
@@ -615,12 +615,16 @@ void ProcessVmArgInner(const py::tuple &args, const ResourcePtr &res, VectorRef
     py::object arg = args[i];
     auto ms_context = MsContext::GetInstance();
     if (ms_context->backend_policy() == kMsConvert && py::isinstance<py::array>(arg)) {
-      MS_LOG(EXCEPTION) << "Args[" << i << "] is numpy array, not tensor";
+      MS_LOG(EXCEPTION) << "The " << i << "th arg is numpy array, not tensor.";
     }
     ValuePtr converted = nullptr;
     bool succ = parse::ConvertData(arg, &converted);
     if (!succ) {
-      MS_LOG(EXCEPTION) << "Args convert error";
+      MS_LOG(EXCEPTION) << "The " << i << "th arg convert failed.";
+    }
+    if (MsContext::GetInstance()->execution_mode() == 0 && !converted->isa<tensor::Tensor>()) {
+      MS_EXCEPTION(TypeError) << "For 'graph mode', the " << i << "th arg: " << converted->ToString()
+                              << " is not tensor.";
     }
     arg_list->push_back(converted);
   }
diff --git a/mindspore/ccsrc/pipeline/pipeline_ge.cc b/mindspore/ccsrc/pipeline/pipeline_ge.cc
index 309b482d622..27761096553 100644
--- a/mindspore/ccsrc/pipeline/pipeline_ge.cc
+++ b/mindspore/ccsrc/pipeline/pipeline_ge.cc
@@ -460,12 +460,12 @@ void ProcessGeArg(const std::map<std::string, ExecutorInfoPtr> &info, const py::
     ValuePtr converted = nullptr;
     bool succ = parse::ConvertData(args[i], &converted);
     if (!succ) {
-      MS_LOG(EXCEPTION) << "Args convert error";
+      MS_LOG(EXCEPTION) << "The " << i << "th arg convert failed.";
     }
     if (converted->isa<tensor::Tensor>()) {
       inputs->push_back(converted->cast<tensor::TensorPtr>());
     } else {
-      MS_EXCEPTION(TypeError) << "Args " << converted->ToString() << " is not tensor";
+      MS_EXCEPTION(TypeError) << "The " << i << "th arg: " << converted->ToString() << " is not tensor.";
     }
   }
 }
diff --git a/mindspore/ops/operations/math_ops.py b/mindspore/ops/operations/math_ops.py
index 01473e76041..5bffc2feb52 100644
--- a/mindspore/ops/operations/math_ops.py
+++ b/mindspore/ops/operations/math_ops.py
@@ -777,7 +777,7 @@ class Sub(_MathBinaryOp):
 
     When the inputs are one tensor and one scalar, the scalar only could be a constant.
 
-    Inputs: 
+    Inputs:
         - **input_x** (Union[Tensor, Number, bool]) - The first input is a number or
           a bool or a tensor whose data type is number or bool.
         - **input_y** (Union[Tensor, Number, bool]) - The second input is a number or
@@ -1845,7 +1845,7 @@ class LogicalAnd(_LogicBinaryOp):
     Inputs:
         - **input_x** (Union[Tensor, bool]) - The first input is a bool or a tensor whose data type is bool.
         - **input_y** (Union[Tensor, bool]) - The second input is a bool when the first input is a tensor or
-          a tensor whose data type is bool. 
+          a tensor whose data type is bool.
 
     Outputs:
         Tensor, the shape is same as the shape after broadcasting, and the data type is bool.
@@ -1875,7 +1875,7 @@ class LogicalOr(_LogicBinaryOp):
     Inputs:
        - **input_x** (Union[Tensor, bool]) - The first input is a bool or a tensor whose data type is bool.
        - **input_y** (Union[Tensor, bool]) - The second input is a bool when the first input is a tensor or
-          a tensor whose data type is bool. 
+          a tensor whose data type is bool.
 
     Outputs:
        Tensor, the shape is same as the shape after broadcasting,and the data type is bool.
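The net effect of the pipeline.cc change: under graph mode (`execution_mode() == 0`), any positional argument that does not convert to a tensor is now rejected with a `TypeError` instead of passing through. A minimal sketch of how this surfaces on the Python side; `AddNet` is a hypothetical cell used only to trigger the check, and the exception text is taken from the diff above:

```python
import numpy as np
import mindspore.context as context
import mindspore.nn as nn
from mindspore import Tensor
from mindspore.ops import operations as P

context.set_context(mode=context.GRAPH_MODE)

class AddNet(nn.Cell):
    """Hypothetical two-input cell; both inputs become graph inputs."""
    def __init__(self):
        super(AddNet, self).__init__()
        self.add = P.TensorAdd()

    def construct(self, x, y):
        return self.add(x, y)

net = AddNet()
x = Tensor(np.ones((2, 2)).astype(np.float32))

net(x, x)    # fine: both graph inputs are tensors

# With this patch, a non-tensor graph input fails fast in
# ProcessVmArgInner, e.g.:
#   TypeError: For 'graph mode', the 1th arg: 1.0 is not tensor.
net(x, 1.0)
```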
diff --git a/tests/st/ops/gpu/test_cast_op.py b/tests/st/ops/gpu/test_cast_op.py
index 4e1d32d9dab..793d92d7bc4 100644
--- a/tests/st/ops/gpu/test_cast_op.py
+++ b/tests/st/ops/gpu/test_cast_op.py
@@ -24,13 +24,15 @@ from mindspore.ops import operations as P
 
 
 class Net(Cell):
-    def __init__(self):
+    def __init__(self, type0, type1):
         super(Net, self).__init__()
         self.Cast = P.Cast()
+        self.type0 = type0
+        self.type1 = type1
 
-    def construct(self, x0, type0, x1, type1):
-        output = (self.Cast(x0, type0),
-                  self.Cast(x1, type1))
+    def construct(self, x0, x1):
+        output = (self.Cast(x0, self.type0),
+                  self.Cast(x1, self.type1))
         return output
 
 
@@ -44,8 +46,8 @@ def test_cast():
     t1 = mstype.float32
 
     context.set_context(mode=context.GRAPH_MODE, device_target='GPU')
-    net = Net()
-    output = net(x0, t0, x1, t1)
+    net = Net(t0, t1)
+    output = net(x0, x1)
     type0 = output[0].asnumpy().dtype
     assert type0 == 'float16'
     type1 = output[1].asnumpy().dtype
@@ -62,8 +64,8 @@ def test_cast1():
     t1 = mstype.float32
 
     context.set_context(mode=context.GRAPH_MODE, device_target='GPU')
-    net = Net()
-    output = net(x0, t0, x1, t1)
+    net = Net(t0, t1)
+    output = net(x0, x1)
     type0 = output[0].asnumpy().dtype
     assert type0 == 'float32'
     type1 = output[1].asnumpy().dtype
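The test_cast_op.py refactor is the test-side consequence of that check: `mstype` dtype objects are not tensors, so they can no longer travel through `construct` as graph inputs and instead become attributes fixed at construction time. A usage sketch of the refactored `Net`, mirroring `test_cast`:

```python
import numpy as np
import mindspore.common.dtype as mstype
import mindspore.context as context
from mindspore import Tensor

context.set_context(mode=context.GRAPH_MODE, device_target='GPU')

# The target dtypes are baked into the cell at construction time;
# construct now receives only tensors, satisfying the new
# graph-mode argument check.
net = Net(mstype.float16, mstype.float32)
x0 = Tensor(np.random.uniform(-2, 2, (3, 2)).astype(np.float32))
x1 = Tensor(np.random.uniform(-2, 2, (3, 2)).astype(np.float16))
out0, out1 = net(x0, x1)
assert out0.asnumpy().dtype == 'float16'
assert out1.asnumpy().dtype == 'float32'
```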
diff --git a/tests/st/ops/gpu/test_rmsprop.py b/tests/st/ops/gpu/test_rmsprop.py
index aa27fcf2638..24d10034758 100644
--- a/tests/st/ops/gpu/test_rmsprop.py
+++ b/tests/st/ops/gpu/test_rmsprop.py
@@ -25,24 +25,29 @@ context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
 
 
 class NetCenteredRMSProp(nn.Cell):
-    def __init__(self):
+    def __init__(self, lr, decay, momentum, epsilon):
         super(NetCenteredRMSProp, self).__init__()
         self.rms_opt = P.ApplyCenteredRMSProp()
+        self.lr = lr
+        self.decay = decay
+        self.momentum = momentum
+        self.epsilon = epsilon
 
-    def construct(self, var, g, mg, rms, mom, lr, decay, momentum, epsilon):
-        return self.rms_opt(var, mg, rms, mom, g, lr, decay, momentum, epsilon)
+    def construct(self, var, g, mg, rms, mom):
+        return self.rms_opt(var, mg, rms, mom, g, self.lr, self.decay, self.momentum, self.epsilon)
 
 
 class NetRMSProp(nn.Cell):
-    def __init__(self, decay, momentum, epsilon):
+    def __init__(self, lr, decay, momentum, epsilon):
         super(NetRMSProp, self).__init__()
+        self.lr = lr
         self.decay = decay
         self.momentum = momentum
         self.epsilon = epsilon
         self.rms_opt = P.ApplyRMSProp()
 
-    def construct(self, var, g, mg, rms, mom, lr):
-        return self.rms_opt(var, rms, mom, lr, g, self.decay, self.momentum, self.epsilon)
+    def construct(self, var, g, mg, rms, mom):
+        return self.rms_opt(var, rms, mom, self.lr, g, self.decay, self.momentum, self.epsilon)
 
 
 def rmsprop_numpy(variable, gradients, mean_square, moment,
@@ -82,16 +87,14 @@ def test_rmsprop():
 
     if centered:
         rmspropcented_numpy(variable_np, gradients_np, mean_gradients_np, mean_square_np, moment_np,
                             learning_rate, decay, momentum, epsilon)
-        net = NetCenteredRMSProp()
-        _ = net(variable_ms, gradients_ms, mean_gradients_ms, mean_square_ms,
-                moment_ms, learning_rate, decay, momentum, epsilon)
+        net = NetCenteredRMSProp(learning_rate, decay, momentum, epsilon)
+        _ = net(variable_ms, gradients_ms, mean_gradients_ms, mean_square_ms, moment_ms)
     else:
         rmsprop_numpy(variable_np, gradients_np, mean_square_np, moment_np,
                       learning_rate, decay, momentum, epsilon)
-        net = NetRMSProp(decay, momentum, epsilon)
-        _ = net(variable_ms, gradients_ms, mean_gradients_ms, mean_square_ms,
-                moment_ms, learning_rate)
+        net = NetRMSProp(learning_rate, decay, momentum, epsilon)
+        _ = net(variable_ms, gradients_ms, mean_gradients_ms, mean_square_ms, moment_ms)
 
     error = np.ones(shape=variable_np.shape) * 10e-6
     diff = variable_ms.asnumpy() - variable_np
@@ -135,15 +138,13 @@ def test_rmspropcenter():
     if centered:
         rmspropcented_numpy(variable_np, gradients_np, mean_gradients_np, mean_square_np, moment_np,
                             learning_rate, decay, momentum, epsilon)
-        net = NetCenteredRMSProp()
-        _ = net(variable_ms, gradients_ms, mean_gradients_ms, mean_square_ms, moment_ms,
-                learning_rate, decay, momentum, epsilon)
+        net = NetCenteredRMSProp(learning_rate, decay, momentum, epsilon)
+        _ = net(variable_ms, gradients_ms, mean_gradients_ms, mean_square_ms, moment_ms)
     else:
         rmsprop_numpy(variable_np, gradients_np, mean_square_np, moment_np,
                       learning_rate, decay, momentum, epsilon)
-        net = NetRMSProp(decay, momentum, epsilon)
-        _ = net(variable_ms, gradients_ms, mean_gradients_ms, mean_square_ms, moment_ms,
-                learning_rate)
+        net = NetRMSProp(learning_rate, decay, momentum, epsilon)
+        _ = net(variable_ms, gradients_ms, mean_gradients_ms, mean_square_ms, moment_ms)
 
     error = np.ones(shape=variable_np.shape) * 10e-6
     diff = variable_ms.asnumpy() - variable_np
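The same pattern is applied here: the scalar hyperparameters (`lr`, `decay`, `momentum`, `epsilon`) move from `construct` to `__init__`, so only tensors remain as graph inputs. For reference, the numpy baselines these tests compare against presumably implement the standard (centered) RMSProp recurrences; a sketch under that assumption, since the helper bodies are not shown in this diff:

```python
import numpy as np

def rmsprop_numpy(variable, gradients, mean_square, moment,
                  learning_rate, decay, momentum, epsilon):
    # Plain RMSProp, updated in place (mirrors ApplyRMSProp semantics):
    #   ms  <- decay * ms + (1 - decay) * g^2
    #   mom <- momentum * mom + lr * g / sqrt(ms + eps)
    #   var <- var - mom
    mean_square[:] = decay * mean_square + (1.0 - decay) * gradients ** 2
    moment[:] = momentum * moment + learning_rate * gradients / np.sqrt(mean_square + epsilon)
    variable[:] -= moment

def rmspropcented_numpy(variable, gradients, mean_gradients, mean_square, moment,
                        learning_rate, decay, momentum, epsilon):
    # Centered variant: also tracks E[g] and divides by the variance
    # estimate E[g^2] - E[g]^2 instead of the raw second moment.
    mean_gradients[:] = decay * mean_gradients + (1.0 - decay) * gradients
    mean_square[:] = decay * mean_square + (1.0 - decay) * gradients ** 2
    denom = mean_square - mean_gradients ** 2 + epsilon
    moment[:] = momentum * moment + learning_rate * gradients / np.sqrt(denom)
    variable[:] -= moment
```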