forked from mindspore-Ecosystem/mindspore
more understandable exceptions under dir: mindspore/core/ops
This commit is contained in:
parent
168e119c21
commit
9ba3e0419f
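The change is mechanical across every file below: each thrown message now starts with "For '<op_name>'", quotes argument names, states the constraint with "must", reports the offending values after "but got", and ends with a period. As a minimal, self-contained sketch of that convention (the MakeOpError helper is hypothetical; MindSpore's real MS_EXCEPTION macro streams into its logging framework rather than throwing std::invalid_argument):

// Hypothetical stand-in for MindSpore's MS_EXCEPTION streaming macro, for
// illustration only: builds the message shape used across this commit,
// "For '<op>', <subject> must <constraint>, but got: <actual>."
#include <iostream>
#include <sstream>
#include <stdexcept>
#include <string>

std::string MakeOpError(const std::string &op_name, const std::string &subject,
                        const std::string &constraint, const std::string &actual) {
  std::ostringstream oss;
  oss << "For '" << op_name << "', " << subject << " must " << constraint
      << ", but got: " << actual << ".";
  return oss.str();
}

int main() {
  try {
    // Mirrors e.g. the AccumulateNV2 check below: shape of input[i] must be
    // consistent with the shape of input[0].
    throw std::invalid_argument(MakeOpError("AccumulateNV2", "shape of input[1]",
                                            "be consistent with the shape of input[0]",
                                            "[2, 3] vs [2, 4]"));
  } catch (const std::invalid_argument &e) {
    std::cout << e.what() << std::endl;  // prints the canonical message
  }
  return 0;
}
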
@@ -135,8 +135,8 @@ ValuePtr AbsInferValue(const PrimitivePtr &prim, const std::vector<AbstractBaseP
     default: {
       MS_EXCEPTION(TypeError) << "For '" << prim->name()
                               << "', the supported data type is ['int8', 'int16', 'int32', 'int64', 'uint8', "
-                                 "'uint16','uint32', 'uint64','float16', 'float32', 'float64'], but got "
-                              << x_tensor->ToString();
+                                 "'uint16','uint32', 'uint64','float16', 'float32', 'float64'], but got: "
+                              << x_tensor->ToString() << ".";
     }
   }
   return result_tensor;

@@ -52,9 +52,9 @@ abstract::ShapePtr AccumulateNV2InferShape(const PrimitivePtr &primitive,
       }
     }
     if (*shape != *shape_0) {
-      MS_EXCEPTION(ValueError) << "For '" << primitive->name() << "', Shape of input[" << i
-                               << "] should be not consistent with the shape of input[0], but got shape of input[" << i
-                               << "]: " << shape->ToString() << ", shape of input[0]: " << shape_0->ToString();
+      MS_EXCEPTION(ValueError) << "For '" << primitive->name() << "', shape of input[" << i
+                               << "] must be consistent with the shape of input[0], but got shape of input[" << i
+                               << "]: " << shape->ToString() << ", shape of input[0]: " << shape_0->ToString() << ".";
     }
   }
   auto in_shape = element0_shape_map[kShape];

@@ -44,16 +44,17 @@ abstract::ShapePtr AddNInferShape(const PrimitivePtr &primitive, const std::vect
     const auto &shape_0_vec = shape_0->cast<abstract::ShapePtr>()->shape();
     if ((shape_vec == ShapeVector({1}) && shape_0_vec == ShapeVector()) ||
         (shape_vec == ShapeVector() && shape_0_vec == ShapeVector({1}))) {
-      MS_LOG(DEBUG) << "The primitive[" << primitive->name() << "]'s input[" << i << "] shape: " << shape->ToString()
-                    << " are consistent with the shape of input[0]" << shape_0->ToString();
+      MS_LOG(DEBUG) << "For '" << primitive->name() << "', shape of input[" << i
+                    << "] and that of input[0] must be the same. But got shape of input[" << i
+                    << "]: " << shape->ToString() << ", shape of input[0]: " << shape_0->ToString() << ".";
       continue;
     }
   }
   if (!shape->IsDynamic() && !shape_0->IsDynamic()) {
     if (*shape != *shape_0) {
-      MS_EXCEPTION(ValueError) << "The primitive[" << primitive->name() << "]'s input shape must be same, "
-                               << "but got the shape of input[" << i << "]: " << shape->ToString()
-                               << ", shape of input[0]:" << shape_0->ToString();
+      MS_EXCEPTION(ValueError) << "For '" << primitive->name()
+                               << "', input shape must be same, but got shape of input[" << i
+                               << "]: " << shape->ToString() << ", shape of input[0]: " << shape_0->ToString() << ".";
     }
   }
 }

@@ -83,20 +83,23 @@ abstract::TupleShapePtr ApplyAdaMaxInferShape(const PrimitivePtr &primitive,
   same_shape_args_map.insert({"grad", grad_shape});
   if (!var_shape_ptr->IsDynamic() && !m_shape_ptr->IsDynamic()) {
     if (*m_shape != *var_shape) {
-      MS_EXCEPTION(ValueError) << primitive->name() << " evaluator arg m shape " << m_shape->ToString()
-                               << " are not consistent with var shape " << var_shape->ToString();
+      MS_EXCEPTION(ValueError) << "For '" << primitive->name()
+                               << "', evaluator arg 'm' and 'var' must have the same shape. But got 'm' shape: "
+                               << m_shape->ToString() << ", 'var' shape: " << var_shape->ToString() << ".";
     }
   }
   if (!v_shape_ptr->IsDynamic() && !var_shape_ptr->IsDynamic()) {
     if (*v_shape != *var_shape) {
-      MS_EXCEPTION(ValueError) << primitive->name() << " evaluator arg v shape " << v_shape->ToString()
-                               << " are not consistent with var shape " << var_shape->ToString();
+      MS_EXCEPTION(ValueError) << "For '" << primitive->name()
+                               << "', evaluator arg 'v' and 'var' must have the same shape. But got 'v' shape: "
+                               << v_shape->ToString() << ", 'var' shape: " << var_shape->ToString() << ".";
     }
   }
   if (!grad_shape_ptr->IsDynamic() && !var_shape_ptr->IsDynamic()) {
     if (*grad_shape != *var_shape) {
-      MS_EXCEPTION(ValueError) << primitive->name() << " evaluator arg grad shape " << grad_shape->ToString()
-                               << " are not consistent with var shape " << var_shape->ToString();
+      MS_EXCEPTION(ValueError) << "For '" << primitive->name()
+                               << "', evaluator arg 'grad' and 'var' must have the same shape. But got 'grad' shape: "
+                               << grad_shape->ToString() << ", 'var' shape: " << var_shape->ToString() << ".";
     }
   }

@@ -46,22 +46,28 @@ abstract::TupleShapePtr ApplyAdadeltaInferShape(const PrimitivePtr &primitive,
   // var and accum must have the same shape when is not dynamic
   if (!var_shape_ptr->IsDynamic() && !accum_shape_ptr->IsDynamic()) {
     if (*var_shape != *accum_shape) {
-      MS_EXCEPTION(ValueError) << prim_name << " evaluator arg accum shape " << accum_shape->ToString()
-                               << " are not consistent with var shape " << var_shape->ToString();
+      MS_EXCEPTION(ValueError)
+        << "For '" << prim_name
+        << "', 'var' and 'accum' must have the same shape when is not dynamic. But got 'var' shape: "
+        << var_shape->ToString() << ", 'accum' shape: " << accum_shape->ToString() << ".";
     }
   }
   // var and accum update must have the same shape when is not dynamic
   if (!var_shape_ptr->IsDynamic() && !accum_update_shape_ptr->IsDynamic()) {
     if (*var_shape != *accum_update_shape) {
-      MS_EXCEPTION(ValueError) << prim_name << " evaluator arg accum update shape " << accum_update_shape->ToString()
-                               << " are not consistent with var shape " << var_shape->ToString();
+      MS_EXCEPTION(ValueError)
+        << "For '" << prim_name
+        << "', 'var' and 'accum_update' must have the same shape when is not dynamic. But got 'var' shape: "
+        << var_shape->ToString() << ", 'accum_update' shape: " << accum_update_shape->ToString() << ".";
     }
   }
   // var and grad must have the same shape when is not dynamic
   if (!var_shape_ptr->IsDynamic() && !grad_shape_ptr->IsDynamic()) {
     if (*var_shape != *grad_shape) {
-      MS_EXCEPTION(ValueError) << prim_name << " evaluator arg grad shape " << grad_shape->ToString()
-                               << " are not consistent with var shape " << var_shape->ToString();
+      MS_EXCEPTION(ValueError)
+        << "For '" << prim_name
+        << "', 'var' and 'grad' must have the same shape when is not dynamic. But got 'var' shape: "
+        << var_shape->ToString() << ", 'grad' shape: " << grad_shape->ToString() << ".";
     }
   }
   const int64_t kShapeSize = 1;

@@ -52,8 +52,10 @@ abstract::TupleShapePtr ApplyAdagradInferShape(const PrimitivePtr &primitive,
   same_shape_args_map.insert({"grad", grad_shape_ptr});
   for (auto &elem : same_shape_args_map) {
     if (*elem.second != *var_shape_ptr) {
-      MS_EXCEPTION(ValueError) << prim_name << " evaluator arg " << elem.first << " shape " << elem.second->ToString()
-                               << " are not consistent with var shape " << var_shape_ptr->ToString();
+      MS_EXCEPTION(ValueError) << "For '" << prim_name << "', evaluator arg '" << elem.first
+                               << "' and 'var' must have the same shape. But got '" << elem.first
+                               << "' shape: " << elem.second->ToString()
+                               << ", 'var' shape: " << var_shape_ptr->ToString() << ".";
     }
   }
   return std::make_shared<abstract::TupleShape>(std::vector<abstract::BaseShapePtr>{var_shape_ptr, accum_shape_ptr});

@@ -51,9 +51,10 @@ abstract::TupleShapePtr ApplyAdagradV2InferShape(const PrimitivePtr &primitive,
   same_shape_args_map.insert({"grad", grad_shape});
   for (auto &elem : same_shape_args_map) {
     if (*elem.second != *var_shape) {
-      MS_EXCEPTION(ValueError) << primitive->name() << " evaluator arg " << elem.first << " shape "
-                               << elem.second->ToString() << " are not consistent with var shape "
-                               << var_shape->ToString();
+      MS_EXCEPTION(ValueError) << "For '" << primitive->name() << "', evaluator arg '" << elem.first
+                               << "' and 'var' must have the same shape. But got '" << elem.first
+                               << "' shape: " << elem.second->ToString() << ", 'var' shape: " << var_shape->ToString()
+                               << ".";
     }
   }
   return std::make_shared<abstract::TupleShape>(std::vector<abstract::BaseShapePtr>{var_shape, accum_shape});

@@ -56,8 +56,10 @@ abstract::TupleShapePtr ApplyAdamWithAmsgradInferShape(const PrimitivePtr &primi
   same_shape_args_map.insert({"grad", grad_shape});
   for (auto &elem : same_shape_args_map) {
     if (*elem.second != *var_shape) {
-      MS_EXCEPTION(ValueError) << prim_name << " evaluator arg " << elem.first << " shape " << elem.second->ToString()
-                               << " are not consistent with var shape " << var_shape->ToString();
+      MS_EXCEPTION(ValueError) << "For '" << prim_name << "', evaluator arg '" << elem.first
+                               << "' and 'var' must have the same shape. But got '" << elem.first
+                               << "' shape: " << elem.second->ToString() << ", 'var' shape: " << var_shape->ToString()
+                               << ".";
     }
   }
   return std::make_shared<abstract::TupleShape>(

@@ -48,14 +48,16 @@ abstract::TupleShapePtr ApplyAddSignInferShape(const PrimitivePtr &primitive,
   auto grad_shape_ptr = grad_shape->cast<abstract::ShapePtr>();
   if (!m_shape_ptr->IsDynamic() && !var_shape_ptr->IsDynamic()) {
     if (*m_shape != *var_shape) {
-      MS_EXCEPTION(ValueError) << prim_name << " evaluator arg m shape " << m_shape->ToString()
-                               << " are not consistent with var shape " << var_shape->ToString();
+      MS_EXCEPTION(ValueError) << "For '" << prim_name
+                               << "', evaluator arg 'm' and 'var' must have the same shape. But got 'm' shape: "
+                               << m_shape->ToString() << ", 'var' shape: " << var_shape->ToString() << ".";
     }
   }
   if (!grad_shape_ptr->IsDynamic() && !var_shape_ptr->IsDynamic()) {
     if (*grad_shape != *var_shape) {
-      MS_EXCEPTION(ValueError) << prim_name << " evaluator arg grad shape " << grad_shape->ToString()
-                               << " are not consistent with var shape " << var_shape->ToString();
+      MS_EXCEPTION(ValueError) << "For '" << prim_name
+                               << "', evaluator arg 'grad' and 'var' must have the same shape. But got 'grad' shape: "
+                               << grad_shape->ToString() << ", 'var' shape: " << var_shape->ToString() << ".";
     }
   }
   const int64_t kShapeSize = 1;

@@ -42,29 +42,33 @@ abstract::ShapePtr ApplyCenteredRMSPropInferShape(const PrimitivePtr &primitive,
   // var and mg must have the same shape when is not dynamic
   if (!var_shape_ptr->IsDynamic() && !mg_shape_ptr->IsDynamic()) {
     if (*var_shape != *mg_shape) {
-      MS_EXCEPTION(ValueError) << op_name << " mean gradient shape " << mg_shape->ToString()
-                               << " are not consistent with var shape " << var_shape->ToString();
+      MS_EXCEPTION(ValueError) << "For '" << op_name
+                               << "', 'mean_gradient'must have the same shape as 'var'. But got 'mean_gradient' shape: "
+                               << mg_shape->ToString() << ", 'var' shape: " << var_shape->ToString() << ".";
     }
   }
   // var and ms must have the same shape when is not dynamic
   if (!var_shape_ptr->IsDynamic() && !ms_shape_ptr->IsDynamic()) {
     if (*var_shape != *ms_shape) {
-      MS_EXCEPTION(ValueError) << op_name << " mean square shape " << ms_shape->ToString()
-                               << " are not consistent with var shape " << var_shape->ToString();
+      MS_EXCEPTION(ValueError) << "For '" << op_name
+                               << "', 'mean_square' must have the same shape as 'var'. But got 'mean_square' shape: "
+                               << ms_shape->ToString() << ", 'var' shape: " << var_shape->ToString() << ".";
     }
   }
   // var and mom must have the same shape when is not dynamic
   if (!var_shape_ptr->IsDynamic() && !mom_shape_ptr->IsDynamic()) {
     if (*var_shape != *mom_shape) {
-      MS_EXCEPTION(ValueError) << op_name << " moment shape " << mom_shape->ToString()
-                               << " are not consistent with var shape " << var_shape->ToString();
+      MS_EXCEPTION(ValueError) << "For '" << op_name
+                               << "', 'moment' must have the same shape as 'var'. But got 'moment' shape: "
+                               << mom_shape->ToString() << ", 'var' shape: " << var_shape->ToString() << ".";
     }
   }
   // var and grad must have the same shape when is not dynamic
   if (!var_shape_ptr->IsDynamic() && !grad_shape_ptr->IsDynamic()) {
     if (*var_shape != *grad_shape) {
-      MS_EXCEPTION(ValueError) << op_name << " grad shape " << grad_shape->ToString()
-                               << " are not consistent with var shape " << var_shape->ToString();
+      MS_EXCEPTION(ValueError) << "For '" << op_name
+                               << "', 'grad' must have the same shape as 'var'. But got 'grad' shape: "
+                               << grad_shape->ToString() << ", 'var' shape: " << var_shape->ToString() << ".";
     }
   }
   auto shape_element = var_shape->cast<abstract::ShapePtr>();

@@ -44,8 +44,10 @@ abstract::ShapePtr ApplyFtrlInferShape(const PrimitivePtr &primitive, const std:
   same_shape_args_map.insert({"linear", linear_shape});
   for (auto &elem : same_shape_args_map) {
     if (*elem.second != *var_shape) {
-      MS_EXCEPTION(ValueError) << prim_name << " evaluator arg " << elem.first << " shape " << elem.second->ToString()
-                               << " are not consistent with var shape " << var_shape->ToString();
+      MS_EXCEPTION(ValueError) << "For '" << prim_name << "', evaluator arg '" << elem.first
+                               << "' must have the same shape as 'var'. But got '" << elem.first
+                               << "' shape: " << elem.second->ToString() << ", 'var' shape: " << var_shape->ToString()
+                               << ".";
     }
   }
   auto shape_ptr = var_shape->cast<abstract::ShapePtr>();

@@ -39,8 +39,9 @@ abstract::ShapePtr ApplyGradientDescentInferShape(const PrimitivePtr &primitive,
   auto delta_shape_ptr = delta_shape->cast<abstract::ShapePtr>();
   if (!var_shape_ptr->IsDynamic() && !delta_shape_ptr->IsDynamic()) {
     if (*var_shape != *delta_shape) {
-      MS_EXCEPTION(ValueError) << prim_name << " evaluator arg delta shape " << delta_shape->ToString()
-                               << " are not consistent with var shape " << var_shape->ToString();
+      MS_EXCEPTION(ValueError) << "For '" << prim_name
+                               << "', evaluator arg 'delta' must have the same shape as 'var'. But got 'delta' shape: "
+                               << delta_shape->ToString() << ", 'var' shape: " << var_shape->ToString() << ".";
     }
   }
   // alpha must be a scalar [Number, Tensor]

@@ -50,8 +50,10 @@ abstract::TupleShapePtr ApplyKerasMomentumInferShape(const PrimitivePtr &primiti
   same_shape_args_map.insert({"grad", grad_shape});
   for (auto &elem : same_shape_args_map) {
     if (*elem.second != *var_shape) {
-      MS_EXCEPTION(ValueError) << prim_name << " evaluator arg " << elem.first << " shape " << elem.second->ToString()
-                               << " are not consistent with var shape " << var_shape->ToString();
+      MS_EXCEPTION(ValueError) << "For '" << prim_name << "', evaluator arg '" << elem.first
+                               << "' must have the same shape as 'var'. But got '" << elem.first
+                               << "' shape: " << elem.second->ToString() << ", 'var' shape: " << var_shape->ToString()
+                               << ".";
     }
   }
   return std::make_shared<abstract::TupleShape>(std::vector<abstract::BaseShapePtr>{var_shape, accum_shape});

@@ -73,8 +73,10 @@ abstract::TupleShapePtr ApplyPowerSignDInferShape(const PrimitivePtr &primitive,
   same_shape_args_map.insert({"grad", grad_shape});
   for (auto &elem : same_shape_args_map) {
     if (*elem.second != *var_shape) {
-      MS_EXCEPTION(ValueError) << prim_name << " evaluator arg " << elem.first << " shape " << elem.second->ToString()
-                               << " are not consistent with var shape " << var_shape->ToString();
+      MS_EXCEPTION(ValueError) << "For '" << prim_name << "', evaluator arg '" << elem.first
+                               << "' must have the same shape as 'var'. But got '" << elem.first
+                               << "' shape: " << elem.second->ToString() << ", 'var' shape: " << var_shape->ToString()
+                               << ".";
     }
   }
   return std::make_shared<abstract::TupleShape>(std::vector<abstract::BaseShapePtr>{var_shape, m_shape});

@@ -62,8 +62,10 @@ abstract::TupleShapePtr ApplyProximalAdagradInferShape(const PrimitivePtr &primi
   same_shape_args_map.insert({"grad", grad_shape_ptr});
   for (auto &elem : same_shape_args_map) {
     if (*elem.second != *var_shape_ptr) {
-      MS_EXCEPTION(ValueError) << prim_name << " evaluator arg " << elem.first << " shape " << elem.second->ToString()
-                               << " are not consistent with var shape " << var_shape_ptr->ToString();
+      MS_EXCEPTION(ValueError) << "For '" << prim_name << "', evaluator arg '" << elem.first
+                               << "' must have the same shape as 'var'. But got '" << elem.first
+                               << "' shape: " << elem.second->ToString()
+                               << ", 'var' shape: " << var_shape_ptr->ToString() << ".";
     }
   }
   return std::make_shared<abstract::TupleShape>(std::vector<abstract::BaseShapePtr>{var_shape_ptr, accum_shape_ptr});

@@ -61,8 +61,9 @@ abstract::ShapePtr ApplyProximalGradientDescentInferShape(const PrimitivePtr &pr
   auto delta_shape_ptr = delta_shape->cast<abstract::ShapePtr>();
   if (!var_shape_ptr->IsDynamic() && !delta_shape_ptr->IsDynamic()) {
     if (*var_shape != *delta_shape) {
-      MS_EXCEPTION(ValueError) << primitive->name() << " evaluator arg delta shape " << delta_shape->ToString()
-                               << " are not consistent with var shape " << var_shape->ToString();
+      MS_EXCEPTION(ValueError) << "For '" << primitive->name()
+                               << "', evaluator arg 'delta' must have the same shape as 'var'. But got 'delta' shape: "
+                               << delta_shape->ToString() << ", 'var' shape: " << var_shape->ToString() << ".";
     }
   }
   auto shape_element = var_shape->cast<abstract::ShapePtr>();

@@ -41,8 +41,9 @@ abstract::ShapePtr ApproximateEqualInferShape(const PrimitivePtr &primitive,
   MS_EXCEPTION_IF_NULL(shape_ptr_x2);
   if (!shape_ptr_x1->IsDynamic() && !shape_ptr_x2->IsDynamic()) {
     if (shape_ptr_x1->shape() != shape_ptr_x2->shape()) {
-      MS_EXCEPTION(ArgumentError) << "x1 shape and x2 shape should be same, but x1 shape: " << shape_ptr_x1->ToString()
-                                  << ", x2 shape:" << shape_ptr_x2->ToString();
+      MS_EXCEPTION(ArgumentError) << "For '" << prim_name
+                                  << "', arg 'x1' must have the same shape as 'x2'. But got 'x1' shape: "
+                                  << shape_ptr_x1->ToString() << ", 'x2' shape: " << shape_ptr_x2->ToString() << ".";
     }
   }
   return shape_ptr_x1;

@@ -43,14 +43,17 @@ abstract::ShapePtr AssignInferShape(const PrimitivePtr &prim, const std::vector<
     } else if (value_shape.size() == 1 && value_shape[0] == 1 && variable_shape.empty()) {
       return shape_element;
     } else {
-      MS_EXCEPTION(ValueError) << "For " << prim_name << ", the rank of value is " << value_shape.size()
-                               << ". It should be same with variable's rank " << variable_shape.size() << ".";
+      MS_EXCEPTION(ValueError) << "For '" << prim_name
+                               << "','value' must have the same rank as 'variable'. But got 'value' rank: "
+                               << value_shape.size() << ", 'variable' rank: " << variable_shape.size() << ".";
     }
   }
   for (uint64_t i = 0; i < variable_shape.size(); i++) {
     if (variable_shape[i] != value_shape[i]) {
-      MS_EXCEPTION(ValueError) << "For " << prim_name << ", the shape of value is " << value_shape_ptr->ToString()
-                               << ". It should be same with variable's shape " << variable_shape_ptr->ToString() << ".";
+      MS_EXCEPTION(ValueError) << "For '" << prim_name
+                               << "','value' must have the same shape as 'variable'. But got 'value' shape: "
+                               << value_shape_ptr->ToString()
+                               << ", 'variable' shape: " << variable_shape_ptr->ToString() << ".";
     }
   }
   return shape_element;

@@ -37,12 +37,13 @@ void GetAttrs(const PrimitivePtr &primitive, std::vector<int64_t> *kernel_size,
   // attr kernel size
   *kernel_size = GetValue<std::vector<int64_t>>(primitive->GetAttr(kKernelSize));
   if (kernel_size->size() != kKernelDims) {
-    MS_LOG(EXCEPTION) << "For '" << primitive->name() << "', kernel_size must be 5, but got " << kernel_size->size();
+    MS_LOG(EXCEPTION) << "For '" << primitive->name() << "', 'kernel_size' must be 5, but got " << kernel_size->size()
+                      << ".";
   }
   // attr strides
   *strides = GetValue<std::vector<int64_t>>(primitive->GetAttr(kStrides));
   if (strides->size() != kStridesDims) {
-    MS_LOG(EXCEPTION) << "For '" << primitive->name() << "',strides must be 5, but got " << strides->size();
+    MS_LOG(EXCEPTION) << "For '" << primitive->name() << "', 'strides' must be 5, but got " << strides->size() << ".";
   }
   if (std::any_of(strides->begin(), strides->end(), [](int64_t stride) { return stride <= 0; })) {
     MS_EXCEPTION(ValueError) << "For '" << primitive->name()

@@ -68,16 +68,17 @@ abstract::ShapePtr BatchMatmulInferShape(const PrimitivePtr &primitive,
   auto context = MsContext::GetInstance();
   bool is_ascend = (context->get_param<std::string>(MS_CTX_DEVICE_TARGET) == kAscendDevice);
   if (!is_ascend && x_shp.size() != y_shp.size()) {
-    MS_EXCEPTION(ValueError) << "For BatchMatMul on cpu/gpu, input x's size should be equal to input y's size, "
-                                "while x size = "
-                             << x_shp.size() << ", y size = " << y_shp.size();
+    MS_EXCEPTION(ValueError) << "For '" << prim_name
+                             << "' on cpu/gpu, input 'x' should have the same size as input 'y'. But got 'x' size: "
+                             << x_shp.size() << "'y' size: " << y_shp.size() << ".";
   }
   constexpr size_t x_dim_limit = 3;
   constexpr size_t y_dim_limit = 2;
   if (x_shp.size() < x_dim_limit || y_shp.size() < y_dim_limit) {
-    MS_EXCEPTION(ValueError) << "For BatchMatMul, input x should be greater or equal to 3, input y should be greater "
-                                "or equal to 2 while x size = "
-                             << x_shp.size() << ", y size = " << y_shp.size();
+    MS_EXCEPTION(ValueError)
+      << "For '" << prim_name
+      << "', input 'x' should be greater or equal to 3, input 'y' should be greater or equal to 2. But got 'x': "
+      << x_shp.size() << ", 'y': " << y_shp.size() << ".";
   }
   constexpr size_t offset = 2;
   std::vector<int> x_last(x_shp.end() - offset, x_shp.end());

@@ -146,8 +147,8 @@ TypePtr BatchMatmulInferType(const PrimitivePtr &prim, const std::vector<Abstrac
     auto out_type = prim->GetAttr("cast_type");
     MS_EXCEPTION_IF_NULL(out_type);
     if (!out_type->isa<Type>()) {
-      MS_EXCEPTION(ValueError) << "For '" << prim->name() << "', MatMul cast_type must be a 'Type', but got "
-                               << out_type;
+      MS_EXCEPTION(ValueError) << "For '" << prim->name() << "', MatMul cast_type must be a 'Type', but got: '"
+                               << out_type << "'.";
     }
     x_type = out_type->cast<TypePtr>();
   }

@@ -51,18 +51,21 @@ abstract::ShapePtr BoundingBoxDecodeInferShape(const PrimitivePtr &primitive,
                                            prim_name);

   if (anchor_box_shape[0] != deltas_shape[0]) {
-    MS_EXCEPTION(ValueError) << "anchor_box first dimension must be the same as deltas first dimension, but got"
-                                "anchor_box_shape: "
-                             << anchor_box_shape[0] << ", deltas_shape: " << deltas_shape[0];
+    MS_EXCEPTION(ValueError)
+      << "For '" << prim_name
+      << "', 'anchor_box' and 'deltas' must have the same first dimension. But got anchor_box_shape[0]: "
+      << anchor_box_shape[0] << ", deltas_shape[0]: " << deltas_shape[0] << ".";
   }

   const int64_t last_dimension = 4;
   if (anchor_box_shape[1] != last_dimension) {
-    MS_EXCEPTION(ValueError) << "anchor_box last dimension must be 4, but got anchor_box_shape: "
-                             << anchor_box_shape[1];
+    MS_EXCEPTION(ValueError) << "For '" << prim_name
+                             << "', 'anchor_box' must have a last dimension of 4, but got: " << anchor_box_shape[1]
+                             << ".";
   }
   if (deltas_shape[1] != last_dimension) {
-    MS_EXCEPTION(ValueError) << "deltas last dimension must be 4, but got deltas_shape: " << deltas_shape[1];
+    MS_EXCEPTION(ValueError) << "For '" << prim_name
+                             << "', 'deltas' must have a last dimension of 4, but got: " << deltas_shape[1] << ".";
   }

   auto x_shape = anchor_box->cast<abstract::ShapePtr>();

@@ -48,24 +48,23 @@ abstract::TupleShapePtr CoalesceInferShape(const PrimitivePtr &primitive,
   auto x_values_shape = CheckAndConvertUtils::ConvertShapePtrToShapeMap(input_args[kInputIndex1]->BuildShape())[kShape];
   auto x_shape_shape = CheckAndConvertUtils::ConvertShapePtrToShapeMap(input_args[kInputIndex2]->BuildShape())[kShape];
   if (x_indices_shape.size() != x_indices_shape_size || x_values_shape.size() != 1 || x_shape_shape.size() != 1) {
-    MS_EXCEPTION(ValueError) << "For Coalesce, x_indices should be a 2-D tensor"
-                             << ", x_values should be a 1-D tensor"
-                             << ", x_shape should be a 1-D tensor"
-                             << ", but got x_indices is a " << x_indices_shape.size() << "-D tensor"
-                             << ", got x_values is a " << x_values_shape.size() << "-D tensor"
-                             << ", got x_shape is a " << x_shape_shape.size() << "-D tensor";
+    MS_EXCEPTION(ValueError) << "For '" << prim_name << "' x_indices must be a 2-D tensor"
+                             << ", x_values and x_shape must be a 1-D tensor, but got x_indices is a "
+                             << x_indices_shape.size() << "-D tensor, got x_values is a " << x_values_shape.size()
+                             << "-D tensor, got x_shape is a " << x_shape_shape.size() << "-D tensor"
+                             << ".";
   }
   if (x_indices_shape[0] != x_shape_shape[0]) {
     MS_EXCEPTION(ValueError) << "For " << prim_name
-                             << ", sizes of dim0 of x_indices and dim0 of x_shape should be the same"
-                             << ", but size of dim0 of got x_indices is " << x_indices_shape[0]
-                             << ", size of dim0 of got x_shape is " << x_shape_shape[0];
+                             << ", size of dim0 of x_indices and dim0 of x_shape must be the same"
+                             << ", but got x_indices dim0 size: " << x_indices_shape[0]
+                             << ", x_shape dim0 size: " << x_shape_shape[0] << ".";
   }
   if (x_indices_shape[1] != x_values_shape[0]) {
     MS_EXCEPTION(ValueError) << "For " << prim_name
-                             << ", sizes of dim1 of x_indices and dim0 of x_values should be the same"
-                             << ", but size of dim1 of got x_indices is " << x_indices_shape[1]
-                             << ", size of dim0 of got x_values is " << x_values_shape[0];
+                             << ", size of dim1 of x_indices and dim0 of x_values must be the same"
+                             << ", but got x_indices dim1 size: " << x_indices_shape[1]
+                             << ", x_shape dim0 size: " << x_values_shape[0] << ".";
   }
   ShapeVector y_indices_shape = {x_indices_shape[0], -1};
   ShapeVector y_indices_min_shape = {x_indices_shape[0], 1};

@@ -96,8 +96,9 @@ ValuePtr ComplexInferValue(const PrimitivePtr &prim, const std::vector<AbstractB
   }

   if (real_tensor->data_type() != imag_tensor->data_type()) {
-    MS_EXCEPTION(TypeError) << "Inputs of Complex should be same, but got " << real_tensor->data_type() << "and "
-                            << imag_tensor->data_type();
+    MS_EXCEPTION(TypeError) << "For 'ComplexInfer', the real part and imaginary part of input should have the same "
+                               "data type. But got real type: "
+                            << real_tensor->data_type() << ", imaginary type: " << imag_tensor->data_type() << ".";
   }

   auto data_size = real_tensor->DataSize();

@@ -85,7 +85,8 @@ TypePtr ConcatInferType(const PrimitivePtr &primitive, const std::vector<Abstrac
   MS_EXCEPTION_IF_NULL(primitive);
   auto prim_name = primitive->name();
   if (!input_args[0]->isa<abstract::AbstractTuple>() && !input_args[0]->isa<abstract::AbstractList>()) {
-    MS_EXCEPTION(TypeError) << "The input of Concat must be list or tuple of tensors.";
+    MS_EXCEPTION(TypeError) << "For '" << prim_name << "', the input should be a list or tuple of tensors. But got:"
+                            << input_args[0]->ToString() << ".";
   }
   auto elements = input_args[0]->isa<abstract::AbstractTuple>()
                     ? input_args[0]->cast<abstract::AbstractTuplePtr>()->elements()

@@ -44,8 +44,8 @@ constexpr size_t right_padding = 3;
 void CheckShapeAnyAndPositive(const std::string &op, const ShapeVector &shape) {
   for (size_t i = 0; i < shape.size(); ++i) {
     if ((shape[i] < 0) && (shape[i] != Shape::SHP_ANY)) {
-      MS_EXCEPTION(ValueError) << op << " shape element [" << i << "] must be positive integer or SHP_ANY, but got "
-                               << shape[i];
+      MS_EXCEPTION(ValueError) << "For '" << op << "', shape element [" << i
+                               << "] must be positive integer or SHP_ANY, but got: " << shape[i] << ".";
     }
   }
 }

@@ -53,7 +53,8 @@ void CheckShapeAnyAndPositive(const std::string &op, const ShapeVector &shape) {
 void CheckShapeAllPositive(const std::string &op, const ShapeVector &shape) {
   for (size_t i = 0; i < shape.size(); ++i) {
     if (shape[i] < 0) {
-      MS_LOG(EXCEPTION) << op << " shape element [" << i << "] must be positive integer, but got " << shape[i];
+      MS_LOG(EXCEPTION) << "For '" << op << "', shape element [" << i
+                        << "] must be positive integer, but got: " << shape[i] << ".";
     }
   }
 }

@@ -62,7 +63,8 @@ int64_t CheckAttrPositiveInt64(const std::string &op, const ValuePtr &attr, cons
   MS_EXCEPTION_IF_NULL(attr);
   int64_t attr_val = attr->cast<Int64ImmPtr>()->value();
   if (attr_val <= 0) {
-    MS_LOG(EXCEPTION) << op << " invalid " << attr_name << " value: " << attr_val << ", should be greater then 0";
+    MS_LOG(EXCEPTION) << "For '" << op << "', '" << attr_name << "' should be greater than 0, but got: " << attr_val
+                      << ".";
   }
   return attr_val;
 }

@@ -72,16 +72,25 @@ abstract::ShapePtr CropAndResizeGradBoxesInferShape(const PrimitivePtr &primitiv
   (void)CheckAndConvertUtils::CheckInteger("box_index rank", SizeToLong(input_shape3.size()), kEqual, kBoxIndShapeLen,
                                            prim_name);
   if (!(input_shape1[kHeight] > 0 && input_shape1[kWidth] > 0)) {
-    MS_EXCEPTION(ValueError) << "the height and width of images must be over 0 ";
+    MS_EXCEPTION(ValueError) << "For '" << prim_name
+                             << "', the height and width of images must be greater than 0. But got height: "
+                             << input_shape1[kHeight] << ", width: " << input_shape1[kWidth] << ".";
   }
   if (!(input_shape0[kHeight] > 0 && input_shape0[kWidth] > 0)) {
-    MS_EXCEPTION(ValueError) << "the height and width of grads must be over 0 ";
+    MS_EXCEPTION(ValueError) << "For '" << prim_name
+                             << "', the height and width of grads must be greater than 0. But got height: "
+                             << input_shape1[kHeight] << ", width: " << input_shape1[kWidth] << ".";
   }
   if (!(input_shape0[0] == input_shape3[0] && input_shape2[0] == input_shape0[0])) {
-    MS_EXCEPTION(ValueError) << "the first dimension of the tensors in {grads, boxes, box_index} must be equal.";
+    MS_EXCEPTION(ValueError)
+      << "For '" << prim_name
+      << "', the first dimension of the tensors in {grads, boxes, box_index} must be equal. But got grads[0]: "
+      << input_shape0[0] << ", boxes[0]: " << input_shape2[0] << ", box_index[0]: " << input_shape3[0] << ".";
   }
   if (input_shape0[kDepth] != input_shape1[kDepth]) {
-    MS_EXCEPTION(ValueError) << "the depth of grads and images must be equal.";
+    MS_EXCEPTION(ValueError) << "For '" << prim_name
+                             << "', the depth of grads and images must be equal. But grads depth: "
+                             << input_shape0[kDepth] << ", images depth: " << input_shape1[kDepth] << ".";
   }
   return std::make_shared<abstract::Shape>(input_shape2);
 }

@@ -57,24 +57,24 @@ abstract::ShapePtr CrossInferShape(const PrimitivePtr &primitive, const std::vec
         break;
       }
       if (i == x1_shape.size() - 1 && x1_shape[i] != dim_size_value) {
-        MS_EXCEPTION(ValueError) << "For '" << primitive->name() << "', The size of inputs dim should be 3, but got "
-                                 << x1_shape[i];
+        MS_EXCEPTION(ValueError) << "For '" << primitive->name() << "', the size of inputs dim should be 3, but got "
+                                 << x1_shape[i] << ".";
       }
     }
   }
   if ((dim < -static_cast<int64_t>(x1_shape.size()) || dim > static_cast<int64_t>(x1_shape.size()) - 1) &&
       dim != default_dim) {
-    MS_EXCEPTION(ValueError) << "For '" << primitive->name() << "',dim should be between "
+    MS_EXCEPTION(ValueError) << "For '" << primitive->name() << "', dim should be between "
                              << -static_cast<int64_t>(x1_shape.size()) << " and "
-                             << static_cast<int64_t>(x1_shape.size()) - 1 << " ,but got " << dim;
+                             << static_cast<int64_t>(x1_shape.size()) - 1 << " , but got " << dim << ".";
   }
   if (dim < 0 && dim != default_dim) {
     dim = static_cast<int64_t>(x1_shape.size()) + dim;
   }
   int64_t dim_size = 3;
   if (x1_shape[dim] != dim_size && x2_shape[dim] != dim_size && dim != default_dim) {
-    MS_EXCEPTION(ValueError) << "For '" << primitive->name() << "', The size of inputs dim should be 3, but got "
-                             << x1_shape[dim];
+    MS_EXCEPTION(ValueError) << "For '" << primitive->name() << "', the size of inputs dim should be 3, but got "
+                             << x1_shape[dim] << ".";
   }
   return std::make_shared<abstract::Shape>(x1_shape);
 }

@@ -48,10 +48,11 @@ abstract::TupleShapePtr CTCLossV2InferShape(const PrimitivePtr &primitive,
   auto targets_shape = targets_shape_map[kShape];
   if (log_probs_shape.size() != kLenLogProbs) {
     MS_LOG(EXCEPTION) << "For '" << prim_name
-                      << "', Input log_probs's dims must be 3, but got :" << log_probs_shape.size();
+                      << "', input log_probs's dim must be 3, but got: " << log_probs_shape.size() << ".";
   }
   if (targets_shape.size() != kLenTarget) {
-    MS_LOG(EXCEPTION) << "For '" << prim_name << "', Input targets's dims must be 2, but got :" << targets_shape.size();
+    MS_LOG(EXCEPTION) << "For '" << prim_name << "', input targets's dims must be 2, but got: " << targets_shape.size()
+                      << ".";
   }
   int64_t T = log_probs_shape[0];
   int64_t N = log_probs_shape[1];

@@ -44,7 +44,7 @@ abstract::ShapePtr CTCLossV2GradInferShape(const PrimitivePtr &primitive,
   auto log_probs_shape = log_probs_shape_map[kShape];
   if (log_probs_shape.size() != kLenLogProbs) {
     MS_LOG(EXCEPTION) << "For '" << prim_name
-                      << "', Input log_probs's dims must be 3, but got :" << log_probs_shape.size();
+                      << "', input log_probs's dims must be 3, but got: " << log_probs_shape.size() << ".";
   }
   int64_t T = log_probs_shape[0];
   int64_t N = log_probs_shape[1];

@@ -58,12 +58,14 @@ void CheckCTCLossInputs(const std::vector<AbstractBasePtr> &input_args, const st
                                            op_name);

   if (labels_indices_shape[0] != labels_values_shape[0]) {
-    MS_EXCEPTION(ValueError) << "For CTCLoss first dim of label_indices and label_value must be same, but got "
-                             << labels_indices_shape[0] << " and " << labels_values_shape[0];
+    MS_EXCEPTION(ValueError)
+      << "For 'CTCLoss', the first dim of 'label_indices' and 'label_value' must be same, but got 'label_indices':"
+      << labels_indices_shape[0] << ", 'label_value': " << labels_values_shape[0] << ".";
   }
   if (inputs_shape[1] != sequence_length_shape[0]) {
-    MS_EXCEPTION(ValueError) << "For CTCLoss input batch_size must be same with sequence_length batch_size, but got "
-                             << inputs_shape[1] << " and " << sequence_length_shape[0];
+    MS_EXCEPTION(ValueError)
+      << "For 'CTCLoss', input batch_size must be same with 'sequence_length' batch_size, but got input batch_size:"
+      << inputs_shape[1] << ", 'sequence_length' batch_size: " << sequence_length_shape[0] << ".";
   }
 }

@@ -33,13 +33,14 @@ abstract::TupleShapePtr CummaxInferShape(const PrimitivePtr &primitive,
   auto x_shape_value = CheckAndConvertUtils::ConvertShapePtrToShapeMap(x_shape)[kShape];
   auto axis = GetValue<int64_t>(primitive->GetAttr(AXIS));
   if (x_shape_value.size() <= 0) {
-    MS_EXCEPTION(ValueError) << "For '" << primitive->name() << "', inputs 'axis' should be greater than 0, but got "
-                             << x_shape_value.size() << ".";
+    MS_EXCEPTION(ValueError) << "For '" << primitive->name()
+                             << "', input tensor's shape size must be greater than 0, but got: " << x_shape_value.size()
+                             << ".";
   }
   if (axis >= static_cast<int64_t>(x_shape_value.size()) || axis < -static_cast<int64_t>(x_shape_value.size())) {
-    MS_EXCEPTION(ValueError) << "For '" << primitive->name() << "',The value of 'axis' should be in the range of ["
+    MS_EXCEPTION(ValueError) << "For '" << primitive->name() << "', the value of 'axis' must be in the range of ["
                              << -static_cast<int64_t>(x_shape_value.size()) << ","
-                             << static_cast<int64_t>(x_shape_value.size()) << "], but got axis:" << axis << ".";
+                             << static_cast<int64_t>(x_shape_value.size()) << "], but got 'axis': " << axis << ".";
   }
   return std::make_shared<abstract::TupleShape>(std::vector<abstract::BaseShapePtr>{x_shape, x_shape});
 }

@@ -32,8 +32,8 @@ abstract::ShapePtr DiagPartInferShape(const PrimitivePtr &primitive, const std::
   auto op_name = primitive->name();
   auto input_shape = CheckAndConvertUtils::ConvertShapePtrToShapeMap(input_args[0]->GetShapeTrack())[kShape];
   if ((input_shape.size() % kScaleNum) != 0 || input_shape.size() == 0) {
-    MS_EXCEPTION(ValueError) << "For DiagPart, input rank must be non-zero and even, but got rank "
-                             << input_shape.size();
+    MS_EXCEPTION(ValueError) << "For 'DiagPart', input rank must be non-zero and even, but got rank: "
+                             << input_shape.size() << ".";
   }
   auto length = input_shape.size() / kScaleNum;
   std::vector<int64_t> out_shape;

@@ -155,8 +155,8 @@ ValuePtr DivNoNanInferValue(const PrimitivePtr &prim, const std::vector<Abstract
     default: {
       MS_EXCEPTION(TypeError) << "For '" << prim->name()
                               << "', the supported type is in the list: ['bool', 'int8', 'int16', 'int32', 'int64', "
-                                 "'uint8', 'uint16', 'uint32', 'uint64', 'float16', 'float32', 'float64'], but got "
-                              << result_type->ToString();
+                                 "'uint8', 'uint16', 'uint32', 'uint64', 'float16', 'float32', 'float64'], but got: "
+                              << result_type->ToString() << ".";
     }
   }
   return result_tensor;

@@ -36,8 +36,8 @@ T GetAndCheckKeepProp(const tensor::TensorPtr &keep_prop) {
   T min = (T)0.0;
   T max = (T)1.0;
   if (*value < min || *value > max) {
-    MS_EXCEPTION(ValueError) << "For 'DropoutDoMask', the 'keep_prop' input value must in the range [0, 1], but got "
-                             << *value << ".";
+    MS_EXCEPTION(ValueError)
+      << "For 'DropoutDoMask', the 'keep_prop' input value must be in the range [0, 1], but got: " << *value << ".";
   }
   return *value;
 }

@@ -59,22 +59,22 @@ abstract::ShapePtr DropoutDoMaskInferShape(const PrimitivePtr &primitive,
       x_size *= x_shape_vector[i];
     }
     if (mask_shape_vector.size() != 1) {
-      MS_EXCEPTION(ValueError) << "For 'DropoutDoMask', the input mask must be 1-dimension, but got "
+      MS_EXCEPTION(ValueError) << "For 'DropoutDoMask', the input 'mask' must be 1-dimension, but got: "
                                << mask_shape_vector.size() << ".";
     }
     auto mask_size = mask_shape_vector[0] * 8;
     if (x_size > mask_size) {
       MS_EXCEPTION(ValueError)
-        << "For 'DropoutDoMask', the input mask should be less than or equal to match input, but got input_x shape: "
-        << x_shape->ToString() << ", mask shape: " << mask_shape->ToString();
+        << "For 'DropoutDoMask', the input 'mask' must be less than or equal to match input, but got 'input_x' shape: "
+        << x_shape->ToString() << ", 'mask' shape: " << mask_shape->ToString() << ".";
     }
   }
   auto keep_prop = input_args[kInputIndex2];
   if (keep_prop->isa<abstract::AbstractTensor>()) {
     auto keep_prop_shape = CheckAndConvertUtils::ConvertShapePtrToShapeMap(keep_prop->BuildShape())[kShape];
     if (!keep_prop_shape.empty()) {
-      MS_EXCEPTION(ValueError) << "'For DropoutDoMask', the keep_prop's dim must be 0(scalar), but got "
-                               << keep_prop_shape.size();
+      MS_EXCEPTION(ValueError) << "'For 'DropoutDoMask', dim of 'keep_prop' must be 0(scalar), but got: "
+                               << keep_prop_shape.size() << ".";
     }
   }
   return x_shape;

@@ -106,16 +106,16 @@ TypePtr DropoutDoMaskInferType(const PrimitivePtr &primitive, const std::vector<
   } else if (keep_prop->isa<abstract::AbstractScalar>()) {
     if (keep_prop_value != nullptr) {
       if (!keep_prop_value->isa<FloatImm>()) {
-        MS_EXCEPTION(TypeError) << "For 'DropoutDoMask', the keep_prop input type must be float.";
+        MS_EXCEPTION(TypeError) << "For 'DropoutDoMask', the 'keep_prop' input type must be float.";
       }
       auto value = GetValue<float>(keep_prop_value);
       if (value < 0 || value > 1) {
-        MS_EXCEPTION(ValueError) << "For 'DropoutDoMask', the keep_prop input value must in the range [0, 1], but got "
-                                 << value << ".";
+        MS_EXCEPTION(ValueError) << "For 'DropoutDoMask', the 'keep_prop' must in the range [0, 1], but got: " << value
+                                 << ".";
       }
     }
   } else {
-    MS_EXCEPTION(TypeError) << "For 'DropoutDoMask', the keep_prop input must be a float number or tensor.";
+    MS_EXCEPTION(TypeError) << "For 'DropoutDoMask', the 'keep_prop' input must be a float number or tensor.";
   }

   (void)CheckAndConvertUtils::CheckTensorTypeValid("inputs", input_args[1]->BuildType(), {kUInt8}, op_name);

@@ -41,11 +41,14 @@ ShapeVector CalDynamicOutputShape(const PrimitivePtr &primitive, const ValuePtrL
     if (indexed_value->isa<Int64Imm>()) {
       value = GetValue<int64_t>(indexed_value);
     } else {
-      MS_LOG(EXCEPTION) << "DropOutGenMask shape value must be int64, but " << indexed_value->ToString();
+      MS_LOG(EXCEPTION) << "For '" << primitive->name()
+                        << "', the type of shape value must be int64, but got: " << indexed_value->ToString() << ".";
    }

     if (value <= 0) {
-      MS_LOG(EXCEPTION) << "DropOutGenMask product of value should be > 0";
+      MS_LOG(EXCEPTION) << "For '" << primitive->name()
+                        << "', product of value must be greater than 0, but got: " << value << ".";
     }

     if (std::numeric_limits<int64_t>::max() / count / value < 1) {

@@ -76,12 +79,13 @@ ShapeVector CalOutputShape(const PrimitivePtr &primitive, const AbstractBasePtrL
       value = GetValue<int64_t>(value_track);
     } else {
       MS_LOG(EXCEPTION) << "For '" << primitive->name()
-                        << "', input x_shape elements should be int64 or int32, but got " << value_track->ToString()
+                        << "', input x_shape elements must be int64 or int32, but got: " << value_track->ToString()
                         << ".";
     }

     if (value <= 0) {
-      MS_LOG(EXCEPTION) << "For '" << primitive->name() << "', product of value should be > 0, but got " << value;
+      MS_LOG(EXCEPTION) << "For '" << primitive->name()
+                        << "', product of value must be greater than 0, but got: " << value << ".";
     }

     if (std::numeric_limits<int64_t>::max() / count / value < 1) {

@@ -128,8 +132,8 @@ abstract::ShapePtr DropoutGenMaskInferShape(const PrimitivePtr &primitive,
   auto shape = shape_base->cast<abstract::ShapePtr>();
   MS_EXCEPTION_IF_NULL(shape);
   if (shape->shape().size() != 1) {
-    MS_EXCEPTION(TypeError) << "For '" << op_name << "', Input 'shape' must be a 1-D Tensor, but got "
-                            << shape->shape().size() << ".";
+    MS_EXCEPTION(TypeError) << "For '" << op_name
+                            << "', input 'shape' must be a 1-D Tensor, but got: " << shape->shape().size() << ".";
   }
   size_t shape_rank = LongToSize(shape->shape()[0]);

@@ -145,7 +149,7 @@ abstract::ShapePtr DropoutGenMaskInferShape(const PrimitivePtr &primitive,
   if (max_value.size() != shape_rank || min_value.size() != shape_rank) {
     MS_LOG(EXCEPTION)
       << "For '" << op_name
-      << "', The size of max_value and min_value should be equal to the shape rank, but got max_value's size:"
+      << "', the size of max_value and min_value must be equal to the shape rank, but got max_value's size: "
       << max_value.size() << ", min_value's size: " << min_value.size() << ".";
   }
   ShapeVector out_min_shape = CalDynamicOutputShape(primitive, min_value);

@@ -36,6 +36,8 @@ int64_t CheckInputsAndGetShape(const AbstractBasePtr &input_arg, const string &p
   auto input_size = input_shape.size();
   if (input_size != 1) {
-    MS_EXCEPTION(TypeError) << "For " << prim_name << "', input must be 1-D, but dims is " << input_size;
+    MS_EXCEPTION(ValueError) << "For '" << prim_name << "', input shape must be 1-D, but got: " << input_size
+                             << "-D.";
   }
   if (input_shape[0] == abstract::Shape::SHP_ANY) {
     auto max_shape = CheckAndConvertUtils::ConvertShapePtrToShapeMap(input_arg->BuildShape())[kMaxShape];

@@ -41,8 +41,8 @@ abstract::ShapePtr DynamicBroadcastToInferShape(const PrimitivePtr &primitive,
   y_shape = CheckAndConvertUtils::GetTensorInputShape(prim_name, input_args, 1);
   auto shape_value = y_shape->shape();
   if (shape_value.size() != 1) {
-    MS_EXCEPTION(TypeError) << "For '" << prim_name << "', the shape size should be 1, but got "
-                            << shape_value.size();
+    MS_EXCEPTION(TypeError) << "For '" << prim_name << "', the shape size must be 1, but got: " << shape_value.size()
+                            << ".";
   }
   std::vector<int64_t> output_shape;
   std::vector<int64_t> max_shape;

@@ -58,12 +58,17 @@ abstract::ShapePtr DynamicBroadcastToInferShape(const PrimitivePtr &primitive,
     auto min_value = input_y->cast<abstract::AbstractTensorPtr>()->get_min_value();
     auto max_value = input_y->cast<abstract::AbstractTensorPtr>()->get_max_value();
     if (!min_value || !max_value) {
-      MS_EXCEPTION(ValueError) << "For BroadcastTo, inputs['shape'] min or max value is empty.";
+      MS_EXCEPTION(ValueError)
+        << "For 'BroadcastTo', inputs['shape'] min or max value can not be empty. But got min: " << min_value
+        << "max: " << max_value << ".";
     }
     min_shape = GetValue<std::vector<int64_t>>(min_value);
     max_shape = GetValue<std::vector<int64_t>>(max_value);
     if (min_shape.size() != out_dims || max_shape.size() != out_dims) {
-      MS_EXCEPTION(ValueError) << "For BroadcastTo, inputs['shape'] min or max value not match with out dims.";
+      MS_EXCEPTION(ValueError) << "For 'BroadcastTo', inputs['shape'] min or max must have the same size as output's"
+                                  ". But got min shape size: "
+                               << min_shape.size() << ", max shape size: " << max_shape.size()
+                               << ", output size: " << out_dims << ".";
     }
   }
   return std::make_shared<abstract::Shape>(output_shape, min_shape, max_shape);

@@ -71,7 +76,7 @@ abstract::ShapePtr DynamicBroadcastToInferShape(const PrimitivePtr &primitive,
     auto out_shape = GetValue<std::vector<int64_t>>(y_value);
     return std::make_shared<abstract::Shape>(out_shape);
   }
-  MS_EXCEPTION(TypeError) << "For BroadcastTo, input args must be tensor or tuple.";
+  MS_EXCEPTION(TypeError) << "For 'BroadcastTo', input args must be tensor or tuple.";
 }

 TypePtr DynamicBroadcastToInferType(const PrimitivePtr &prim, const std::vector<AbstractBasePtr> &input_args) {

@@ -57,12 +57,14 @@ abstract::ShapePtr DynamicResizeNearestNeighborInferShape(const PrimitivePtr &pr
     auto min_value = size->cast<abstract::AbstractTensorPtr>()->get_min_value();
     auto max_value = size->cast<abstract::AbstractTensorPtr>()->get_max_value();
     if (!min_value || !max_value) {
-      MS_EXCEPTION(ValueError) << "For ResizeNearestNeighbor, inputs['size'] min or max value is empty.";
+      MS_EXCEPTION(ValueError) << "For 'ResizeNearestNeighbor', inputs['size'] min or max value is can not be empty.";
     }
     min_size = GetValue<std::vector<int64_t>>(min_value);
     max_size = GetValue<std::vector<int64_t>>(max_value);
     if (min_size.size() != size_size || max_size.size() != size_size) {
-      MS_EXCEPTION(ValueError) << "For ResizeNearestNeighbor, inputs['size'] min or max value size is not 2.";
+      MS_EXCEPTION(ValueError)
+        << "For 'ResizeNearestNeighbor', inputs['size'] min and max value size must be 2, but got min: "
+        << min_size.size() << ", max: " << max_size.size() << ".";
     }
   }
 } else if (size->isa<abstract::AbstractTuple>()) {

@@ -78,8 +78,8 @@ static void seg_left_equation(const std::string &left_equation, const std::strin
       }
       found_ell = false;
     } else {
-      MS_EXCEPTION(ValueError) << "For " << prim_name << ", Operand " << cur_element
-                               << " in the equation contains invalid subscript, which can only consist of [a-zA-z].";
+      MS_EXCEPTION(ValueError) << "For '" << prim_name << "', operand " << cur_element
+                               << " in the equation can only contain [a-zA-z], but got: " << cur_element << ".";
     }
   }
   if (cur_element != input_shapes.size() - 1) {

@@ -234,13 +234,13 @@ abstract::ShapePtr EinsumInferShape(const PrimitivePtr &primitive, const std::ve
   auto equation = GetValue<std::string>(primitive->GetAttr(kEquation));
   equation.erase(std::remove(equation.begin(), equation.end(), ' '), equation.end());
   if (equation.length() == 0) {
-    MS_EXCEPTION(ValueError) << "For " << prim_name << ", the equation is required, but got none.";
+    MS_EXCEPTION(ValueError) << "For '" << prim_name << "', the equation is required, but got none.";
   }
   const std::string seg_arrow = "->";
   const auto seg_pos = equation.find(seg_arrow);
   if (seg_pos == 0) {
-    MS_EXCEPTION(ValueError) << "For " << prim_name
-                             << ", the equation should contain characters to the left of the arrow, but got none.";
+    MS_EXCEPTION(ValueError) << "For '" << prim_name
+                             << "', the equation should contain characters to the left of the arrow, but got none.";
   }

   (void)CheckAndConvertUtils::CheckInteger("input number", SizeToLong(input_args.size()), kGreaterEqual, 1, prim_name);

@@ -259,13 +259,13 @@ abstract::ShapePtr EinsumInferShape(const PrimitivePtr &primitive, const std::ve
     auto shape = elements[idx]->BuildShape();
     MS_EXCEPTION_IF_NULL(shape);
     if (shape->IsDimZero()) {
-      MS_EXCEPTION(ValueError) << "For " << prim_name << ", the dim of inputs' shape can not be zero, but got input["
+      MS_EXCEPTION(ValueError) << "For '" << prim_name << "', the dim of inputs' shape can not be zero, but got input["
                                << idx << "] shape: " << shape->ToString() << ".";
     }
     auto &shape_vec = shape->cast<abstract::ShapePtr>()->shape();
     for (auto &val : shape_vec) {
       if (val == 0) {
-        MS_EXCEPTION(ValueError) << "For " << prim_name << ", the shape can not contain zero, but got input[" << idx
+        MS_EXCEPTION(ValueError) << "For '" << prim_name << "', the shape can not contain zero, but got input[" << idx
                                  << "] shape: " << shape->ToString() << ".";
       }
     }

@@ -41,11 +41,14 @@ abstract::ShapePtr FillV2InferShape(const PrimitivePtr &primitive, const std::ve
   int64_t max_length = GetValue<int64_t>(max_length_ptr);
   auto input1_shape = CheckAndConvertUtils::ConvertShapePtrToShapeMap(input_args[0]->BuildShape())[kShape];
   if (input1_shape.size() != 1) {
-    MS_EXCEPTION(ValueError) << "the shape size of the input1 must be equal to 1.";
+    MS_EXCEPTION(ValueError) << "For '" << primitive->name()
+                             << "', the shape size of 'input1' must be 1, but got: " << input1_shape.size() << ".";
   }
   auto input2_shape = CheckAndConvertUtils::ConvertShapePtrToShapeMap(input_args[1]->BuildShape())[kShape];
   if (input2_shape.size() != 0) {
-    MS_EXCEPTION(ValueError) << "the shape size of the input2 must be equal to 0.";
+    MS_EXCEPTION(ValueError) << "For '" << primitive->name()
+                             << "', the shape size of 'input2' must be 0, but got: " << input2_shape.size() << ".";
   }
   auto input_shape = input_args[0]->cast<abstract::AbstractTensorPtr>();
   MS_EXCEPTION_IF_NULL(input_shape);

@@ -62,7 +64,8 @@ abstract::ShapePtr FillV2InferShape(const PrimitivePtr &primitive, const std::ve
     CheckAndConvertUtils::ConvertShapePtrToShapeMap(input_args[0]->BuildShape())[kShape]);
   auto shape_v = shape_ptr->shape();
   if (shape_v.size() != kInputDims) {
-    MS_EXCEPTION(ValueError) << "The input tensor must be a 1-D tensor.";
+    MS_EXCEPTION(ValueError) << "For '" << primitive->name()
+                             << "', input must be a 1-D tensor, but got: " << shape_v.size() << "-D.";
   }
   if (!input_args[0]->BuildValue()->isa<AnyValue>() && !input_args[0]->BuildValue()->isa<None>()) {
     std::vector<int64_t> out_shape;

@@ -74,7 +77,8 @@ abstract::ShapePtr FillV2InferShape(const PrimitivePtr &primitive, const std::ve
         out_shape.push_back(input_shape_ptr[i]);
         shape_m *= input_shape_ptr[i];
       } else {
-        MS_EXCEPTION(ValueError) << "Each dimension must be greater than 0.";
+        MS_EXCEPTION(ValueError) << "For '" << primitive->name()
+                                 << "', each dimension of input shape must be greater than 0.";
       }
     }
   } else if (input_type_element->type_id() == kNumberTypeInt64) {

@@ -84,16 +88,18 @@ abstract::ShapePtr FillV2InferShape(const PrimitivePtr &primitive, const std::ve
         out_shape.push_back(input_shape_ptr[i]);
         shape_m *= input_shape_ptr[i];
       } else {
-        MS_EXCEPTION(ValueError) << "Each dimension must be greater than 0.";
+        MS_EXCEPTION(ValueError) << "For '" << primitive->name()
+                                 << "', each dimension of input shape must be greater than 0.";
       }
     }
   } else {
-    MS_EXCEPTION(TypeError) << "the datatype of the input1 not support, support datatype: int32, int64.";
+    MS_EXCEPTION(TypeError) << "For '" << primitive->name() << "', the dtype of input1 must be in [int32, int64].";
   }
   if (shape_m > max_length) {
-    MS_EXCEPTION(ValueError) << "The number of elements of output must be less than max length: " << max_length
-                             << ", but got " << shape_m
-                             << "! The shape of output should be reduced or max_length should be increased";
+    MS_EXCEPTION(ValueError)
+      << "For '" << primitive->name()
+      << "', the number of elements of output must be less than 'max_length'. But got number of elements: " << shape_m
+      << ", 'max_length': " << max_length << ".";
   }
   return std::make_shared<abstract::Shape>(out_shape);
 } else {

@@ -120,7 +126,7 @@ TypePtr FillV2InferType(const PrimitivePtr &primitive, const std::vector<Abstrac
     const std::set<TypePtr> input1_valid_types = {kInt32, kInt64};
     (void)CheckAndConvertUtils::CheckTensorTypeValid("input1 datatype", input1_type, input1_valid_types, prim_name);
   } else {
-    MS_EXCEPTION(TypeError) << "the datatype of the input1 not support, support datatype: int32, int64.";
+    MS_EXCEPTION(TypeError) << "For '" << primitive->name() << "', the dtype of input1 must be in [int32, int64].";
   }
   // Check the data type of the second input and infer the data type of the output from the second input
   auto input2 = input_args[kInputIndex1];

@@ -131,9 +137,10 @@ TypePtr FillV2InferType(const PrimitivePtr &primitive, const std::vector<Abstrac
output_valid_types.insert(kBool);
(void)CheckAndConvertUtils::CheckTensorTypeValid("output datatype", input2_type, output_valid_types, prim_name);
} else {
MS_EXCEPTION(TypeError) << "the datatype of the input2 not support, support datatype: "
"bool, int8, int16, int32, int64, uint8, uint16, uint32, "
"uint64, float16, float32, float64.";
MS_EXCEPTION(TypeError)
<< "For '" << prim_name
<< "', the dtype of input2 must be in [bool, int8, int16, int32, int64, uint8, uint16, uint32, "
"uint64, float16, float32, float64].";
}
auto input2_tensor_type = (input2_type->cast<TensorTypePtr>())->element();

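Almost every message rewritten in this commit converges on the shape "For '<op>', <requirement>, but got: <actual>.". A hypothetical helper (not part of this commit) that factors the convention out could look like:

#include <sstream>
#include <string>

// Hypothetical formatter for the message convention used throughout this
// commit: "For '<op>', <requirement>, but got: <actual>."
template <typename T>
std::string FormatOpError(const std::string &op_name, const std::string &requirement, const T &actual) {
  std::ostringstream oss;
  oss << "For '" << op_name << "', " << requirement << ", but got: " << actual << ".";
  return oss.str();
}

// Example: FormatOpError("FillV2", "the dtype of input1 must be in [int32, int64]", "float32")
// yields: For 'FillV2', the dtype of input1 must be in [int32, int64], but got: float32.
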
@@ -58,7 +58,7 @@ TypePtr FloorDivInferType(const PrimitivePtr &primitive, const std::vector<Abstr
if (!input_type01->isa<TensorType>() && !input_type02->isa<TensorType>()) {
MS_EXCEPTION(TypeError) << "For " << prim_name << ","
<< " one of the inputs must be tensor type but got " << input_type01->ToString() << " and "
<< input_type02->ToString();
<< input_type02->ToString() << ".";
}
const std::set<TypePtr> valid_types = {kFloat16, kFloat32, kFloat64, kInt8, kInt16,
kInt32, kInt64, kUInt8, kUInt16, kBool};

@@ -81,8 +81,8 @@ TypePtr FloorModInferType(const PrimitivePtr &primitive, const std::vector<Abstr
} else {
MS_EXCEPTION(TypeError)
<< "For '" << op_name
<< "', Complex math binary op expecting Tensor [complex64, complex64],[complex64, float32], [float32, "
"complex64],[complex128, complex128],[complex128, float64], [float64, complex128], but got["
<< "', complex math binary op expecting Tensor [complex64, complex64], [complex64, float32], [float32, "
"complex64], [complex128, complex128], [complex128, float64], [float64, complex128], but got ["
<< type_x->ToString() << ", " << type_y->ToString() << "].";
}
}

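The 'complex math binary op' branch above rejects every mixed pairing except complex64 with float32 and complex128 with float64. A standalone sketch of that pairing rule, with a small enum standing in for MindSpore's TypePtr:

#include <optional>

// Stand-in for the dtypes that participate in the pairing rule (assumption:
// the ops above only reach this branch when at least one side is complex).
enum class Dtype { kFloat32, kFloat64, kComplex64, kComplex128 };

// Returns the promoted type for a complex binary op, or std::nullopt for the
// pairings the ops above reject with a TypeError.
std::optional<Dtype> PromoteComplex(Dtype x, Dtype y) {
  if (x == y) return x;  // [complex64, complex64], [complex128, complex128]
  auto is_pair = [&](Dtype c, Dtype f) { return (x == c && y == f) || (x == f && y == c); };
  if (is_pair(Dtype::kComplex64, Dtype::kFloat32)) return Dtype::kComplex64;
  if (is_pair(Dtype::kComplex128, Dtype::kFloat64)) return Dtype::kComplex128;
  return std::nullopt;  // e.g. [complex64, float64] -> TypeError above
}
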
@@ -39,8 +39,9 @@ abstract::TupleShapePtr FractionalMaxPoolInferShape(const PrimitivePtr &primitiv
(void)CheckAndConvertUtils::CheckInteger("input_rank", SizeToLong(in_shape.size()), kEqual, x_rank, op_name);
for (int i = 0; i < x_rank; i++) {
if (in_shape[i] <= 0) {
MS_EXCEPTION(ValueError) << "For '" << op_name << "', the shape of input 'x' must be > 0 "
<< std::to_string(in_shape[i]) << ".";
MS_EXCEPTION(ValueError) << "For '" << op_name
<< "', each dimension of input shape must be greater than 0, but got: "
<< std::to_string(in_shape[i]) << ".";
}
}
auto pooling_ratio = GetValue<std::vector<float>>(primitive->GetAttr(kPoolingRatio));

@@ -54,20 +55,17 @@ abstract::TupleShapePtr FractionalMaxPoolInferShape(const PrimitivePtr &primitiv
<< std::to_string(pooling_ratio[0]) << ".";
}
if (pooling_ratio[kInputIndex1] < 1.0) {
MS_EXCEPTION(ValueError)
<< "For '" << op_name
<< "', the element of parameter 'pooling_ratio' must be larger than 1.0, but pooling_ratio[1] = "
<< std::to_string(pooling_ratio[kInputIndex1]) << ".";
MS_EXCEPTION(ValueError) << "For '" << op_name
<< "', the second element of pooling ratio must be greater than or equal to 1.0, but got: "
<< std::to_string(pooling_ratio[kInputIndex1]) << ".";
}
if (pooling_ratio[kInputIndex2] < 1.0) {
MS_EXCEPTION(ValueError)
<< "For '" << op_name
<< "', the element of parameter 'pooling_ratio' must be larger than 1.0, but pooling_ratio[2] = "
<< std::to_string(pooling_ratio[kInputIndex2]) << ".";
MS_EXCEPTION(ValueError) << "For '" << op_name
<< "', the third element of pooling ratio must be greater than or equal to 1.0, but got: "
<< std::to_string(pooling_ratio[kInputIndex2]) << ".";
}
if (pooling_ratio[kInputIndex3] != 1.0) {
MS_EXCEPTION(ValueError) << "For '" << op_name
<< "', the last element of parameter 'pooling_ratio' must be 1.0, but got "
MS_EXCEPTION(ValueError) << "For '" << op_name << "', the fourth element of 'pooling_ratio' must be 1.0, but got: "
<< std::to_string(pooling_ratio[kInputIndex3]) << ".";
}
std::vector<int64_t> out_shape(x_rank);

@@ -76,7 +74,8 @@ abstract::TupleShapePtr FractionalMaxPoolInferShape(const PrimitivePtr &primitiv
}
if (std::any_of(out_shape.begin(), out_shape.end(), [](int64_t a) { return a <= 0; })) {
MS_EXCEPTION(ValueError) << "For '" << op_name
<< "', output shape <=0, the value of parameter 'pooling_ratio' is not valid.";
<< "', output shape must be greater than 0, but got out_shape: " << out_shape
<< ". Check if the pooling ratio is valid.";
}
int64_t row = out_shape[kInputIndex1] + 1;
int64_t col = out_shape[kInputIndex2] + 1;

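Behind the ratio checks above, each output dimension is the input dimension divided by its pooling ratio, so a dimension collapses to zero exactly when the ratio exceeds the dimension. A sketch of that computation under those assumptions:

#include <cmath>
#include <cstdint>
#include <stdexcept>
#include <vector>

// Sketch: derive the FractionalMaxPool output shape, assuming
// out[i] = floor(in[i] / ratio[i]) with the rank-4, ratio[0] == ratio[3] == 1.0
// contract enforced above.
std::vector<int64_t> FractionalPoolOutShape(const std::vector<int64_t> &in_shape,
                                            const std::vector<float> &ratio) {
  std::vector<int64_t> out_shape(in_shape.size());
  for (size_t i = 0; i < in_shape.size(); ++i) {
    out_shape[i] = static_cast<int64_t>(std::floor(in_shape[i] / ratio[i]));
    if (out_shape[i] <= 0) {
      throw std::invalid_argument("output shape must be greater than 0; check if the pooling ratio is valid");
    }
  }
  return out_shape;
}
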
@@ -65,7 +65,7 @@ AbstractBasePtr GatherInfer(const abstract::AnalysisEnginePtr &, const Primitive
} else {
MS_LOG(EXCEPTION) << "For '" << primitive->name()
<< "', the third input type should be tensor or scalar, but got invalid abstract type:"
<< input_args[kInputIndex2]->type_name();
<< input_args[kInputIndex2]->type_name() << ".";
}
auto params_shp = params->shape()->shape();
auto indices_shp = indices->shape()->shape();

@@ -78,8 +78,8 @@ AbstractBasePtr GatherInfer(const abstract::AnalysisEnginePtr &, const Primitive
ShapeVector indices_shp_max = (ind_dyn) ? indices->shape()->max_shape() : indices->shape()->shape();
// check axis_val within interval: [-params_rank, params_rank)
if (!(-params_rank <= axis_val) || !(axis_val < params_rank)) {
MS_LOG(EXCEPTION) << "For Gather - Axis value must be within [ " << -params_rank << ", " << params_rank << " ) "
<< "Got " << axis_val << ".";
MS_LOG(EXCEPTION) << "For 'Gather', axis value must be within range [" << -params_rank << ", " << params_rank
<< "), but got: " << axis_val << ".";
}
if (axis_val < 0) {
axis_val += params_rank;

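The axis handling above is the usual negative-axis idiom: accept axis in the half-open interval [-rank, rank), then fold negative values by adding rank. A standalone sketch:

#include <cstdint>
#include <sstream>
#include <stdexcept>

// Normalize an axis into [0, rank); values in [-rank, 0) count from the end.
int64_t NormalizeAxis(int64_t axis, int64_t rank) {
  if (axis < -rank || axis >= rank) {
    std::ostringstream oss;
    oss << "For 'Gather', axis value must be within range [" << -rank << ", " << rank
        << "), but got: " << axis << ".";
    throw std::out_of_range(oss.str());
  }
  return axis < 0 ? axis + rank : axis;
}

// Example: NormalizeAxis(-1, 4) == 3; NormalizeAxis(4, 4) throws.
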
@@ -45,7 +45,7 @@ abstract::ShapePtr GatherDInferShape(const PrimitivePtr &primitive, const std::v
}
for (size_t i = 0; i < x_shape.size(); ++i) {
if (SizeToLong(i) == dim_v) continue;
MS_LOG(INFO) << "For '" << prim_name << "', it's now being check " << i << "th x shape.";
MS_LOG(INFO) << "For '" << prim_name << "', now checking the " << i << "th dimension of x shape.";
CheckAndConvertUtils::Check("x shape", x_shape[i], kEqual, index_shape[i], prim_name);
}
return std::make_shared<abstract::Shape>(index_shape);

@@ -29,23 +29,23 @@ abstract::ShapePtr GridSampler3DInferShape(const PrimitivePtr &primitive,
auto grid_shape = CheckAndConvertUtils::ConvertShapePtrToShapeMap(input_args[kInputIndex1]->BuildShape())[kShape];
const size_t kFive = 5;
if (input_x_shape.size() != kFive) {
MS_EXCEPTION(ValueError) << "For '" << primitive->name() << "', Input_x must be a 5-dimensional tensor, but got "
<< std::to_string(input_x_shape.size()) << "-dimensional tensor.";
MS_EXCEPTION(ValueError) << "For '" << primitive->name() << "', 'input_x' must be a 5-D tensor, but got "
<< std::to_string(input_x_shape.size()) << "-D tensor.";
}
if (grid_shape.size() != kFive) {
MS_EXCEPTION(ValueError) << "For '" << primitive->name() << "', Grid must be a 5-dimensional tensor, but got "
<< std::to_string(grid_shape.size()) << "-dimensional tensor.";
MS_EXCEPTION(ValueError) << "For '" << primitive->name() << "', 'grid' must be a 5-D tensor, but got "
<< std::to_string(grid_shape.size()) << "-D tensor.";
}
if (input_x_shape[kInputIndex0] != grid_shape[kInputIndex0]) {
MS_EXCEPTION(ValueError)
<< "For '" << primitive->name()
<< "', The first dimension of 'grid' and 'input_x' must be equal, but got the shape of 'grid' is "
<< "', the first dimension of 'grid' and 'input_x' must be equal, but got the shape of 'grid' is "
<< input_args[kInputIndex1]->BuildShape()->ToString() << ", and the shape of 'input_x' is "
<< input_args[kInputIndex0]->BuildShape()->ToString() << ".";
}
if (grid_shape[kInputIndex4] != kInputIndex3) {
MS_EXCEPTION(ValueError) << "For '" << primitive->name() << "', The last dimension of grid must be 3, but got "
<< std::to_string(grid_shape[kInputIndex4]);
MS_EXCEPTION(ValueError) << "For '" << primitive->name() << "', the last dimension of grid must be 3, but got "
<< std::to_string(grid_shape[kInputIndex4]) << ".";
}
std::vector<int64_t> output_shape = {input_x_shape[kInputIndex0], input_x_shape[kInputIndex1],
grid_shape[kInputIndex1], grid_shape[kInputIndex2], grid_shape[kInputIndex3]};

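After these checks, the output shape splices batch and channel from input_x with the three spatial sizes of grid. A sketch of that assembly, assuming input_x is [N, C, D, H, W] and grid is [N, D_out, H_out, W_out, 3]:

#include <cstdint>
#include <vector>

// Sketch: GridSampler3D output is [N, C, D_out, H_out, W_out], with N and C
// taken from input_x and the spatial sizes taken from grid.
std::vector<int64_t> GridSampler3DOutShape(const std::vector<int64_t> &input_x,  // [N, C, D, H, W]
                                           const std::vector<int64_t> &grid) {   // [N, Do, Ho, Wo, 3]
  return {input_x[0], input_x[1], grid[1], grid[2], grid[3]};
}
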
@@ -39,8 +39,9 @@ abstract::ShapePtr IOUInferShape(const PrimitivePtr &primitive, const std::vecto
auto x_shp = x_shape_map[kShape];
auto y_shp = y_shape_map[kShape];
if (x_shp.size() != 2 || y_shp.size() != 2) {
MS_EXCEPTION(ValueError) << "For BatchMatMul, input x, y should have the same dimension size and should be greater"
<< "or equal to 3, while x size = " << x_shp.size() << ", y size = " << y_shp.size();
MS_EXCEPTION(ValueError) << "For '" << prim_name << "', input x, y must both be 2-dimensional, but got x size = "
<< x_shp.size() << ", y size = " << y_shp.size() << ".";
}
(void)CheckAndConvertUtils::CheckInteger("input numbers", SizeToLong(x_shp[1]), kGreaterEqual, 4, prim_name);
(void)CheckAndConvertUtils::CheckInteger("input numbers", SizeToLong(y_shp[1]), kGreaterEqual, 4, prim_name);

@@ -48,19 +48,19 @@ abstract::ShapePtr IsCloseInferShape(const PrimitivePtr &primitive, const std::v
other_size *= other_shape[i];
if (input_shape[i] != other_shape[i] && (input_shape[i] != 1 || other_shape[i] != 1)) {
MS_EXCEPTION(ValueError) << "For '" << op_name
<< "', The size of tensor input must match the size of tensor other at the " << i
<< " dimension, but got input size: " << input_shape[i]
<< ", other size: " << other_shape[i] << ".";
<< "', the size of tensor 'input' must match the size of tensor 'other' at the " << i
<< "th dimension, but got 'input' size: " << input_shape[i]
<< ", 'other' size: " << other_shape[i] << ".";
}
}
if (input_size > MAX)
MS_EXCEPTION(ValueError) << "For '" << op_name
<< "', The size of tensor input must should be less than [2147483648], actual is "
<< input_size;
<< "', the size of tensor 'input' must be less than 2147483648, but got: "
<< input_size << ".";
if (other_size > MAX)
MS_EXCEPTION(ValueError) << "For '" << op_name
<< "', The size of tensor other must should be less than [2147483648], actual is "
<< other_size;
<< "', the size of tensor 'other' must be less than 2147483648, but got: "
<< other_size << ".";
}
return BroadCastInferShape(op_name, input_args);
}

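The per-dimension condition above is the standard broadcast rule: two sizes are compatible when they are equal or either is 1. A standalone sketch of the right-aligned broadcast that BroadCastInferShape presumably performs:

#include <algorithm>
#include <cstdint>
#include <stdexcept>
#include <vector>

// Right-aligned NumPy-style broadcast of two shapes; throws when a pair of
// dimensions is neither equal nor 1.
std::vector<int64_t> BroadcastShape(std::vector<int64_t> a, std::vector<int64_t> b) {
  if (a.size() < b.size()) std::swap(a, b);  // make 'a' the longer shape
  std::vector<int64_t> out(a);
  const size_t offset = a.size() - b.size();
  for (size_t i = 0; i < b.size(); ++i) {
    const int64_t x = a[offset + i], y = b[i];
    if (x != y && x != 1 && y != 1) {
      throw std::invalid_argument("shapes are not broadcastable");
    }
    out[offset + i] = std::max(x, y);
  }
  return out;
}

// Example: BroadcastShape({4, 1, 3}, {5, 3}) == {4, 5, 3}.
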
@@ -37,28 +37,31 @@ abstract::ShapePtr LARSUpdateInferShape(const PrimitivePtr &primitive, const std

if (weight_shape[kShape].size() != gradient_shape[kShape].size()) {
MS_EXCEPTION(ValueError) << "For '" << op_name
<< "', weight shape size should be equal to gradient shape size, but got "
<< "weight shape: " << weight_shape << " and gradient shape: " << gradient_shape;
<< "', weight shape size must be equal to gradient shape size, but got "
<< "weight shape size: " << weight_shape[kShape].size()
<< ", gradient shape size: " << gradient_shape[kShape].size() << ".";
}
if (norm_weight_shape[kShape].size() != norm_gradient_shape[kShape].size()) {
MS_EXCEPTION(ValueError) << "For " << op_name
<< "', norm weight shape size should be equal to norm gradient shape size, but got "
<< "weight shape: " << norm_weight_shape << " and gradient shape: " << norm_gradient_shape;
<< "', norm weight shape size must be equal to norm gradient shape size, but got "
<< "norm weight shape size: " << norm_weight_shape[kShape].size()
<< ", norm gradient shape size: " << norm_gradient_shape[kShape].size() << ".";
}
for (size_t index = 0; index < weight_shape[kShape].size(); index++) {
if (weight_shape[kShape][index] != gradient_shape[kShape][index]) {
MS_EXCEPTION(ValueError) << "For '" << op_name << "', The " << index
<< "'s shape of weight shape should euqal with gradient shape, but got "
<< "weight shape: " << norm_weight_shape
<< " and gradient shape:" << norm_gradient_shape;
MS_EXCEPTION(ValueError) << "For '" << op_name << "', the " << index
<< "th dim of weight shape and gradient shape must be equal, but got "
<< "weight shape[" << index << "]: " << weight_shape[kShape][index]
<< ", gradient shape[" << index << "]: " << gradient_shape[kShape][index] << ".";
}
}
for (size_t index = 0; index < weight_shape[kShape].size(); index++) {
if (weight_shape[kShape][index] != gradient_shape[kShape][index]) {
MS_EXCEPTION(ValueError) << "For '" << op_name << "', The " << index
<< "'s shape of weight shape should euqal with gradient shape, but got "
<< "weight shape: " << norm_weight_shape
<< " and gradient shape:" << norm_gradient_shape;
for (size_t index = 0; index < norm_weight_shape[kShape].size(); index++) {
if (norm_weight_shape[kShape][index] != norm_gradient_shape[kShape][index]) {
MS_EXCEPTION(ValueError) << "For '" << op_name << "', the " << index
<< "th dim of norm weight shape and norm gradient shape must be equal, but got "
<< "norm weight shape[" << index << "]: " << norm_weight_shape[kShape][index]
<< ", norm gradient shape[" << index << "]: " << norm_gradient_shape[kShape][index]
<< ".";
}
}
auto shp_len = weight_decay_shape[kShape].size();

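The LARSUpdate checks above repeat one predicate twice: ranks must match, then every dimension must match. A sketch of a reusable helper (hypothetical, not in this commit) that reports the first mismatch the way these messages do:

#include <cstdint>
#include <sstream>
#include <stdexcept>
#include <string>
#include <vector>

// Hypothetical helper: require two shapes to be identical, naming the pair in
// the error the way the LARSUpdate messages above do.
void RequireSameShape(const std::string &op_name, const std::string &lhs_name,
                      const std::vector<int64_t> &lhs, const std::string &rhs_name,
                      const std::vector<int64_t> &rhs) {
  if (lhs.size() != rhs.size()) {
    std::ostringstream oss;
    oss << "For '" << op_name << "', " << lhs_name << " shape size must be equal to " << rhs_name
        << " shape size, but got " << lhs.size() << " and " << rhs.size() << ".";
    throw std::invalid_argument(oss.str());
  }
  for (size_t i = 0; i < lhs.size(); ++i) {
    if (lhs[i] != rhs[i]) {
      std::ostringstream oss;
      oss << "For '" << op_name << "', the " << i << "th dim of " << lhs_name << " shape and "
          << rhs_name << " shape must be equal, but got " << lhs[i] << " and " << rhs[i] << ".";
      throw std::invalid_argument(oss.str());
    }
  }
}

// Usage: RequireSameShape("LARSUpdate", "weight", weight, "gradient", gradient);
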
@@ -58,7 +58,7 @@ AbstractBasePtr LayerNormInfer(const abstract::AnalysisEnginePtr &, const Primit
auto const &input_shape_list = input_shape->shape();
const size_t input_rank = input_shape_list.size();
if (input_rank == 0) {
MS_LOG(EXCEPTION) << "For '" << op_name << "', input_rank should not be zero, but got " << input_rank;
MS_LOG(EXCEPTION) << "For '" << op_name << "', input_rank cannot be zero, but got: " << input_rank << ".";
}

// begin_norm_axis and begin_params_axis should be smaller than the size of input_x and >= -1

@@ -86,7 +86,7 @@ AbstractBasePtr LayerNormInfer(const abstract::AnalysisEnginePtr &, const Primit
auto const &gamma_shape_list = gamma_shape->shape();
auto const &beta_shape_list = beta_shape->shape();
if (gamma_shape_list.empty() || beta_shape_list.empty()) {
MS_LOG(EXCEPTION) << "LayerNorm evaluator gamma or beta is a AbstractScalar that is not support.";
MS_LOG(EXCEPTION) << "For 'LayerNorm', evaluator gamma or beta cannot be an AbstractScalar.";
}

size_t begin_params_axis_u = LongToSize(begin_params_axis);

@@ -106,9 +106,9 @@ AbstractBasePtr LayerNormInfer(const abstract::AnalysisEnginePtr &, const Primit
if ((gamma_shape_list[gamma_beta_shape_dim] != input_shape_list[i]) ||
(beta_shape_list[gamma_beta_shape_dim] != input_shape_list[i])) {
MS_LOG(EXCEPTION) << "For '" << op_name
<< "', Gamma or beta shape should match input shape, but got input shape: "
<< "', gamma or beta shape must match input shape, but got input shape: "
<< input_shape->ToString() << ", gamma shape: " << gamma_shape->ToString()
<< ", beta shape: " << beta_shape->ToString();
<< ", beta shape: " << beta_shape->ToString() << ".";
}
}

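The loop above enforces that gamma and beta equal the input shape from begin_params_axis onward, i.e. they form a suffix of the input shape. A standalone sketch of that suffix check:

#include <cstdint>
#include <stdexcept>
#include <vector>

// Sketch: gamma/beta must equal input_shape[begin_params_axis:] element-wise.
void CheckParamsSuffix(const std::vector<int64_t> &input_shape,
                       const std::vector<int64_t> &param_shape,  // gamma or beta
                       size_t begin_params_axis) {
  if (param_shape.size() != input_shape.size() - begin_params_axis) {
    throw std::invalid_argument("gamma or beta rank does not match the normalized suffix");
  }
  for (size_t i = begin_params_axis; i < input_shape.size(); ++i) {
    if (param_shape[i - begin_params_axis] != input_shape[i]) {
      throw std::invalid_argument("gamma or beta shape must match input shape");
    }
  }
}

// Example: input [2, 16, 32] with begin_params_axis == 1 expects gamma/beta of shape [16, 32].
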
@@ -29,16 +29,16 @@ abstract::ShapePtr LowerBoundInferShape(const PrimitivePtr &primitive, const std
size_t size_exp = 2;
if (x_shape.size() != size_exp) {
MS_EXCEPTION(ValueError) << "For '" << primitive->name()
<< "', The rank of sorted_x need to be equal to 2, but got " << values_shape.size();
<< "', the rank of sorted_x must be 2, but got: " << x_shape.size() << ".";
}
if (values_shape.size() != size_exp) {
MS_EXCEPTION(ValueError) << "For '" << primitive->name() << "', The rank of values need to be equal to 2, but got "
<< values_shape.size();
MS_EXCEPTION(ValueError) << "For '" << primitive->name()
<< "', the rank of values must be 2, but got: " << values_shape.size() << ".";
}
if (x_shape[0] != values_shape[0]) {
MS_EXCEPTION(ValueError)
<< "For '" << primitive->name()
<< "', The first dimension of shape of sorted_x must be equal to that of values, but got shape of values: "
<< "', the first dimension of the shape of sorted_x must be equal to that of values, but got shape of values: "
<< input_args[1]->BuildShape()->ToString() << ", shape of sorted_x: " << input_args[0]->BuildShape()->ToString()
<< ".";
}

@@ -58,8 +58,8 @@ TypePtr LowerBoundInferType(const PrimitivePtr &primitive, const std::vector<Abs
auto out_type_id = out_type->type_id();
MS_EXCEPTION_IF_NULL(out_type);
if (out_type_id != kInt32->type_id() && out_type_id != kInt64->type_id()) {
MS_EXCEPTION(TypeError) << "For '" << primitive->name() << "', 'out_type' must be int32 or int64, but got "
<< out_type;
MS_EXCEPTION(TypeError) << "For '" << primitive->name()
<< "', 'out_type' must be int32 or int64, but got: " << out_type << ".";
}
return out_type;
}

@@ -38,16 +38,17 @@ abstract::ShapePtr LstsqInferShape(const PrimitivePtr &primitive, const std::vec
auto a_shape_map = CheckAndConvertUtils::ConvertShapePtrToShapeMap(input_args[1]->BuildShape());
auto a_shape = a_shape_map[kShape];
if (x_shape.size() != x_dim_num) {
MS_EXCEPTION(ValueError) << "For lstsq, the dimension of x must be equal to 2, while got x_dim: " << x_shape.size()
MS_EXCEPTION(ValueError) << "For 'Lstsq', the dimension of x must be equal to 2, but got x_dim: " << x_shape.size()
<< ".";
}
if (a_shape.size() != a_dim_num_2 && a_shape.size() != a_dim_num_1) {
MS_EXCEPTION(ValueError) << "For lstsq, the dimension of a must be equal to 2 or 1, while got a_dim: "
MS_EXCEPTION(ValueError) << "For 'Lstsq', the dimension of 'a' must be equal to 2 or 1, but got a_dim: "
<< a_shape.size() << ".";
}
if (x_shape[0] != a_shape[0]) {
MS_EXCEPTION(ValueError) << "For lstsq, the length of x_dim[0]: " << x_shape[0]
<< " is not equal to the length of a_dims[0]: " << a_shape[0] << ".";
MS_EXCEPTION(ValueError)
<< "For 'Lstsq', the length of x_dim[0] must be equal to the length of a_dims[0]. But got x_dim[0]: "
<< x_shape[0] << ", a_dims[0]: " << a_shape[0] << ".";
}
ShapeVector y_shape;
if (a_shape.size() == a_dim_num_1) {

@@ -60,23 +60,29 @@ abstract::ShapePtr LuSolveInferShape(const PrimitivePtr &primitive, const std::v
auto lu_pivots_shape_map = CheckAndConvertUtils::ConvertShapePtrToShapeMap(input_args[2]->BuildShape());
auto lu_pivots_shape = lu_pivots_shape_map[kShape];
if (lu_data_shape.size() < kDimNum) {
MS_EXCEPTION(ValueError) << "For " << op_name << " lu_data's dimensions should be greater than or equal to 2.";
MS_EXCEPTION(ValueError) << "For '" << op_name
<< "', lu_data's dimension must be greater than or equal to 2, but got: "
<< lu_data_shape.size() << ".";
}
if (x_shape.size() < kDimNum) {
MS_EXCEPTION(ValueError) << "For " << op_name << " x's dimensions should be greater than or equal to 2.";
MS_EXCEPTION(ValueError) << "For '" << op_name
<< "', x's dimension must be greater than or equal to 2, but got: " << x_shape.size()
<< ".";
}
if (lu_pivots_shape.size() < 1) {
MS_EXCEPTION(ValueError) << "For " << op_name << " lu_pivots's dimensions should be greater than or equal to 1.";
MS_EXCEPTION(ValueError) << "For '" << op_name
<< "', lu_pivots's dimension must be greater than or equal to 1, but got: "
<< lu_pivots_shape.size() << ".";
}
if (lu_data_shape[lu_data_shape.size() - 1] != lu_data_shape[lu_data_shape.size() - kDimNum]) {
MS_EXCEPTION(ValueError) << "For " << op_name << " input lu_data should be square matrix "
<< "while row is " << lu_data_shape[lu_data_shape.size() - kDimNum] << ", col is "
<< lu_data_shape[lu_data_shape.size() - 1] << ".";
MS_EXCEPTION(ValueError) << "For '" << op_name << "', input lu_data must be a square matrix, "
<< "but got row: " << lu_data_shape[lu_data_shape.size() - kDimNum]
<< ", col: " << lu_data_shape[lu_data_shape.size() - 1] << ".";
}
if (x_shape[x_shape.size() - kDimNum] != lu_data_shape[lu_data_shape.size() - kDimNum]) {
MS_EXCEPTION(ValueError) << "For " << op_name << " x's col rank is not same as lu_data's col rank. "
<< "x is " << x_shape[x_shape.size() - kDimNum] << ", lu_data is "
<< lu_data_shape[lu_data_shape.size() - kDimNum] << ".";
MS_EXCEPTION(ValueError) << "For '" << op_name << "', x's col rank must be the same as lu_data's col rank, "
<< "but got x's: " << x_shape[x_shape.size() - kDimNum]
<< ", lu_data's: " << lu_data_shape[lu_data_shape.size() - kDimNum] << ".";
}
if (x_shape.size() == lu_data_shape.size()) {
for (size_t i = 0; i <= x_shape.size() - kDimNum; i++) {

@@ -101,10 +107,10 @@ abstract::ShapePtr LuSolveInferShape(const PrimitivePtr &primitive, const std::v
}
}
if (lu_pivots_shape[lu_pivots_shape.size() - 1] != lu_data_shape[lu_data_shape.size() - 1]) {
MS_EXCEPTION(ValueError) << "For " << op_name
<< " the last dimension of lu_pivots must be equal to the last dimension of lu_data, "
<< "lu_data is " << lu_data_shape[lu_data_shape.size() - 1] << ", lu_pivots is "
<< lu_pivots_shape[lu_pivots_shape.size() - 1] << ".";
MS_EXCEPTION(ValueError) << "For '" << op_name
<< "', the last dimension of lu_pivots must be the same as lu_data's, "
<< "but got lu_pivots': " << lu_pivots_shape[lu_pivots_shape.size() - 1]
<< ", lu_data's: " << lu_data_shape[lu_data_shape.size() - 1] << ".";
}
for (size_t i = 0; i < lu_pivots_shape.size(); i++) {
if (lu_data_shape[i] != lu_pivots_shape[i]) {

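Taken together, the LuSolve checks reduce to a trailing-dimension contract: lu_data is a batch of square n-by-n matrices, x shares that row count, and lu_pivots' last dimension is n. A condensed sketch of those checks:

#include <cstdint>
#include <stdexcept>
#include <vector>

// Sketch of the LuSolve trailing-dimension contract (batch dims elided):
// lu_data: [..., n, n], x: [..., n, k], lu_pivots: [..., n].
void CheckLuSolveDims(const std::vector<int64_t> &lu_data, const std::vector<int64_t> &x,
                      const std::vector<int64_t> &lu_pivots) {
  if (lu_data.size() < 2 || x.size() < 2 || lu_pivots.empty()) {
    throw std::invalid_argument("lu_data and x need rank >= 2, lu_pivots needs rank >= 1");
  }
  const int64_t n = lu_data[lu_data.size() - 1];
  if (lu_data[lu_data.size() - 2] != n) {
    throw std::invalid_argument("input lu_data must be a square matrix");
  }
  if (x[x.size() - 2] != n) {
    throw std::invalid_argument("x's col rank must be the same as lu_data's col rank");
  }
  if (lu_pivots[lu_pivots.size() - 1] != n) {
    throw std::invalid_argument("the last dimension of lu_pivots must be the same as lu_data's");
  }
}
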
@@ -40,8 +40,8 @@ abstract::ShapePtr MaskedFillInferShape(const PrimitivePtr &primitive, const std
if (input_args[kInputIndex2]->isa<abstract::AbstractTensor>()) {
if (value_shape.size() != 0) {
MS_EXCEPTION(ValueError)
<< "For " + op_name +
", 'value' only supports a 0-dimensional value tensor or a float number, but got tensor with "
<< "For '" << op_name
<< "', 'value' only supports a 0-dimensional value tensor or a float number, but got tensor with "
<< value_shape.size() << " dimension(s).";
}
broadcast_shape = CalBroadCastShape(broadcast_shape, value_shape, op_name);

@@ -68,8 +68,8 @@ TypePtr MaximumInferType(const PrimitivePtr &prim, const std::vector<AbstractBas
} else {
MS_EXCEPTION(TypeError)
<< "For '" << op_name
<< "', Complex math binary op expecting Tensor [complex64, complex64],[complex64, float32], [float32, "
"complex64],[complex128, complex128],[complex128, float64], [float64, complex128], but got["
<< "', complex math binary op expecting Tensor [complex64, complex64], [complex64, float32], [float32, "
"complex64], [complex128, complex128], [complex128, float64] or [float64, complex128], but got ["
<< type_x->ToString() << ", " << type_y->ToString() << "].";
}
}

@@ -69,8 +69,8 @@ TypePtr ModInferType(const PrimitivePtr &prim, const std::vector<AbstractBasePtr
} else {
MS_EXCEPTION(TypeError)
<< "For '" << op_name
<< "', Complex math binary op expecting Tensor [complex64, complex64],[complex64, float32], [float32, "
"complex64],[complex128, complex128],[complex128, float64], [float64, complex128], but got["
<< "', complex math binary op expecting Tensor [complex64, complex64], [complex64, float32], [float32, "
"complex64], [complex128, complex128], [complex128, float64] or [float64, complex128], but got ["
<< type_x->ToString() << ", " << type_y->ToString() << "].";
}
}

@@ -64,9 +64,9 @@ TypePtr MulNoNanInferType(const PrimitivePtr &prim, const std::vector<AbstractBa
return type_y;
} else {
MS_EXCEPTION(TypeError)
<< "For '" << prim->name()
<< "', Complex math binary op expecting Tensor [complex64, complex64],[complex64, float32], [float32, "
"complex64],[complex128, complex128],[complex128, float64], [float64, complex128], but got["
<< "For '" << op_name
<< "', complex math binary op expecting Tensor [complex64, complex64], [complex64, float32], [float32, "
"complex64], [complex128, complex128], [complex128, float64] or [float64, complex128], but got ["
<< type_x->ToString() << ", " << type_y->ToString() << "].";
}
}

@@ -132,7 +132,7 @@ ValuePtr NegInferValue(const PrimitivePtr &prim, const std::vector<AbstractBaseP
<< "For '" << prim->name()
<< "', the supported data type is ['int8', 'int16', 'int32', 'int64', 'uint8', "
"'uint16','uint32', 'uint64','float16', 'float32', 'float64', 'complex64', 'complex128'], but got "
<< x_tensor->ToString();
<< x_tensor->ToString() << ".";
}
}
return result_tensor;

@@ -47,38 +47,38 @@ void CheckAttr(const PrimitivePtr &primitive, const std::string &shape_attr_name
try {
auto attr = primitive->GetAttr(shape_attr_name);
if (attr->cast<ValueTuplePtr>() == nullptr) {
MS_EXCEPTION(TypeError) << "For '" << prim_name << "', Attr '" << shape_attr_name
MS_EXCEPTION(TypeError) << "For '" << prim_name << "', attr '" << shape_attr_name
<< "' is required, but missing.";
}
attr_shapes = GetValue<ValuePtrList>(attr);
} catch (const std::exception &) {
MS_EXCEPTION(TypeError) << "For '" << prim_name << "', Attr " << shape_attr_name
<< " must be a tuple(list, list, ...).";
MS_EXCEPTION(TypeError) << "For '" << prim_name << "', attr '" << shape_attr_name
<< "' must be a tuple(list, list, ...).";
}
if (!attr_shapes.empty()) {
auto ele = attr_shapes[0]->cast<ValueSequencePtr>();
if (ele == nullptr) {
MS_EXCEPTION(TypeError) << "For '" << prim_name << "', Attr " << shape_attr_name
<< " must be a tuple(list, list, ...).";
MS_EXCEPTION(TypeError) << "For '" << prim_name << "', the first element of attr '" << shape_attr_name
<< "' must not be a null pointer.";
}
}
std::vector<int64_t> attr_rank_ids;
try {
auto attr = primitive->GetAttr(rank_ids_attr_name);
if (attr->cast<ValueTuplePtr>() != nullptr) {
MS_EXCEPTION(TypeError) << "For '" << prim_name << "', Attr '" << shape_attr_name
MS_EXCEPTION(TypeError) << "For '" << prim_name << "', attr '" << rank_ids_attr_name
<< "' is required, but missing.";
}
attr_rank_ids = GetValue<std::vector<int64_t>>(attr);
} catch (const std::exception &) {
MS_EXCEPTION(TypeError) << "For '" << prim_name << "', Attr " << rank_ids_attr_name
<< " must be a list[int, int, ...].";
MS_EXCEPTION(TypeError) << "For '" << prim_name << "', attr '" << rank_ids_attr_name
<< "' must be a list[int, int, ...].";
}
if (attr_shapes.size() != attr_rank_ids.size()) {
MS_EXCEPTION(ValueError) << "For '" << primitive->name() << "', attr '" << shape_attr_name
<< "' size must be equal to attr '" << rank_ids_attr_name << "' size, but got attr '"
<< shape_attr_name << "' size: " << attr_shapes.size() << ", attr " << rank_ids_attr_name
<< " size: " << attr_rank_ids.size();
<< shape_attr_name << "' size: " << attr_shapes.size() << ", attr '" << rank_ids_attr_name
<< "' size: " << attr_rank_ids.size() << ".";
}
}

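CheckAttr above wraps each GetValue in a try/catch so that any failed conversion surfaces as the same "attr '...' must be a ..." TypeError. The same pattern outside MindSpore, with std::any standing in for ValuePtr (names here are illustrative):

#include <any>
#include <cstdint>
#include <sstream>
#include <stdexcept>
#include <string>
#include <vector>

// Sketch: convert a required attribute, turning any conversion failure into
// one uniform TypeError-style message.
std::vector<int64_t> GetRankIdsAttr(const std::any &attr, const std::string &prim_name,
                                    const std::string &attr_name) {
  try {
    return std::any_cast<std::vector<int64_t>>(attr);
  } catch (const std::bad_any_cast &) {
    std::ostringstream oss;
    oss << "For '" << prim_name << "', attr '" << attr_name << "' must be a list[int, int, ...].";
    throw std::invalid_argument(oss.str());
  }
}
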
@@ -95,8 +95,8 @@ void NeighborExchangeCheck(const PrimitivePtr &primitive, const std::vector<Abst
auto recv_type_attr = primitive->GetAttr(kNeighborExchangeRecvType);
MS_EXCEPTION_IF_NULL(recv_type_attr);
if (!recv_type_attr->isa<Type>()) {
MS_EXCEPTION(TypeError) << "For '" << prim_name << "', Attr " << kNeighborExchangeRecvType
<< " should be a mindspore data type.";
MS_EXCEPTION(TypeError) << "For '" << prim_name << "', attr '" << kNeighborExchangeRecvType
<< "' must be a MindSpore data type.";
}
// check group
auto group_attr = primitive->GetAttr(kGroup);

@@ -104,7 +104,7 @@ void NeighborExchangeCheck(const PrimitivePtr &primitive, const std::vector<Abst
MS_EXCEPTION_IF_NULL(group_attr);
(void)GetValue<std::string>(group_attr);
} catch (const std::exception &) {
MS_EXCEPTION(TypeError) << "For '" << prim_name << "', Attr " << kGroup << " should be a str.";
MS_EXCEPTION(TypeError) << "For '" << prim_name << "', attr '" << kGroup << "' must be a str.";
}
// check empty input
auto send_rank_ids = GetValue<std::vector<int64_t>>(primitive->GetAttr(kSendRankIds));