ops whitelist amendment after code review
This commit is contained in:
parent 9edf52d71f
commit 1891cbf483
@@ -42,9 +42,9 @@ abstract::ShapePtr ApplyCenteredRMSPropInferShape(const PrimitivePtr &primitive,
   // var and mg must have the same shape when is not dynamic
   if (!var_shape_ptr->IsDynamic() && !mg_shape_ptr->IsDynamic()) {
     if (*var_shape != *mg_shape) {
-      MS_EXCEPTION(ValueError) << "For '" << op_name
-                               << "', 'mean_gradient'must have the same shape as 'var'. But got 'mean_gradient' shape: "
-                               << mg_shape->ToString() << ", 'var' shape: " << var_shape->ToString() << ".";
+      MS_EXCEPTION(ValueError)
+        << "For '" << op_name << "', 'mean_gradient' must have the same shape as 'var'. But got 'mean_gradient' shape: "
+        << mg_shape->ToString() << ", 'var' shape: " << var_shape->ToString() << ".";
     }
   }
   // var and ms must have the same shape when is not dynamic
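All of the rewritten messages in this commit follow one template: "For '<op_name>', <constraint>. But got <actual>.". The sketch below mirrors that template outside of MindSpore; FormatShapeError is a hypothetical helper, not a MindSpore API, and the shapes are made up.

#include <cstdint>
#include <iostream>
#include <sstream>
#include <string>
#include <vector>

// Hypothetical helper mirroring the message template adopted in this commit:
// "For '<op_name>', <constraint>. But got <actual>."
std::string FormatShapeError(const std::string &op_name, const std::string &constraint, const std::string &got) {
  std::ostringstream oss;
  oss << "For '" << op_name << "', " << constraint << ". But got " << got << ".";
  return oss.str();
}

int main() {
  std::vector<int64_t> var_shape{2, 3};
  std::vector<int64_t> mg_shape{2, 4};
  if (var_shape != mg_shape) {
    std::cout << FormatShapeError("ApplyCenteredRMSProp", "'mean_gradient' must have the same shape as 'var'",
                                  "'mean_gradient' shape: [2, 4], 'var' shape: [2, 3]")
              << std::endl;
  }
  return 0;
}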
@@ -52,8 +52,10 @@ abstract::ShapePtr BatchToSpaceNDInferShape(const PrimitivePtr &primitive,
     out_shape[i + offset] = x_block_prod - crops_sum;
   }
   if (out_shape[0] % block_shape_prod != 0) {
-    MS_EXCEPTION(ValueError) << prim_name << " input_x dimension 0 " << out_shape[0]
-                             << " must be divisible by block_shape_prod " << block_shape_prod;
+    MS_EXCEPTION(ValueError)
+      << "For '" << prim_name
+      << "', the first dim of 'input_x' must be divisible by 'block_shape_prod'. But got first dim of 'input_x': "
+      << out_shape[0] << ", 'block_shape_prod' with value: " << block_shape_prod << ".";
   }
   out_shape[0] = int64_t(floor(out_shape[0] / static_cast<float>(block_shape_prod)));
   if (input_min_shape.size() == 0 || input_max_shape.size() == 0) {
@@ -72,12 +74,16 @@ abstract::ShapePtr BatchToSpaceNDInferShape(const PrimitivePtr &primitive,
     output_max_shape[i + offset] = x_block_prod_max - crops_sum;
   }
   if (output_min_shape[0] % block_shape_prod != 0) {
-    MS_EXCEPTION(ValueError) << prim_name << " input_x dimension 0 " << output_min_shape[0]
-                             << " must be divisible by block_shape_prod " << block_shape_prod;
+    MS_EXCEPTION(ValueError) << "For '" << prim_name
+                             << "', the first dim of output min shape must be divisible by 'block_shape_prod'. But got "
+                                "first dim of output min shape: "
+                             << output_min_shape[0] << ", 'block_shape_prod' with value: " << block_shape_prod << ".";
   }
   if (output_max_shape[0] % block_shape_prod != 0) {
-    MS_EXCEPTION(ValueError) << prim_name << " input_x dimension 0 " << output_max_shape[0]
-                             << " must be divisible by block_shape_prod " << block_shape_prod;
+    MS_EXCEPTION(ValueError) << "For '" << prim_name
+                             << "', the first dim of output max shape must be divisible by 'block_shape_prod'. But got "
+                                "first dim of output max shape: "
+                             << output_min_shape[0] << ", 'block_shape_prod' with value: " << block_shape_prod << ".";
   }
   output_min_shape[0] = int64_t(floor(output_min_shape[0] / static_cast<float>(block_shape_prod)));
   output_max_shape[0] = int64_t(floor(output_max_shape[0] / static_cast<float>(block_shape_prod)));
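BatchToSpaceND redistributes the batch dimension across the spatial block, so the batch size must divide evenly by the product of block_shape. A standalone sketch of that arithmetic, with illustrative values:

#include <cstdint>
#include <functional>
#include <iostream>
#include <numeric>
#include <vector>

int main() {
  // block_shape_prod is the product of all block_shape entries.
  std::vector<int64_t> block_shape{2, 2};
  int64_t block_shape_prod =
      std::accumulate(block_shape.begin(), block_shape.end(), int64_t{1}, std::multiplies<int64_t>());

  int64_t batch = 8;  // first dim of 'input_x'
  if (batch % block_shape_prod != 0) {
    std::cerr << "first dim of 'input_x' must be divisible by 'block_shape_prod'" << std::endl;
    return 1;
  }
  // 8 / (2 * 2) = 2: the batch that remains after blocks move into the spatial dims.
  std::cout << "output batch: " << batch / block_shape_prod << std::endl;
  return 0;
}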
@@ -60,8 +60,8 @@ abstract::ShapePtr BiasAddInferShape(const PrimitivePtr &primitive, const std::v
   auto is_ascend = (context_ptr->get_param<std::string>(MS_CTX_DEVICE_TARGET) == kAscendDevice);
   if (data_format == Format::NCDHW && input_shape.size() != 5 && is_ascend) {
     MS_EXCEPTION(ValueError) << "For '" << prim_name
-                             << "', NCDHW format only support 5-dims input in Ascend target, but got " << data_format
-                             << ".";
+                             << "', NCDHW format only support 5 dims input in Ascend target, but got "
+                             << input_shape.size() << " dims.";
   }
   auto x_channel = data_format == Format::NHWC ? input_shape[input_shape.size() - 1] : input_shape[1];
   bool x_not_dyn = std::all_of(input_shape.begin(), input_shape.end(),
@@ -60,12 +60,11 @@ abstract::ShapePtr BoundingBoxDecodeInferShape(const PrimitivePtr &primitive,
   const int64_t last_dimension = 4;
   if (anchor_box_shape[1] != last_dimension) {
     MS_EXCEPTION(ValueError) << "For '" << prim_name
-                             << "', 'anchor_box' must have a last dimension of 4, but got: " << anchor_box_shape[1]
-                             << ".";
+                             << "', 'anchor_box' last dimension must be 4, but got: " << anchor_box_shape[1] << ".";
   }
   if (deltas_shape[1] != last_dimension) {
     MS_EXCEPTION(ValueError) << "For '" << prim_name
-                             << "', 'deltas' must have a last dimension of 4, but got: " << deltas_shape[1] << ".";
+                             << "', 'deltas' last dimension must be 4, but got: " << deltas_shape[1] << ".";
   }

   auto x_shape = anchor_box->cast<abstract::ShapePtr>();
@@ -57,9 +57,11 @@ abstract::ShapePtr BroadcastToInferShape(const PrimitivePtr &primitive,
   (void)primitive->AddAttr("shape", MakeValue(input_x));
   for (size_t i = 0; i < x_shape.size(); i++) {
     if (input_x[i + outer_dim_offset] != x_shape[i] && x_shape[i] != 1) {
-      MS_EXCEPTION(ValueError) << "Not support shapes for broadcast, x_shape: "
-                               << input_args[0]->BuildShape()->ToString()
-                               << ", target shape: " << x_shape_ptr->ToString();
+      MS_EXCEPTION(ValueError)
+        << "For '" << prim_name
+        << "', in order to broadcast, each dimension pair must be equal or input dimension is 1 or target "
+           "dimension is -1. But got x_shape: "
+        << input_args[0]->BuildShape()->ToString() << ", target shape: " << x_shape_ptr->ToString() << ".";
     }
   }
   return x_shape_ptr;
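The rewritten message spells out the usual broadcast-compatibility rule: aligning shapes at the trailing dimensions, each pair must be equal, or the input dimension must be 1 (a target dimension of -1 acts as a wildcard). A minimal sketch of that pairwise check, independent of MindSpore:

#include <cstdint>
#include <iostream>
#include <vector>

// Returns true if src can be broadcast to target under the rule above.
// Shapes are aligned at their trailing dimensions; -1 in target acts as a wildcard.
bool BroadcastCompatible(const std::vector<int64_t> &src, const std::vector<int64_t> &target) {
  if (src.size() > target.size()) return false;
  size_t offset = target.size() - src.size();
  for (size_t i = 0; i < src.size(); i++) {
    if (target[i + offset] != src[i] && src[i] != 1 && target[i + offset] != -1) return false;
  }
  return true;
}

int main() {
  std::cout << BroadcastCompatible({3, 1}, {2, 3, 5}) << std::endl;  // 1: the size-1 dim expands to 5
  std::cout << BroadcastCompatible({3, 4}, {2, 3, 5}) << std::endl;  // 0: 4 vs 5 mismatch
  return 0;
}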
@@ -51,20 +51,19 @@ abstract::TupleShapePtr CoalesceInferShape(const PrimitivePtr &primitive,
     MS_EXCEPTION(ValueError) << "For '" << prim_name << "' x_indices must be a 2-D tensor"
                              << ", x_values and x_shape must be a 1-D tensor, but got x_indices is a "
                              << x_indices_shape.size() << "-D tensor, got x_values is a " << x_values_shape.size()
-                             << "-D tensor, got x_shape is a " << x_shape_shape.size() << "-D tensor"
-                             << ".";
+                             << "-D tensor, got x_shape is a " << x_shape_shape.size() << "-D tensor.";
   }
   if (x_indices_shape[0] != x_shape_shape[0]) {
     MS_EXCEPTION(ValueError) << "For " << prim_name
-                             << ", size of dim0 of x_indices and dim0 of x_shape must be the same"
-                             << ", but got x_indices dim0 size: " << x_indices_shape[0]
-                             << ", x_shape dim0 size: " << x_shape_shape[0] << ".";
+                             << ", first dim of x_indices and first dim of x_shape must be the same"
+                             << ", but got first dim of x_indices: " << x_indices_shape[0]
+                             << ", first dim of x_shape: " << x_shape_shape[0] << ".";
   }
   if (x_indices_shape[1] != x_values_shape[0]) {
     MS_EXCEPTION(ValueError) << "For " << prim_name
-                             << ", size of dim1 of x_indices and dim0 of x_values must be the same"
-                             << ", but got x_indices dim1 size: " << x_indices_shape[1]
-                             << ", x_shape dim0 size: " << x_values_shape[0] << ".";
+                             << ", second dim of x_indices and first dim of x_values must be the same"
+                             << ", but got second dim of x_indices: " << x_indices_shape[1]
+                             << ", first dim of x_values: " << x_values_shape[0] << ".";
   }
   ShapeVector y_indices_shape = {x_indices_shape[0], -1};
   ShapeVector y_indices_min_shape = {x_indices_shape[0], 1};
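The three checks above encode the COO sparse-tensor invariants: x_indices is (ndims, nnz), x_values carries one value per column of x_indices, and x_shape carries one extent per row of x_indices. A self-contained illustration with made-up values:

#include <cstdint>
#include <iostream>
#include <vector>

int main() {
  // COO layout: x_indices is (ndims, nnz), x_values is (nnz), x_shape is (ndims).
  std::vector<std::vector<int64_t>> x_indices{{0, 1, 2}, {1, 0, 2}};  // 2 x 3: ndims = 2, nnz = 3
  std::vector<float> x_values{1.0f, 2.0f, 3.0f};                      // nnz = 3
  std::vector<int64_t> x_shape{4, 4};                                 // ndims = 2

  bool ok = x_indices.size() == x_shape.size() &&    // first dim of x_indices == first dim of x_shape
            x_indices[0].size() == x_values.size();  // second dim of x_indices == first dim of x_values
  std::cout << (ok ? "valid COO triplet" : "shape mismatch") << std::endl;
  return 0;
}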
@@ -57,7 +57,10 @@ abstract::ShapePtr ConcatInferShape(const PrimitivePtr &primitive, const std::ve
     for (size_t j = 0; j < element0_rank; ++j) {
       if (j != axis && elementi_shape[j] != element0_shape[j]) {
         MS_LOG(EXCEPTION) << "For '" << prim_name << "', element " << i
-                          << " shape in input should concat with first element, but it can not.";
+                          << " shape in input can not concat with first element. To perform concat in the axis 0 "
+                             "direction, except for the 0th axis, all other axes must have the same shape. But got "
+                          << "element" << i << "_shape[" << j << "]: " << elementi_shape[j] << ", element0_shape[" << j
+                          << "]: " << element0_shape[j] << ".";
       }
     }
     all_shp = all_shp == -1 || elementi_shape[axis] == -1 ? -1 : all_shp + elementi_shape[axis];
@@ -45,7 +45,7 @@ void CheckShapeAnyAndPositive(const std::string &op, const ShapeVector &shape) {
   for (size_t i = 0; i < shape.size(); ++i) {
     if ((shape[i] < 0) && (shape[i] != Shape::SHP_ANY)) {
       MS_EXCEPTION(ValueError) << "For '" << op << "', shape element [" << i
-                               << "] must be positive integer or SHP_ANY, but got: " << shape[i] << ".";
+                               << "] must be positive integer or -1, but got: " << shape[i] << ".";
     }
   }
 }
@@ -217,10 +217,10 @@ abstract::ShapePtr Conv2dInferShape(const PrimitivePtr &primitive, const std::ve
   if ((x_shape[c_axis] != Shape::SHP_ANY) && (w_shape[c_axis] != Shape::SHP_ANY) &&
       ((x_shape[c_axis] / group) != w_shape[c_axis])) {
     MS_LOG(EXCEPTION) << "For '" << prim_name
-                      << "', 'C_in' of input 'x' shape divide by parameter 'group' should be "
+                      << "', 'C_in' of input 'x' shape divide by parameter 'group' must be "
                          "equal to 'C_in' of input 'weight' shape: "
                       << w_shape[c_axis] << ", but got 'C_in' of input 'x' shape: " << x_shape[c_axis]
-                      << ", and 'group': " << group;
+                      << ", and 'group': " << group << ".";
   }
   int64_t out_channel = CheckAttrPositiveInt64(prim_name, primitive->GetAttr("out_channel"), "out_channel");
   if ((w_shape[n_axis] != Shape::SHP_ANY) && (w_shape[n_axis] != out_channel)) {
@@ -245,8 +245,8 @@ abstract::ShapePtr Conv2dInferShape(const PrimitivePtr &primitive, const std::ve
   int64_t pad_mode;
   CheckAndConvertUtils::GetPadModEnumValue(primitive->GetAttr("pad_mode"), &pad_mode);
   if (!CheckConv2dShape(prim_name, input_args, x_shape, w_shape, padding, pad_mode, w_axis, h_axis)) {
-    MS_LOG(EXCEPTION)
-      << "Shape error for Conv2d, input shape's h and w after padding is less than kernel_size's h and w dims.";
+    MS_LOG(EXCEPTION) << "For 'Conv2d', input shape's h and w after padding must be greater than or equal to "
+                         "kernel_size's h and w respectively.";
   }

   std::vector<int64_t> output_hw;
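Behind the first Conv2d message is the grouped-convolution constraint: C_in of 'x' divided by 'group' must equal C_in of 'weight'. A small standalone check with invented shapes:

#include <cstdint>
#include <iostream>
#include <vector>

int main() {
  // NCHW layouts: x is (N, C_in, H, W), weight is (C_out, C_in / group, kH, kW).
  std::vector<int64_t> x_shape{1, 8, 32, 32};
  std::vector<int64_t> w_shape{16, 4, 3, 3};
  int64_t group = 2;

  const size_t c_axis = 1;
  if (x_shape[c_axis] / group != w_shape[c_axis]) {
    std::cerr << "'C_in' of input 'x' shape divide by parameter 'group' must be equal to "
                 "'C_in' of input 'weight' shape" << std::endl;
    return 1;
  }
  std::cout << "group check passed: " << x_shape[c_axis] << " / " << group << " == " << w_shape[c_axis]
            << std::endl;  // 8 / 2 == 4
  return 0;
}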
@@ -150,7 +150,7 @@ abstract::ShapePtr DropoutGenMaskInferShape(const PrimitivePtr &primitive,
     MS_LOG(EXCEPTION)
       << "For '" << op_name
       << "', the size of max_value and min_value must be equal to the shape rank, but got max_value's size: "
-      << max_value.size() << ", min_value's size: " << min_value.size() << ".";
+      << max_value.size() << ", min_value's size: " << min_value.size() << ", shape rank: " << shape_rank << ".";
   }
   ShapeVector out_min_shape = CalDynamicOutputShape(primitive, min_value);
   ShapeVector out_max_shape = CalDynamicOutputShape(primitive, max_value);
@@ -36,7 +36,7 @@ int64_t CheckInputsAndGetShape(const AbstractBasePtr &input_arg, const string &p
   auto input_size = input_shape.size();
   if (input_size != 1) {
-    MS_EXCEPTION(TypeError) << "For " << prim_name << "', input must be 1-D, but dims is " << input_size;
+    MS_EXCEPTION(ValueError) << "For '" << prim_name << "', input shape must be 1-D, but got: " << input_size
+                             << "-D.";
   }
   if (input_shape[0] == abstract::Shape::SHP_ANY) {
@@ -67,14 +67,13 @@ static void seg_left_equation(const std::string &left_equation, const std::strin
       if ((found_ell && (*left_elements)[cur_element].size() > input_shapes[cur_element].size() + 1) ||
           (!found_ell && (*left_elements)[cur_element].size() != input_shapes[cur_element].size())) {
         MS_EXCEPTION(ValueError) << "For '" << prim_name << "', the number of subscript in " << cur_element
-                                 << " operand in the eqaution must match inputs[" << cur_element
-                                 << "].dim(), but it does not.";
+                                 << " operand in the eqaution does not match inputs[" << cur_element << "].dim().";
       }
       ++cur_element;
       if (cur_element >= input_shapes.size()) {
         MS_EXCEPTION(ValueError)
           << "For '" << prim_name
-          << "', the number of inputs must be equal to the number of inputs and equation's operand, but it does not.";
+          << "', the number of inputs must be equal to the number of inputs and equation's operand.";
       }
       found_ell = false;
     } else {
@@ -83,15 +82,14 @@ static void seg_left_equation(const std::string &left_equation, const std::strin
     }
   }
   if (cur_element != input_shapes.size() - 1) {
-    MS_EXCEPTION(ValueError)
-      << "For '" << prim_name
-      << "', the number of inputs must be equal to the number of inputs and equation's operand, but it does not.";
+    MS_EXCEPTION(ValueError) << "For '" << prim_name
+                             << "', the number of inputs must be equal to the number of equation's operand.";
   }
   for (size_t i = 0; i < (*left_elements).size(); ++i) {
     auto it = std::find((*left_elements)[i].begin(), (*left_elements)[i].end(), kEinsumEllVal);
     if ((*left_elements)[i].size() != input_shapes[i].size() && it == (*left_elements)[i].end()) {
       MS_EXCEPTION(ValueError) << "For '" << prim_name << "', the number of subscript in " << i
-                               << " operand in the eqaution must match inputs[" << i << "].dim(), but it does not.";
+                               << " operand in the eqaution does not match inputs[" << i << "].dim().";
     }
   }
 }
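The einsum checks enforce that each operand's subscript list carries exactly one label per input dimension (an ellipsis may absorb extra leading dims). A rough sketch of the non-ellipsis case, outside MindSpore:

#include <cstdint>
#include <iostream>
#include <string>
#include <vector>

int main() {
  // "ij,jk" against two inputs: each comma-separated term needs one letter per dim.
  std::vector<std::string> left_elements{"ij", "jk"};
  std::vector<std::vector<int64_t>> input_shapes{{2, 3}, {3, 4}};

  for (size_t i = 0; i < left_elements.size(); ++i) {
    if (left_elements[i].size() != input_shapes[i].size()) {
      std::cerr << "the number of subscript in " << i << " operand in the equation does not match inputs[" << i
                << "].dim()." << std::endl;
      return 1;
    }
  }
  std::cout << "subscripts match input ranks" << std::endl;
  return 0;
}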
@@ -207,7 +205,7 @@ static void element_map_shape(const std::string &prim_name, const std::vector<st
     if ((*element_shape_map)[kEinsumEllVal] != temp_vec) {
       MS_EXCEPTION(ValueError)
         << "For '" << prim_name
-        << "', the same ellipsis in equation can only represent the same dimension in inputs, but it does not.";
+        << "', the same ellipsis in equation can only represent the same dimension in inputs.";
     }
   } else {
     (*element_shape_map)[kEinsumEllVal] = temp_vec;
@@ -139,7 +139,7 @@ ValuePtr EqualInferValue(const PrimitivePtr &prim, const std::vector<AbstractBas
       MS_EXCEPTION(TypeError) << "For '" << prim->name()
                               << "', the supported type is in the list: ['bool', 'int8', 'int16', 'int32', 'int64', "
                                  "'complex64', 'complex128', 'uint8', 'float16', 'float32', 'float64'], but got "
-                              << result_type->ToString();
+                              << result_type->ToString() << ".";
     }
   }
   return result_tensor;
@@ -32,7 +32,7 @@ namespace ops {
 MIND_API_OPERATOR_IMPL(FillV2, BaseOperator);
 abstract::ShapePtr FillV2InferShape(const PrimitivePtr &primitive, const std::vector<AbstractBasePtr> &input_args) {
   if (!input_args[0]->isa<abstract::AbstractTensor>()) {
-    MS_EXCEPTION(TypeError) << "Input[0] only support tensor!";
+    MS_EXCEPTION(TypeError) << "For '" << primitive->name() << "', input[0] must be tensor.";
   }
   MS_EXCEPTION_IF_NULL(primitive);
   const uint32_t kInputDims = 1;
@@ -65,7 +65,7 @@ abstract::ShapePtr FillV2InferShape(const PrimitivePtr &primitive, const std::ve
   auto shape_v = shape_ptr->shape();
   if (shape_v.size() != kInputDims) {
     MS_EXCEPTION(ValueError) << "For '" << primitive->name()
-                             << "', input must be a 1-D tensor, but got: " << shape_v.size() << "-D.";
+                             << "', input must be a 1-D tensor, but got a: " << shape_v.size() << "-D tensor.";
   }
   if (!input_args[0]->BuildValue()->isa<AnyValue>() && !input_args[0]->BuildValue()->isa<None>()) {
     std::vector<int64_t> out_shape;
@@ -78,7 +78,8 @@ abstract::ShapePtr FillV2InferShape(const PrimitivePtr &primitive, const std::ve
         shape_m *= input_shape_ptr[i];
       } else {
         MS_EXCEPTION(ValueError) << "For '" << primitive->name()
-                                 << "', each dimension of input shape must be greater than 0.";
+                                 << "', each dimension of input shape must be greater than 0, but got input shape "
+                                 << i << ": " << input_shape_ptr[i] << ".";
       }
     }
   } else if (input_type_element->type_id() == kNumberTypeInt64) {
@@ -89,7 +90,8 @@ abstract::ShapePtr FillV2InferShape(const PrimitivePtr &primitive, const std::ve
         shape_m *= input_shape_ptr[i];
       } else {
         MS_EXCEPTION(ValueError) << "For '" << primitive->name()
-                                 << "', each dimension of input shape must be greater than 0.";
+                                 << "', each dimension of input shape must be greater than 0, but got input shape "
+                                 << i << ": " << input_shape_ptr[i] << ".";
       }
     }
   } else {
@@ -46,12 +46,12 @@ abstract::TupleShapePtr FractionalMaxPoolInferShape(const PrimitivePtr &primitiv
   }
   auto pooling_ratio = GetValue<std::vector<float>>(primitive->GetAttr(kPoolingRatio));
   if (pooling_ratio.size() != kPoolingRatioDims) {
-    MS_EXCEPTION(ValueError) << "For '" << op_name << "', the size of parameter 'pooling_ratio' must be 4, but got "
+    MS_EXCEPTION(ValueError) << "For '" << op_name << "', the size of parameter pooling ratio must be 4, but got "
                              << std::to_string(pooling_ratio.size()) << ".";
   }
   if (pooling_ratio[kInputIndex0] != 1.0) {
     MS_EXCEPTION(ValueError) << "For '" << op_name
-                             << "', the first element of parameter 'pooling_ratio' must be 1.0, but got "
+                             << "', the first element of parameter pooling ratio must be 1.0, but got "
                              << std::to_string(pooling_ratio[0]) << ".";
   }
   if (pooling_ratio[kInputIndex1] < 1.0) {
@@ -65,7 +65,7 @@ abstract::TupleShapePtr FractionalMaxPoolInferShape(const PrimitivePtr &primitiv
                              << std::to_string(pooling_ratio[kInputIndex2]) << ".";
   }
   if (pooling_ratio[kInputIndex3] != 1.0) {
-    MS_EXCEPTION(ValueError) << "For '" << op_name << "', the forth element of 'pooling_ratio' must be 1.0, but got: "
+    MS_EXCEPTION(ValueError) << "For '" << op_name << "', the forth element of pooling ratio must be 1.0, but got: "
                              << std::to_string(pooling_ratio[kInputIndex3]) << ".";
   }
   std::vector<int64_t> out_shape(x_rank);
@@ -42,7 +42,8 @@ abstract::ShapePtr AvgPool3DGradInferShape(const PrimitivePtr &primitive,
   if (input_args[0]->isa<abstract::AbstractTuple>()) {  // origin_size is tuple
     origin_input_size = GetValue<std::vector<int64_t>>(input_args[0]->BuildValue());
   } else {
-    MS_LOG(EXCEPTION) << "For '" << op_name << "', the first input data size must be a tuple.";
+    MS_LOG(EXCEPTION) << "For '" << op_name << "', the first input data size must be a tuple, but got: "
+                      << input_args[0]->BuildShape()->ToString() << ".";
   }
   return std::make_shared<abstract::Shape>(origin_input_size);
 }
@@ -120,7 +120,10 @@ abstract::ShapePtr Conv2DBackpropInputInferShape(const PrimitivePtr &primitive,

   auto x_size_len = LongToSize(shape_shape[0]);
   if (shape_max.size() != x_size_len || shape_min.size() != x_size_len) {
-    MS_LOG(EXCEPTION) << "For " << prim_name << ", x size's min or max value is valid.";
+    MS_LOG(EXCEPTION) << "For " << prim_name
+                      << ", x size's min and max value must be equal to x size len, but got min value: "
+                      << shape_min.size() << ", max value: " << shape_max.size() << ", x size len: " << x_size_len
+                      << ".";
   }

   for (size_t i = 0; i < x_size_len; i++) {
@@ -140,7 +143,7 @@ abstract::ShapePtr Conv2DBackpropInputInferShape(const PrimitivePtr &primitive,
     auto size_type = input_size->BuildType();
     MS_EXCEPTION_IF_NULL(size_type);
     MS_EXCEPTION(TypeError) << "For '" << prim_name
-                            << "', input[x size] must be a tuple or Tensor, but got: " << size_type->ToString() << ".";
+                            << "', input x's size must be a tuple or Tensor, but got: " << size_type->ToString() << ".";
   }
   auto dout_shape =
     CheckAndConvertUtils::ConvertShapePtrToShapeMap(input_args[kConv2DBackpropInputDoutIndex]->BuildShape())[kShape];
@@ -52,7 +52,7 @@ abstract::ShapePtr FractionalMaxPoolGradInferShape(const PrimitivePtr &primitive
   }
   if (shape_error) {
     MS_EXCEPTION(ValueError) << "For '" << op_name
-                             << "', the shape of 'out_backprop' must be consistent with the shape of 'orig_output'.";
+                             << "', the shape of 'out_backprop' does not consistent with the shape of 'orig_output'.";
   }
   auto infer_shape = std::make_shared<abstract::Shape>(in_shape);
   return infer_shape;
@@ -36,15 +36,15 @@ abstract::TupleShapePtr GridSampler3DGradInferShape(const PrimitivePtr &primitiv
   auto input_x_shape = CheckAndConvertUtils::ConvertShapePtrToShapeMap(input_args[kOne]->BuildShape())[kShape];
   auto grid_shape = CheckAndConvertUtils::ConvertShapePtrToShapeMap(input_args[kTwo]->BuildShape())[kShape];
   if (grad_shape.size() != kFive) {
-    MS_EXCEPTION(ValueError) << "For '" << primitive->name() << "', grad must be a 5-D tensor, but got a "
+    MS_EXCEPTION(ValueError) << "For '" << primitive->name() << "', 'grad' must be a 5-D tensor, but got a "
                              << std::to_string(grad_shape.size()) << "-D tensor.";
   }
   if (input_x_shape.size() != kFive) {
-    MS_EXCEPTION(ValueError) << "For '" << primitive->name() << "', input_x must be a 5-D tensor, but got a "
+    MS_EXCEPTION(ValueError) << "For '" << primitive->name() << "', 'input_x' must be a 5-D tensor, but got a "
                              << std::to_string(input_x_shape.size()) << "-D tensor.";
   }
   if (grid_shape.size() != kFive) {
-    MS_EXCEPTION(ValueError) << "For '" << primitive->name() << "', grid must be a 5-D tensor, but got a "
+    MS_EXCEPTION(ValueError) << "For '" << primitive->name() << "', 'grid' must be a 5-D tensor, but got a "
                              << std::to_string(grid_shape.size()) << "-D tensor.";
   }
   if (input_x_shape[kZero] != grid_shape[kZero]) {
@@ -55,7 +55,7 @@ abstract::TupleShapePtr GridSampler3DGradInferShape(const PrimitivePtr &primitiv
                              << ", the shape of 'input_x': " << input_args[kOne]->BuildShape()->ToString() << ".";
   }
   if (grid_shape[kFour] != kThree) {
-    MS_EXCEPTION(ValueError) << "For '" << primitive->name() << "', the last dimension of grid must be 3, but got "
+    MS_EXCEPTION(ValueError) << "For '" << primitive->name() << "', the last dimension of 'grid' must be 3, but got "
                              << std::to_string(grid_shape[kFour]) << ".";
   }
   std::vector<int64_t> out_shape = {input_x_shape[kZero], input_x_shape[kOne], grid_shape[kOne], grid_shape[kTwo],
@@ -69,7 +69,7 @@ abstract::TupleShapePtr GridSampler3DGradInferShape(const PrimitivePtr &primitiv
   }
   if (shape_error) {
     MS_EXCEPTION(ValueError) << "For '" << primitive->name()
-                             << "', the shape of grad must be the same as that of output, but got grad shape: "
+                             << "', the shape of 'grad' must be the same as that of output, but got 'grad' shape: "
                              << input_args[kZero]->BuildShape()->ToString() << ", output shape: ("
                              << std::to_string(out_shape[kZero]) << ", " << std::to_string(out_shape[kOne]) << ", "
                              << std::to_string(out_shape[kTwo]) << ", " << std::to_string(out_shape[kThree]) << ", "
@@ -46,8 +46,8 @@ abstract::ShapePtr SoftplusGradInfershape(const PrimitivePtr &primitive,
   if (!x_shape_ptr->IsDynamic() && !output_shape_ptr->IsDynamic()) {
     if (*x_shape != *output_shape) {
       MS_EXCEPTION(ValueError) << "For '" << prim_name
-                               << "', evaluator arg x and output must have the same shape, but got x shape: "
-                               << x_shape->ToString() << ", output shape: " << output_shape->ToString() << ".";
+                               << "', evaluator arg 'x' and 'output' must have the same shape, but got 'x' shape: "
+                               << x_shape->ToString() << ", 'output' shape: " << output_shape->ToString() << ".";
     }
   }
   auto shape_element = x_shape_ptr;
@@ -86,8 +86,8 @@ abstract::ShapePtr StridedSliceGradInferShape(const PrimitivePtr &primitive,
   if (shape_max.size() != shapex_len || shape_min.size() != shapex_len) {
     MS_LOG(EXCEPTION)
       << "For '" << prim_name
-      << "', shapex's min value size and max value size must match with shapex size. But got min value size: "
-      << shape_min.size() << ", max value size: " << shape_max.size() << ", shapex size: " << shapex_len << ".";
+      << "', 'shapex' min value size and max value size must match with 'shapex' size. But got min value size: "
+      << shape_min.size() << ", max value size: " << shape_max.size() << ", 'shapex' size: " << shapex_len << ".";
   }
   for (size_t i = 0; i < shapex_len; i++) {
     if (shape_min[i] == shape_max[i]) {
@@ -39,9 +39,9 @@ abstract::ShapePtr IOUInferShape(const PrimitivePtr &primitive, const std::vecto
   auto x_shp = x_shape_map[kShape];
   auto y_shp = y_shape_map[kShape];
   if (x_shp.size() != 2 || y_shp.size() != 2) {
-    MS_EXCEPTION(ValueError) << "For 'BatchMatMul', input x, y must have the same dimension size and must be greater"
-                             << "or equal to 3. But got x size = " << x_shp.size() << ", y size = " << y_shp.size()
-                             << ".";
+    MS_EXCEPTION(ValueError)
+      << "For 'BatchMatMul', input x, y must have the same dimension size and must be 2. But got x size = "
+      << x_shp.size() << ", y size = " << y_shp.size() << ".";
   }
   (void)CheckAndConvertUtils::CheckInteger("input numbers", SizeToLong(x_shp[1]), kGreaterEqual, 4, prim_name);
   (void)CheckAndConvertUtils::CheckInteger("input numbers", SizeToLong(y_shp[1]), kGreaterEqual, 4, prim_name);
@@ -95,11 +95,11 @@ AbstractBasePtr LayerNormInfer(const abstract::AnalysisEnginePtr &, const Primit
       (beta_shape_list.size() + begin_params_axis_u < input_shape_list.size())) {
     MS_EXCEPTION(ValueError)
       << "For '" << op_name
-      << ", begin_params_axis must be less than or equal to input_x shape size, gama shape size add "
+      << "', begin_params_axis must be less than or equal to input_x shape size, gamma shape size add "
          "begin_params_axis must be equal to or greater than input_x shape size, and beta shape size add "
-         "begin_params_axis must be equal to or greater than input_x shape size, but got begin_params_axis: "
-      << begin_params_axis << ", input_x shape size: " << input_shape_list.size()
-      << ", gama shape size: " << gamma_shape_list.size() << ", beta shape size: " << beta_shape_list.size() << ".";
+         "begin_params_axis must be equal to or greater than input_x shape size, But got begin_params_axis: "
+      << begin_params_axis_u << ", input_x shape size: " << input_shape_list.size()
+      << ", gamma shape size: " << gamma_shape_list.size() << ", beta shape size: " << beta_shape_list.size() << ".";
   }
   for (size_t i = begin_params_axis_u; i < input_shape_list.size(); ++i) {
     size_t gamma_beta_shape_dim = i - begin_params_axis_u;
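The LayerNorm condition being reported: with normalization starting at begin_params_axis, gamma and beta must cover the trailing dimensions of input_x from that axis on. A sketch of the arithmetic with illustrative shapes:

#include <cstddef>
#include <iostream>
#include <vector>

int main() {
  // input_x: (N, S, H); gamma/beta normally cover the dims from begin_params_axis onward.
  std::vector<size_t> input_shape{32, 128, 768};
  std::vector<size_t> gamma_shape{768};
  std::vector<size_t> beta_shape{768};
  size_t begin_params_axis = 2;

  bool ok = begin_params_axis <= input_shape.size() &&
            gamma_shape.size() + begin_params_axis >= input_shape.size() &&
            beta_shape.size() + begin_params_axis >= input_shape.size();
  std::cout << (ok ? "gamma/beta cover the normalized dims" : "shape mismatch") << std::endl;
  return 0;
}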
@@ -29,18 +29,18 @@ abstract::ShapePtr LowerBoundInferShape(const PrimitivePtr &primitive, const std
   size_t size_exp = 2;
   if (x_shape.size() != size_exp) {
     MS_EXCEPTION(ValueError) << "For '" << primitive->name()
-                             << "', the rank of sorted_x must be 2, but got: " << values_shape.size() << ".";
+                             << "', the rank of 'sorted_x' must be 2, but got: " << values_shape.size() << ".";
   }
   if (values_shape.size() != size_exp) {
     MS_EXCEPTION(ValueError) << "For '" << primitive->name()
-                             << "', the rank of values must be 2, but got: " << values_shape.size() << ".";
+                             << "', the rank of 'values' must be 2, but got: " << values_shape.size() << ".";
   }
   if (x_shape[0] != values_shape[0]) {
-    MS_EXCEPTION(ValueError)
-      << "For '" << primitive->name()
-      << "', the first dimension of the shape of sorted_x must be equal to that of values, but got shape of values: "
-      << input_args[1]->BuildShape()->ToString() << ", shape of sorted_x:" << input_args[0]->BuildShape()->ToString()
-      << ".";
+    MS_EXCEPTION(ValueError) << "For '" << primitive->name()
+                             << "', the first dimension of the shape of 'sorted_x' must be equal to that of 'values', "
+                                "but got shape of 'values': "
+                             << input_args[1]->BuildShape()->ToString()
+                             << ", shape of 'sorted_x':" << input_args[0]->BuildShape()->ToString() << ".";
   }
   return std::make_shared<abstract::Shape>(values_shape);
 }
@@ -108,9 +108,9 @@ abstract::ShapePtr LuSolveInferShape(const PrimitivePtr &primitive, const std::v
   }
   if (lu_pivots_shape[lu_pivots_shape.size() - 1] != lu_data_shape[lu_data_shape.size() - 1]) {
     MS_EXCEPTION(ValueError) << "For '" << op_name
-                             << "', the last dimension of lu_pivots must be the same as lu_data's, "
-                             << "but got lu_pivots': " << lu_pivots_shape[lu_pivots_shape.size() - 1]
-                             << ", lu_data's: " << lu_data_shape[lu_data_shape.size() - 1] << ".";
+                             << "', the last dim of lu_pivots must be the same as lu_data's last dim, "
+                             << "but got lu_pivots' last dim: " << lu_pivots_shape[lu_pivots_shape.size() - 1]
+                             << ", lu_data's last dim: " << lu_data_shape[lu_data_shape.size() - 1] << ".";
   }
   for (size_t i = 0; i < lu_pivots_shape.size(); i++) {
     if (lu_data_shape[i] != lu_pivots_shape[i]) {
@@ -54,8 +54,10 @@ std::vector<int64_t> CheckAttrSize(const PrimitivePtr &primitive, const std::str
   }

   if (attr_value.size() != attr_size) {
-    MS_EXCEPTION(ValueError) << "For '" << primitive->name() << "', attr '" << attr_name << "' size must be equal to "
-                             << attr_size << ", but got: " << attr_value.size() << ".";
+    MS_EXCEPTION(ValueError) << "For '" << primitive->name() << "', attr '" << attr_name
+                             << "' size must be equal to attr_value size"
+                             << ", but got attr " << attr_name << " size: " << attr_size
+                             << ", attr_value size: " << attr_value.size() << ".";
   }

   return attr_value;
@@ -47,7 +47,7 @@ TypePtr OnesInferType(const PrimitivePtr &prim, const std::vector<AbstractBasePt
   // check
   auto dtype_value = input_args[1]->BuildValue();
   if (!dtype_value->isa<Type>()) {
-    MS_EXCEPTION(TypeError) << "For '" << prim_name << "', the dtype of Ones must be Type(), but got an invalid type!";
+    MS_EXCEPTION(TypeError) << "For '" << prim_name << "', 'dtype' must be Type(), but got an invalid type!";
   }
   auto output_type = dtype_value->cast<TypePtr>();
   const std::set<TypePtr> valid_types = {kBool, kInt8, kInt16, kInt32, kInt64, kUInt8,
@@ -58,7 +58,7 @@ abstract::TupleShapePtr SplitVInferShape(const PrimitivePtr &primitive,
   auto excessive_default_idx = std::find(size_splits.begin(), size_splits.end(), -1);
   if (excessive_default_idx != size_splits.end()) {
     MS_EXCEPTION(ValueError) << "For '" << prim_name
-                             << "', 'size_splits' default value can contain one -1, but got more than one.";
+                             << "', 'size_splits' default value can contain only one -1, but got more than one.";
   } else {
     int64_t sum_of_size_splits = 0;
     for (int64_t i = 0; i < num_split - 1; i++) {
@@ -319,7 +319,8 @@ bool CheckAndGetDynamicSlice(const AbstractBasePtr &input_arg, const std::string
       *slice_len = LongToSize(slice_shape->shape()[0]);
     }
   } else {
-    MS_EXCEPTION(TypeError) << "For 'StridedSlice', '" << arg_name << "' must be tuple or Tensor.";
+    MS_EXCEPTION(TypeError) << "For 'StridedSlice', '" << arg_name
+                            << "' must be tuple or Tensor, but got: " << input_arg->BuildType()->ToString() << ".";
   }

   if (arg_name == "strides") {
@@ -29,18 +29,18 @@ abstract::ShapePtr UpperBoundInferShape(const PrimitivePtr &primitive, const std
   size_t size_exp = 2;
   if (x_shape.size() != size_exp) {
     MS_EXCEPTION(ValueError) << "For '" << primitive->name()
-                             << "', the rank of sorted_x must be equal to 2, but got: " << x_shape.size() << ".";
+                             << "', the rank of 'sorted_x' must be equal to 2, but got: " << x_shape.size() << ".";
   }
   if (values_shape.size() != size_exp) {
     MS_EXCEPTION(ValueError) << "For '" << primitive->name()
-                             << "', the rank of values must be equal to 2, but got: " << values_shape.size() << ".";
+                             << "', the rank of 'values' must be equal to 2, but got: " << values_shape.size() << ".";
   }
   if (x_shape[0] != values_shape[0]) {
     MS_EXCEPTION(ValueError)
       << "For '" << primitive->name()
-      << "', the number of rows of sorted_x must be consistent with that of values, but got the shape of values: "
+      << "', the number of rows of 'sorted_x' must be consistent with that of 'values', but got the shape of 'values': "
       << input_args[1]->BuildShape()->ToString()
-      << ", the shape of sorted_x: " << input_args[0]->BuildShape()->ToString() << ".";
+      << ", the shape of 'sorted_x': " << input_args[0]->BuildShape()->ToString() << ".";
   }
   return std::make_shared<abstract::Shape>(values_shape);
 }
@@ -55,4 +55,4 @@ def test_lenet5_exception():
     net = train_step_with_loss_warp(LeNet5())
     with pytest.raises(RuntimeError) as info:
         _cell_graph_executor.compile(net, predict, label)
-    assert "'C_in' of input 'x' shape divide by parameter 'group' should be " in str(info.value)
+    assert "'C_in' of input 'x' shape divide by parameter 'group' must be " in str(info.value)
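This test pins the exact wording of the Conv2d message changed above, so any later rewording must update this substring in lockstep. The same coupling, sketched in C++ with an invented message (assert semantics only, no test framework assumed):

#include <cassert>
#include <stdexcept>
#include <string>

// Mimics what the pytest assert above does: catch the error and check
// that the expected fragment of the new wording is present.
int main() {
  const std::string expected = "'C_in' of input 'x' shape divide by parameter 'group' must be ";
  try {
    throw std::runtime_error(
        "For 'Conv2D', 'C_in' of input 'x' shape divide by parameter 'group' must be equal to 'C_in' of input "
        "'weight' shape: 3, but got 'C_in' of input 'x' shape: 8, and 'group': 2.");
  } catch (const std::runtime_error &e) {
    assert(std::string(e.what()).find(expected) != std::string::npos);
  }
  return 0;
}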