forked from mindspore-Ecosystem/mindspore
static code check
parent 72cb83f56e
commit 57b5897af5

@@ -72,9 +72,9 @@ abstract::TupleShapePtr AdamInferShape(const PrimitivePtr &primitive, const std:
  auto m_shape = CheckAndConvertUtils::ConvertShapePtrToShapeMap(input_args[kInputIndex1]->BuildShape())[kShape];
  auto v_shape = CheckAndConvertUtils::ConvertShapePtrToShapeMap(input_args[kInputIndex2]->BuildShape())[kShape];
  auto grad_shape = CheckAndConvertUtils::ConvertShapePtrToShapeMap(input_args[kInputIndex9]->BuildShape())[kShape];
- CheckAndConvertUtils::Check("var_shape", var_shape, kEqual, "m_shape", m_shape, prim_name);
- CheckAndConvertUtils::Check("var_shape", var_shape, kEqual, "v_shape", v_shape, prim_name);
- CheckAndConvertUtils::Check("var_shape", var_shape, kEqual, "grad_shape", grad_shape, prim_name);
+ CheckAndConvertUtils::Check("var_shape", var_shape, kEqual, m_shape, prim_name);
+ CheckAndConvertUtils::Check("var_shape", var_shape, kEqual, v_shape, prim_name);
+ CheckAndConvertUtils::Check("var_shape", var_shape, kEqual, grad_shape, prim_name);
  return std::make_shared<abstract::TupleShape>(
      std::vector<abstract::BaseShapePtr>{var_shape_ptr, m_shape_ptr, v_shape_ptr});
  }

@@ -96,14 +96,13 @@ AbstractBasePtr BatchNormInfer(const abstract::AnalysisEnginePtr &, const Primit
  input_shape_norm.push_back(input_x[2]);
  }
  (void)CheckAndConvertUtils::CheckInteger("scale rank", SizeToLong(scale.size()), kEqual, 1, prim_name);
- CheckAndConvertUtils::Check("scale shape", scale, kEqual, "bias shape", bias, prim_name, TypeError);
- CheckAndConvertUtils::Check("scale shape[0]", scale[0], kEqual, "input_x channel", input_shape_norm[1], prim_name,
-     TypeError);
+ CheckAndConvertUtils::Check("scale shape", scale, kEqual, bias, prim_name, TypeError);
+ CheckAndConvertUtils::Check("scale shape[0]", scale[0], kEqual, input_shape_norm[1], prim_name, TypeError);

  if (!GetValue<bool>(primitive->GetAttr(kIsTraining))) {
  (void)CheckAndConvertUtils::CheckInteger("mean rank", SizeToLong(mean.size()), kEqual, 1, prim_name);
- CheckAndConvertUtils::Check("mean shape", mean, kEqual, "variance shape", variance, prim_name, TypeError);
- CheckAndConvertUtils::Check("mean shape", mean, kEqual, "scale shape", scale, prim_name, TypeError);
+ CheckAndConvertUtils::Check("mean shape", mean, kEqual, variance, prim_name, TypeError);
+ CheckAndConvertUtils::Check("mean shape", mean, kEqual, scale, prim_name, TypeError);
  }

  // Infer type

@@ -70,7 +70,7 @@ AbstractBasePtr BatchToSpaceInfer(const abstract::AnalysisEnginePtr &, const Pri
  for (size_t i = 0; i < 2; ++i) {
  auto x_block_prod = out_shape[i + 2] * block_size[i];
  auto crops_sum = crops[i][0] + crops[i][1];
- CheckAndConvertUtils::Check("x block shape prod", x_block_prod, kGreaterThan, "crops sum", attr_size, prim_name);
+ CheckAndConvertUtils::Check("x block shape prod", x_block_prod, kGreaterThan, attr_size, prim_name);
  out_shape[i + 2] = x_block_prod - crops_sum;
  }
  (void)CheckAndConvertUtils::CheckInteger("x_shape[0] % (block_size[0]*block_size[1])",

@@ -47,7 +47,7 @@ abstract::ShapePtr BatchToSpaceNDInferShape(const PrimitivePtr &primitive,
  block_shape_prod = block_shape_prod * block_shape[i];
  auto x_block_prod = out_shape[i + offset] * block_shape[i];
  auto crops_sum = crops[i][0] + crops[i][1];
- CheckAndConvertUtils::Check("x block shape prod", x_block_prod, kGreaterThan, "crops sum", crops_sum, prim_name);
+ CheckAndConvertUtils::Check("x block shape prod", x_block_prod, kGreaterThan, crops_sum, prim_name);
  out_shape[i + offset] = x_block_prod - crops_sum;
  }
  if (out_shape[0] % block_shape_prod != 0) {

@@ -65,10 +65,8 @@ abstract::ShapePtr BatchToSpaceNDInferShape(const PrimitivePtr &primitive,
  auto x_block_prod_min = output_min_shape[i + offset] * block_shape[i];
  auto x_block_prod_max = output_max_shape[i + offset] * block_shape[i];
  auto crops_sum = crops[i][0] + crops[i][1];
- CheckAndConvertUtils::Check("x block shape prod min", x_block_prod_min, kGreaterThan, "crops sum", crops_sum,
-     prim_name);
- CheckAndConvertUtils::Check("x block shape prod max", x_block_prod_max, kGreaterThan, "crops sum", crops_sum,
-     prim_name);
+ CheckAndConvertUtils::Check("x block shape prod min", x_block_prod_min, kGreaterThan, crops_sum, prim_name);
+ CheckAndConvertUtils::Check("x block shape prod max", x_block_prod_max, kGreaterThan, crops_sum, prim_name);
  output_min_shape[i + offset] = x_block_prod_min - crops_sum;
  output_max_shape[i + offset] = x_block_prod_max - crops_sum;
  }

@@ -102,7 +100,7 @@ void BatchToSpaceND::set_crops(std::vector<std::vector<int64_t>> crops) {
  size_t h = crops.size();
  size_t w = crops[0].size();
  std::vector<size_t> temp_w = {2, 2};
- CheckAndConvertUtils::Check(kCrops, {h, w}, kEqual, "paddings_shape(2,2)", temp_w, this->name());
+ CheckAndConvertUtils::Check(kCrops, {h, w}, kEqual, temp_w, this->name());
  for (size_t i = 0; i < h; i++) {
  for (size_t j = 0; j < w; j++) {
  (void)CheckAndConvertUtils::CheckInteger(kCrops, crops[i][j], kGreaterEqual, 0, this->name());

@@ -45,10 +45,10 @@ abstract::ShapePtr BinaryCrossEntroyInferShape(const PrimitivePtr &primitive,
  auto y_shape_ptr = y_shape_BaseShapePtr->cast<abstract::ShapePtr>();
  auto weight_shape_ptr = weight_shape_BaseShapePtr->cast<abstract::ShapePtr>();
  if (!x_shape_ptr->IsDynamic() && !y_shape_ptr->IsDynamic())
- CheckAndConvertUtils::Check("x shape", x_shape, kEqual, "y shape", y_shape, prim_name, ValueError);
+ CheckAndConvertUtils::Check("x shape", x_shape, kEqual, y_shape, prim_name, ValueError);
  if (weight_shape.size() > 0) {
  if (!y_shape_ptr->IsDynamic() && !weight_shape_ptr->IsDynamic())
- CheckAndConvertUtils::Check("y shape", y_shape, kEqual, "weight shape", weight_shape, prim_name, ValueError);
+ CheckAndConvertUtils::Check("y shape", y_shape, kEqual, weight_shape, prim_name, ValueError);
  }
  auto out_shape = x_shape;
  int64_t reduction;
@@ -27,8 +27,7 @@ abstract::ShapePtr BroadcastToInferShape(const PrimitivePtr &primitive,
  auto x_shape = CheckAndConvertUtils::ConvertShapePtrToShapeMap(input_args[0]->BuildShape())[kShape];
  auto value_ptr = primitive->GetAttr(kShape);
  auto input_x = GetValue<std::vector<int64_t>>(value_ptr);
- CheckAndConvertUtils::Check("x shape", SizeToLong(x_shape.size()), kLessEqual, "input_x", SizeToLong(input_x.size()),
-     prim_name);
+ CheckAndConvertUtils::Check("x shape", SizeToLong(x_shape.size()), kLessEqual, SizeToLong(input_x.size()), prim_name);
  auto outer_dim_offset = input_x.size() - x_shape.size();
  bool flag = true;
  if (input_x.end() == find(input_x.begin(), input_x.end(), -1)) {

@@ -289,10 +289,10 @@ void Conv2D::set_pad_mode(const PadMode &pad_mode) {
  std::vector<int64_t> pad = get_pad();
  if (pad_mode == PAD) {
  for (auto item : pad) {
- CheckAndConvertUtils::Check(kPadItem, item, kGreaterEqual, "zeros_list", 0, name());
+ CheckAndConvertUtils::Check(kPadItem, item, kGreaterEqual, 0, name());
  }
  } else {
- CheckAndConvertUtils::Check(kPad, pad, kEqual, "zeros_list", {0, 0, 0, 0}, name());
+ CheckAndConvertUtils::Check(kPad, pad, kEqual, {0, 0, 0, 0}, name());
  }
  int64_t swi = pad_mode;
  (void)AddAttr(kPadMode, MakeValue(swi));

@@ -81,10 +81,10 @@ void Conv2DTranspose::set_pad_mode(const PadMode &pad_mode) {
  std::vector<int64_t> pad = get_pad();
  if (pad_mode == PAD) {
  for (auto item : pad) {
- CheckAndConvertUtils::Check(kPadItem, item, kGreaterEqual, "zeros_list", 0, name());
+ CheckAndConvertUtils::Check(kPadItem, item, kGreaterEqual, 0, name());
  }
  } else {
- CheckAndConvertUtils::Check(kPad, pad, kEqual, "zeros_list", {0, 0, 0, 0}, name());
+ CheckAndConvertUtils::Check(kPad, pad, kEqual, {0, 0, 0, 0}, name());
  }
  int64_t swi = pad_mode;
  (void)AddAttr(kPadMode, MakeValue(swi));

@@ -26,7 +26,7 @@
  namespace mindspore {
  namespace ops {
  void DepthToSpace::set_block_size(const int64_t block_size) {
- CheckAndConvertUtils::Check(kBlockSize, block_size, kGreaterEqual, "", 2, this->name());
+ CheckAndConvertUtils::Check(kBlockSize, block_size, kGreaterEqual, 2, this->name());
  (void)this->AddAttr(kBlockSize, MakeValue(block_size));
  }

@@ -137,10 +137,8 @@ AbstractBasePtr DetectionPostProcessInfer(const abstract::AnalysisEnginePtr &, c
  auto num_classes = GetValue<int64_t>(primitive->GetAttr(kNumClasses));
  CheckAndConvertUtils::CheckInRange("scores_shape[2]", scores_shape[2], kIncludeBoth, {num_classes, num_classes + 1},
      prim_name);
- CheckAndConvertUtils::Check("boxes_shape[1]", boxes_shape[1], kEqual, "scores_shape[1]", scores_shape[1], prim_name,
-     ValueError);
- CheckAndConvertUtils::Check("boxes_shape[1]", boxes_shape[1], kEqual, "anchors_shape[0]", anchors_shape[0], prim_name,
-     ValueError);
+ CheckAndConvertUtils::Check("boxes_shape[1]", boxes_shape[1], kEqual, scores_shape[1], prim_name, ValueError);
+ CheckAndConvertUtils::Check("boxes_shape[1]", boxes_shape[1], kEqual, anchors_shape[0], prim_name, ValueError);

  // Infer shape
  auto max_detections = GetValue<int64_t>(primitive->GetAttr(kMaxDetections));

@@ -38,7 +38,7 @@ abstract::ShapePtr DiagPartInferShape(const PrimitivePtr &primitive, const std::
  std::vector<int64_t> out_shape;
  for (size_t i = 0; i < length; i++) {
  CheckAndConvertUtils::Check("input_shape[i + rank(input_shape) / 2]", input_shape[i + length], kEqual,
-     "input_shape[i]", input_shape[i], op_name, ValueError);
+     input_shape[i], op_name, ValueError);
  (void)out_shape.emplace_back(input_shape[i]);
  }
  return std::make_shared<abstract::Shape>(out_shape);

@@ -41,10 +41,10 @@ abstract::ShapePtr ExtractVolumePatchesInferShape(const PrimitivePtr &primitive,
  (void)CheckAndConvertUtils::CheckInteger("strides_length", strides.size(), kEqual, 5, primitive->name());
  auto padding = GetValue<std::string>(primitive->GetAttr(kPadding));
  for (auto &item : strides) {
- (void)CheckAndConvertUtils::Check("strides", item, kGreaterThan, "zero", 0, primitive->name());
+ (void)CheckAndConvertUtils::Check("strides", item, kGreaterThan, 0, primitive->name());
  }
  for (auto &item : kernel_size) {
- (void)CheckAndConvertUtils::Check("kernel_size", item, kGreaterThan, "zero", 0, primitive->name());
+ (void)CheckAndConvertUtils::Check("kernel_size", item, kGreaterThan, 0, primitive->name());
  }
  std::vector<int64_t> y_shape(5);
  int64_t padding_needed = 0;

@@ -33,7 +33,7 @@ abstract::ShapePtr InferShape(const PrimitivePtr &primitive, const std::vector<A
  auto min_shape = CheckAndConvertUtils::ConvertShapePtrToShapeMap(input_args[kInputIndex1]->BuildShape())[kShape];
  auto max_shape = CheckAndConvertUtils::ConvertShapePtrToShapeMap(input_args[kInputIndex2]->BuildShape())[kShape];
  (void)CheckAndConvertUtils::CheckInteger("x_rank", SizeToLong(in_shape.size()), kGreaterEqual, 1, prim_name);
- CheckAndConvertUtils::Check("min_shape", min_shape, kEqual, "max_shape", max_shape, prim_name);
+ CheckAndConvertUtils::Check("min_shape", min_shape, kEqual, max_shape, prim_name);
  (void)CheckAndConvertUtils::CheckInteger("min_shape", SizeToLong(min_shape.size()), kEqual, 1, prim_name);
  int64_t shape_val = 1;
  for (size_t i = 0; i < in_shape.size(); i++) {
@@ -48,9 +48,9 @@ AbstractBasePtr FakeQuantWithMinMaxVarsPerChannelInfer(const abstract::AnalysisE
  auto min_shape = CheckAndConvertUtils::ConvertShapePtrToShapeMap(input_args[kInputIndex1]->BuildShape())[kShape];
  auto max_shape = CheckAndConvertUtils::ConvertShapePtrToShapeMap(input_args[kInputIndex2]->BuildShape())[kShape];
  (void)CheckAndConvertUtils::CheckInteger("x rank", (int64_t)x_shape.size(), kGreaterThan, 1, op_name);
- CheckAndConvertUtils::Check("min shape", min_shape, kEqual, "max shape", max_shape, op_name);
+ CheckAndConvertUtils::Check("min shape", min_shape, kEqual, max_shape, op_name);
  (void)CheckAndConvertUtils::CheckInteger("min shape", (int64_t)min_shape.size(), kEqual, 1, op_name);
- CheckAndConvertUtils::Check("min shape", min_shape[0], kEqual, "x shape", x_shape[x_shape.size() - 1], op_name);
+ CheckAndConvertUtils::Check("min shape", min_shape[0], kEqual, x_shape[x_shape.size() - 1], op_name);

  auto x_type = input_args[kInputIndex0]->BuildType();
  auto min_type = input_args[kInputIndex1]->BuildType();

@@ -51,8 +51,8 @@ AbstractBasePtr SliceFusionInfer(const abstract::AnalysisEnginePtr &, const Prim
  }
  auto begin = GetValue<std::vector<int64_t>>(begin_v);
  auto size = GetValue<std::vector<int64_t>>(size_v);
- CheckAndConvertUtils::Check("len of begin", (int64_t)begin.size(), kEqual, "len x's dim", SizeToLong(x_shape_len));
- CheckAndConvertUtils::Check("len of size", (int64_t)size.size(), kEqual, "len x's dim", SizeToLong(x_shape_len));
+ CheckAndConvertUtils::Check("len of begin", (int64_t)begin.size(), kEqual, SizeToLong(x_shape_len));
+ CheckAndConvertUtils::Check("len of size", (int64_t)size.size(), kEqual, SizeToLong(x_shape_len));

  for (size_t i = 0; i < x_shape_len; i++) {
  (void)CheckAndConvertUtils::CheckInteger("input size[" + std::to_string(i) + "]", size[i], kGreaterThan, 0, "");

@@ -32,12 +32,12 @@ abstract::ShapePtr GatherDInferShape(const PrimitivePtr &primitive, const std::v
  auto x_shape = CheckAndConvertUtils::ConvertShapePtrToShapeMap(input_args[kInputIndex0]->BuildShape())[kShape];
  auto index_shape = CheckAndConvertUtils::ConvertShapePtrToShapeMap(input_args[kInputIndex2]->BuildShape())[kShape];
  int64_t x_rank = SizeToLong(x_shape.size());
- CheckAndConvertUtils::Check("x_rank", x_rank, kEqual, "index_rank", SizeToLong(index_shape.size()), prim_name);
+ CheckAndConvertUtils::Check("x_rank", x_rank, kEqual, SizeToLong(index_shape.size()), prim_name);
  auto value_ptr = input_args[1]->BuildValue();
  MS_EXCEPTION_IF_NULL(value_ptr);
  auto dim_v = GetValue<int64_t>(value_ptr);
- CheckAndConvertUtils::Check("dim value", dim_v, kGreaterEqual, "negative index_rank", -x_rank, prim_name);
- CheckAndConvertUtils::Check("dim value", dim_v, kLessThan, "index_rank", x_rank, prim_name);
+ CheckAndConvertUtils::Check("dim value", dim_v, kGreaterEqual, -x_rank, prim_name);
+ CheckAndConvertUtils::Check("dim value", dim_v, kLessThan, x_rank, prim_name);

  if (dim_v < 0) {
  dim_v = dim_v + x_rank;

@@ -45,7 +45,7 @@ abstract::ShapePtr GatherDInferShape(const PrimitivePtr &primitive, const std::v
  for (size_t i = 0; i < x_shape.size(); ++i) {
  if (SizeToLong(i) == dim_v) continue;
  MS_LOG(INFO) << "Check " << i << "th x shape";
- CheckAndConvertUtils::Check("x shape", x_shape[i], kEqual, "index_rank", index_shape[i], prim_name);
+ CheckAndConvertUtils::Check("x shape", x_shape[i], kEqual, index_shape[i], prim_name);
  }
  return std::make_shared<abstract::Shape>(index_shape);
  }

@@ -56,7 +56,7 @@ AbstractBasePtr BatchNormGradInfer(const abstract::AnalysisEnginePtr &, const Pr
      input_num, primitive->name());
  auto y_backprop_shape = CheckAndConvertUtils::ConvertShapePtrToShapeMap(input_args[0]->BuildShape())[kShape];
  auto x_shape = CheckAndConvertUtils::ConvertShapePtrToShapeMap(input_args[1]->BuildShape())[kShape];
- CheckAndConvertUtils::Check("BatchNorm y_backprop_shape", y_backprop_shape, kEqual, "BatchNorm x_shape", x_shape);
+ CheckAndConvertUtils::Check("BatchNorm y_backprop_shape", y_backprop_shape, kEqual, x_shape);

  auto dx = input_args[kInputIndex1]->Broaden();
  auto dscale = input_args[kInputIndex2]->Broaden();

@@ -30,9 +30,9 @@ abstract::ShapePtr BinaryCrossEntroyGradInferShape(const PrimitivePtr &primitive
  auto x_shape = CheckAndConvertUtils::ConvertShapePtrToShapeMap(input_args[kInputIndex0]->BuildShape())[kShape];
  auto y_shape = CheckAndConvertUtils::ConvertShapePtrToShapeMap(input_args[kInputIndex1]->BuildShape())[kShape];
  auto weight_shape = CheckAndConvertUtils::ConvertShapePtrToShapeMap(input_args[kInputIndex2]->BuildShape())[kShape];
- CheckAndConvertUtils::Check("x shape", x_shape, kEqual, "y shape", y_shape, prim_name);
+ CheckAndConvertUtils::Check("x shape", x_shape, kEqual, y_shape, prim_name);
  if (weight_shape.size() < 1) {
- CheckAndConvertUtils::Check("y shape", y_shape, kEqual, "weight shape", weight_shape, prim_name);
+ CheckAndConvertUtils::Check("y shape", y_shape, kEqual, weight_shape, prim_name);
  }
  return std::make_shared<abstract::Shape>(x_shape);
  }

@@ -35,7 +35,7 @@ abstract::ShapePtr InferShape(const PrimitivePtr &primitive, const std::vector<A
  auto cdist_shape = CheckAndConvertUtils::ConvertShapePtrToShapeMap(input_args[3]->BuildShape())[kShape];
  auto x_size = x_shape.size();
  auto y_size = y_shape.size();
- CheckAndConvertUtils::Check("grad shape", grad_shape, kEqual, "cdist shape", cdist_shape, prim_name, ValueError);
+ CheckAndConvertUtils::Check("grad shape", grad_shape, kEqual, cdist_shape, prim_name, ValueError);
  if (x_size != y_size) {
  MS_EXCEPTION(ValueError) << "For CdistGrad, rank of input_x and input_y should be equal.";
  }

@@ -211,10 +211,10 @@ void Conv2DBackpropInput::set_pad_mode(const PadMode &pad_mode) {
  std::vector<int64_t> pad = get_pad();
  if (pad_mode == PAD) {
  for (auto item : pad) {
- CheckAndConvertUtils::Check(kPadItem, item, kGreaterEqual, "zeros_list", 0, name());
+ CheckAndConvertUtils::Check(kPadItem, item, kGreaterEqual, 0, name());
  }
  } else {
- CheckAndConvertUtils::Check(kPad, pad, kEqual, "zeros_list", {0, 0, 0, 0}, name());
+ CheckAndConvertUtils::Check(kPad, pad, kEqual, {0, 0, 0, 0}, name());
  }
  int64_t swi = pad_mode;
  (void)AddAttr(kPadMode, MakeValue(swi));
@@ -35,8 +35,7 @@ abstract::ShapePtr HShrinkGradInferShape(const PrimitivePtr &primitive,
  auto gradients_shape = CheckAndConvertUtils::ConvertShapePtrToShapeMap(input_args[0]->BuildShape())[kShape];
  auto features_shape = CheckAndConvertUtils::ConvertShapePtrToShapeMap(input_args[1]->BuildShape())[kShape];

- CheckAndConvertUtils::Check("gradients_shape", gradients_shape, kEqual, "features_shape", features_shape, prim_name,
-     TypeError);
+ CheckAndConvertUtils::Check("gradients_shape", gradients_shape, kEqual, features_shape, prim_name, TypeError);
  return std::make_shared<abstract::Shape>(gradients_shape);
  }

@@ -41,7 +41,7 @@ abstract::ShapePtr InferShape(const PrimitivePtr &primitive, const std::vector<A
  auto prim_name = primitive->name();
  auto grads_shape = CheckAndConvertUtils::ConvertShapePtrToShapeMap(input_args[0]->BuildShape())[kShape];
  auto input_x_shape = CheckAndConvertUtils::ConvertShapePtrToShapeMap(input_args[1]->BuildShape())[kShape];
- CheckAndConvertUtils::Check("grads_shape", grads_shape, kEqual, "input_x_shape", input_x_shape, prim_name, TypeError);
+ CheckAndConvertUtils::Check("grads_shape", grads_shape, kEqual, input_x_shape, prim_name, TypeError);
  return std::make_shared<abstract::Shape>(grads_shape);
  }

@@ -32,9 +32,9 @@ abstract::ShapePtr SoftMarginLossGradInferShape(const PrimitivePtr &primitive,
  auto predict = CheckAndConvertUtils::ConvertShapePtrToShapeMap(input_args[kInputIndex0]->BuildShape())[kShape];
  auto label = CheckAndConvertUtils::ConvertShapePtrToShapeMap(input_args[kInputIndex1]->BuildShape())[kShape];
  auto dout = CheckAndConvertUtils::ConvertShapePtrToShapeMap(input_args[kInputIndex2]->BuildShape())[kShape];
- CheckAndConvertUtils::Check("logits shape", predict, kEqual, "labels shape", label, op_name, ValueError);
+ CheckAndConvertUtils::Check("logits shape", predict, kEqual, label, op_name, ValueError);
  if (dout.size() > 1) {
- CheckAndConvertUtils::Check("logits shape", predict, kEqual, "dout shape", dout, op_name, ValueError);
+ CheckAndConvertUtils::Check("logits shape", predict, kEqual, dout, op_name, ValueError);
  }
  return std::make_shared<abstract::Shape>(predict);
  }

@@ -38,8 +38,7 @@ abstract::ShapePtr SoftShrinkGradInferShape(const PrimitivePtr &primitive,
  auto input_grad_shape = CheckAndConvertUtils::ConvertShapePtrToShapeMap(input_args[0]->BuildShape())[kShape];
  auto input_x_shape = CheckAndConvertUtils::ConvertShapePtrToShapeMap(input_args[1]->BuildShape())[kShape];
  auto prim_name = primitive->name();
- CheckAndConvertUtils::Check("input_grad_shape", input_grad_shape, kEqual, "input_x_shape", input_x_shape, prim_name,
-     TypeError);
+ CheckAndConvertUtils::Check("input_grad_shape", input_grad_shape, kEqual, input_x_shape, prim_name, TypeError);
  return std::make_shared<abstract::Shape>(input_grad_shape);
  }

@@ -35,7 +35,7 @@ abstract::ShapePtr IndexAddInferShape(const PrimitivePtr &primitive, const std::
  auto y_shape = CheckAndConvertUtils::ConvertShapePtrToShapeMap(input_args[kInputIndex2]->BuildShape())[kShape];
  auto x_rank = SizeToLong(x_shape.size());
  auto y_rank = SizeToLong(y_shape.size());
- CheckAndConvertUtils::Check("x rank", x_rank, kEqual, "y rank", y_rank, prim_name);
+ CheckAndConvertUtils::Check("x rank", x_rank, kEqual, y_rank, prim_name);
  auto axis = GetValue<int64_t>(primitive->GetAttr(kAxis));
  CheckAndConvertUtils::CheckInRange("axis", axis, kIncludeNeither, {-x_rank - 1, x_rank}, prim_name);
  auto idx_shape = CheckAndConvertUtils::ConvertShapePtrToShapeMap(input_args[1]->BuildShape())[kShape];

@@ -45,11 +45,10 @@ abstract::ShapePtr IndexAddInferShape(const PrimitivePtr &primitive, const std::
  if (axis < 0) {
  axis_rank = axis + x_rank;
  }
- (void)CheckAndConvertUtils::Check("size of indices", idx_shape[0], kEqual, "dimension of y[axis]", y_shape[axis_rank],
-     prim_name);
+ (void)CheckAndConvertUtils::Check("size of indices", idx_shape[0], kEqual, y_shape[axis_rank], prim_name);
  for (int dim = 0; dim < x_rank; dim = dim + 1) {
  if (dim != axis_rank) {
- (void)CheckAndConvertUtils::Check("x dim", x_shape[dim], kEqual, "y dim", y_shape[dim], prim_name);
+ (void)CheckAndConvertUtils::Check("x dim", x_shape[dim], kEqual, y_shape[dim], prim_name);
  }
  }
  return std::make_shared<abstract::Shape>(x_shape);

@@ -40,7 +40,7 @@ abstract::ShapePtr IsCloseInferShape(const PrimitivePtr &primitive, const std::v
  auto other_shape = CheckAndConvertUtils::ConvertShapePtrToShapeMap(input_args[kInputIndex1]->BuildShape())[kShape];
  auto input_rank = SizeToLong(input_shape.size());
  auto other_rank = SizeToLong(other_shape.size());
- CheckAndConvertUtils::Check("input rank", input_rank, kEqual, "other rank", other_rank, op_name);
+ CheckAndConvertUtils::Check("input rank", input_rank, kEqual, other_rank, op_name);
  int64_t input_size = 1, other_size = 1;
  for (size_t i = 0; i < input_shape.size(); i++) {
  input_size *= input_shape[i];

@@ -31,8 +31,7 @@ abstract::TupleShapePtr InferShape(const PrimitivePtr &primitive, const std::vec
  const constexpr int64_t kNumber1 = 1;
  const constexpr int64_t kNumber2 = 2;
  CheckAndConvertUtils::CheckInteger("x rank", x_rank, kGreaterEqual, kNumber2, prim_name);
- CheckAndConvertUtils::Check("row size", x_shape[x_rank - kNumber1], kEqual, "column size", x_shape[x_rank - kNumber2],
-     prim_name);
+ CheckAndConvertUtils::Check("row size", x_shape[x_rank - kNumber1], kEqual, x_shape[x_rank - kNumber2], prim_name);
  CheckAndConvertUtils::CheckInteger("row size", x_shape[x_rank - kNumber1], kGreaterEqual, kNumber2, prim_name);
  CheckAndConvertUtils::CheckInteger("column size", x_shape[x_rank - kNumber2], kGreaterEqual, kNumber2, prim_name);
  std::vector<int64_t> shape(x_shape.begin(), (x_shape.end() - kNumber2));

@@ -41,7 +41,7 @@ AbstractBasePtr LstmInfer(const PrimitivePtr &primitive, const std::vector<Abstr

  (void)CheckAndConvertUtils::CheckInteger("h_shape.size()", SizeToLong(h_input_shape.size()), kEqual, shape_size,
      prim_name);
- CheckAndConvertUtils::Check("h_shape", h_input_shape, kEqual, "c_shape", c_input_shape, prim_name);
+ CheckAndConvertUtils::Check("h_shape", h_input_shape, kEqual, c_input_shape, prim_name);

  int64_t num_layers = GetValue<int64_t>(primitive->GetAttr(kNumLayers));
  bool bidirectional = GetValue<bool>(primitive->GetAttr(kBidirectional));
@@ -31,8 +31,7 @@ abstract::ShapePtr InferShape(const PrimitivePtr &primitive, const std::vector<A
  const constexpr int64_t kNumber1 = 1;
  const constexpr int64_t kNumber2 = 2;
  CheckAndConvertUtils::CheckInteger("x rank", x_rank, kGreaterEqual, kNumber2, prim_name);
- CheckAndConvertUtils::Check("row size", x_shape[x_rank - kNumber1], kEqual, "column size", x_shape[x_rank - kNumber2],
-     prim_name);
+ CheckAndConvertUtils::Check("row size", x_shape[x_rank - kNumber1], kEqual, x_shape[x_rank - kNumber2], prim_name);
  CheckAndConvertUtils::CheckInteger("row size", x_shape[x_rank - kNumber1], kGreaterEqual, kNumber2, prim_name);
  CheckAndConvertUtils::CheckInteger("column size", x_shape[x_rank - kNumber2], kGreaterEqual, kNumber2, prim_name);
  std::vector<int64_t> out_shape(x_shape.begin(), (x_shape.end() - kNumber2));

@@ -31,8 +31,7 @@ abstract::ShapePtr InferShape(const PrimitivePtr &primitive, const std::vector<A
  const constexpr int64_t kNumber1 = 1;
  const constexpr int64_t kNumber2 = 2;
  CheckAndConvertUtils::CheckInteger("x rank", x_rank, kGreaterEqual, kNumber2, prim_name);
- CheckAndConvertUtils::Check("row size", x_shape[x_rank - kNumber1], kEqual, "column size", x_shape[x_rank - kNumber2],
-     prim_name);
+ CheckAndConvertUtils::Check("row size", x_shape[x_rank - kNumber1], kEqual, x_shape[x_rank - kNumber2], prim_name);
  CheckAndConvertUtils::CheckInteger("row size", x_shape[x_rank - kNumber1], kGreaterEqual, kNumber2, prim_name);
  CheckAndConvertUtils::CheckInteger("column size", x_shape[x_rank - kNumber2], kGreaterEqual, kNumber2, prim_name);
  return std::make_shared<abstract::Shape>(x_shape);

@@ -34,8 +34,7 @@ std::vector<int64_t> _get_pack_shape(std::vector<BaseShapePtr> x_shapes, std::ve
  MS_EXCEPTION_IF_NULL(type);
  auto type0 = x_types[0]->cast<TensorTypePtr>()->element();
  MS_EXCEPTION_IF_NULL(type0);
- CheckAndConvertUtils::Check("x_type[" + std::to_string(i) + "]", type->type_id(), kEqual, "base", type0->type_id(),
-     name);
+ CheckAndConvertUtils::Check("x_type[" + std::to_string(i) + "]", type->type_id(), kEqual, type0->type_id(), name);
  auto shape = CheckAndConvertUtils::ConvertShapePtrToShapeMap(x_shapes[LongToSize(i)])[kShape];
  if (shape != output_shape) {
  MS_EXCEPTION(ValueError) << "For '" + name + "' element " + std::to_string(i) +

@@ -31,7 +31,7 @@ abstract::ShapePtr SoftMarginLossInferShape(const PrimitivePtr &primitive,
  (void)CheckAndConvertUtils::CheckInteger("input numbers", SizeToLong(input_args.size()), kEqual, kInputSize, op_name);
  auto predict = CheckAndConvertUtils::ConvertShapePtrToShapeMap(input_args[0]->BuildShape())[kShape];
  auto label = CheckAndConvertUtils::ConvertShapePtrToShapeMap(input_args[1]->BuildShape())[kShape];
- CheckAndConvertUtils::Check("logits shape", predict, kEqual, "labels shape", label, op_name, ValueError);
+ CheckAndConvertUtils::Check("logits shape", predict, kEqual, label, op_name, ValueError);
  auto out_shape = predict;
  int64_t reduction;
  CheckAndConvertUtils::GetReductionEnumValue(primitive->GetAttr(kReduction), &reduction);

@@ -61,7 +61,7 @@ void SpaceToBatch::set_paddings(const std::vector<std::vector<int64_t>> &padding
  int64_t h = SizeToLong(paddings.size());
  int64_t w = SizeToLong(paddings[0].size());
  std::vector<int64_t> temp_w = {2, 2};
- CheckAndConvertUtils::Check(kPaddings, {h, w}, kEqual, "paddings_shape(2,2)", temp_w, this->name());
+ CheckAndConvertUtils::Check(kPaddings, {h, w}, kEqual, temp_w, this->name());
  for (size_t i = 0; i < LongToSize(h); i++) {
  for (size_t j = 0; j < LongToSize(w); j++) {
  (void)CheckAndConvertUtils::CheckInteger(kPadding, paddings[i][j], kGreaterEqual, 0, this->name());

@@ -66,7 +66,7 @@ void SpaceToBatchND::set_paddings(std::vector<std::vector<int64_t>> paddings) {
  size_t h = paddings.size();
  size_t w = paddings[0].size();
  std::vector<size_t> temp_w = {2, 2};
- CheckAndConvertUtils::Check(kPaddings, {h, w}, kEqual, "paddings_shape(2,2)", temp_w, this->name());
+ CheckAndConvertUtils::Check(kPaddings, {h, w}, kEqual, temp_w, this->name());
  for (size_t i = 0; i < h; i++) {
  for (size_t j = 0; j < w; j++) {
  (void)CheckAndConvertUtils::CheckInteger(kPaddings, paddings[i][j], kGreaterEqual, 0LL, this->name());

@@ -24,7 +24,7 @@ void SpaceToDepth::Init(const int64_t block_size, const Format &format) {
  }

  void SpaceToDepth::set_block_size(const int64_t block_size) {
- CheckAndConvertUtils::Check(kBlockSize, block_size, kGreaterEqual, "", 2, this->name());
+ CheckAndConvertUtils::Check(kBlockSize, block_size, kGreaterEqual, 2, this->name());
  (void)AddAttr(kBlockSize, MakeValue(block_size));
  }

@@ -55,7 +55,7 @@ abstract::TupleShapePtr InferShape(const PrimitivePtr &primitive, const std::vec
  same_shape_args_map.insert({"accum_updata shape", accum_updata_shape});
  same_shape_args_map.insert({"grad shape", grad_shape});
  for (auto &elem : same_shape_args_map) {
- CheckAndConvertUtils::Check(elem.first, elem.second, kEqual, "var shape", var_shape, prim_name);
+ CheckAndConvertUtils::Check(elem.first, elem.second, kEqual, var_shape, prim_name);
  }
  // Indices must be rank 1
  (void)CheckAndConvertUtils::CheckInteger("indices dimension", indices_shape.size(), kEqual, 1, prim_name);

@@ -51,7 +51,7 @@ abstract::TupleShapePtr InferShape(const PrimitivePtr &primitive, const std::vec
  (void)same_shape_args_map.insert({"shape of mom ", mom_shape});
  (void)same_shape_args_map.insert({"shape of grad ", grad_shape});
  for (auto &elem : same_shape_args_map) {
- CheckAndConvertUtils::Check(elem.first, elem.second, kEqual, "var shape", var_shape, prim_name);
+ CheckAndConvertUtils::Check(elem.first, elem.second, kEqual, var_shape, prim_name);
  }
  // Indices must be rank 1
  const int64_t input_num1 = 1;

@@ -61,8 +61,7 @@ abstract::TupleShapePtr InferShape(const PrimitivePtr &primitive, const std::vec
  (void)CheckAndConvertUtils::CheckInteger("dimension of var", SizeToLong(var_shape.size()), kGreaterEqual, input_num1,
      prim_name);
  // Indices shape must be equal to the first dimension of var
- CheckAndConvertUtils::Check("indices shape", indices_shape[0], kEqual, "the first dimension of var", var_shape[0],
-     prim_name);
+ CheckAndConvertUtils::Check("indices shape", indices_shape[0], kEqual, var_shape[0], prim_name);
  return std::make_shared<abstract::TupleShape>(
      std::vector<abstract::BaseShapePtr>{var_shape_ptr, ms_shape_ptr, mom_shape_ptr});
  }
@@ -40,8 +40,7 @@ abstract::TupleShapePtr InferShape(const PrimitivePtr &primitive, const std::vec
  auto num_split = GetValue<int64_t>(primitive->GetAttr("num_split"));
  (void)CheckAndConvertUtils::CheckInteger("num_split", num_split, kGreaterEqual, 1, prim_name);
  auto size_splits = GetValue<std::vector<int64_t>>(primitive->GetAttr(kSizeSplits));
- CheckAndConvertUtils::Check("num_split", num_split, kEqual, "rank of size_splits", SizeToLong(size_splits.size()),
-     prim_name);
+ CheckAndConvertUtils::Check("num_split", num_split, kEqual, SizeToLong(size_splits.size()), prim_name);
  auto default_idx = std::find(size_splits.begin(), size_splits.end(), -1);
  if (default_idx == size_splits.end()) {
  int64_t sum_of_size_splits = 0;

@@ -50,8 +49,7 @@ abstract::TupleShapePtr InferShape(const PrimitivePtr &primitive, const std::vec
      {0, shape_of_split_dim}, prim_name);
  sum_of_size_splits += size_splits[LongToSize(i)];
  }
- CheckAndConvertUtils::Check("sum of size_splits", sum_of_size_splits, kEqual, "dimension of value along split_dim",
-     shape_of_split_dim, prim_name);
+ CheckAndConvertUtils::Check("sum of size_splits", sum_of_size_splits, kEqual, shape_of_split_dim, prim_name);
  } else {
  (void)size_splits.erase(default_idx);
  auto excessive_default_idx = std::find(size_splits.begin(), size_splits.end(), -1);

@@ -42,14 +42,14 @@ AbstractBasePtr UnsortedSegmentSumInfer(const abstract::AnalysisEnginePtr &, con
  auto segment_ids_shape = CheckAndConvertUtils::ConvertShapePtrToShapeMap(input_args[1]->BuildShape())[kShape];
  (void)CheckAndConvertUtils::CheckInteger("segment_ids_shape", SizeToLong(segment_ids_shape.size()), kGreaterThan, 0,
      prim_name);
- CheckAndConvertUtils::Check("input_x", int64_t(x_shape.size()), kGreaterEqual, "segment_ids_shape",
-     int64_t(segment_ids_shape.size()), prim_name);
+ CheckAndConvertUtils::Check("input_x", int64_t(x_shape.size()), kGreaterEqual, int64_t(segment_ids_shape.size()),
+     prim_name);

  if ((x_shape.end() != find(x_shape.begin(), x_shape.end(), -1)) &&
  (segment_ids_shape.end() != find(segment_ids_shape.begin(), segment_ids_shape.end(), -1))) {
  size_t size = segment_ids_shape.size();
  for (size_t i = 0; i < size; ++i) {
- CheckAndConvertUtils::Check("segment_ids_shp", segment_ids_shape[i], kEqual, "x_shape", x_shape[i], prim_name);
+ CheckAndConvertUtils::Check("segment_ids_shp", segment_ids_shape[i], kEqual, x_shape[i], prim_name);
  }
  }

@@ -457,8 +457,8 @@ TypePtr CheckAndConvertUtils::GetTensorInputType(const std::string &prim_name,
  return type;
  }

- void CheckAndConvertUtils::Check(const string &arg_name, int64_t arg_value, CompareEnum compare_type, const string &,
-     int64_t value, const string &prim_name, ExceptionType) {
+ void CheckAndConvertUtils::Check(const string &arg_name, int64_t arg_value, CompareEnum compare_type, int64_t value,
+     const string &prim_name, ExceptionType) {
  auto iter = kCompareMap<float>.find(compare_type);
  if (iter == kCompareMap<float>.end()) {
  MS_EXCEPTION(NotExistsError) << "the compare type :" << compare_type << " is not in the compare map";

@@ -783,8 +783,10 @@ void CheckAndConvertUtils::CheckMinMaxShape(const ShapeVector &shape, ShapeVecto
  int64_t CheckAndConvertUtils::GetAndCheckFormat(const ValuePtr &value) {
  int64_t data_format;
  bool result = CheckAndConvertUtils::GetDataFormatEnumValue(value, &data_format);
- if (!result || (data_format != Format::NHWC && data_format != Format::NCHW && data_format != Format::NCDHW)) {
-   MS_LOG(EXCEPTION) << "data format is invalid, only support NCHW, NHWC and NCDHW";
+ if (!result ||
+     (data_format != static_cast<int64_t>(Format::NHWC) && data_format != static_cast<int64_t>(Format::NCHW) &&
+      data_format != static_cast<int64_t>(Format::NCDHW))) {
+   MS_LOG(EXCEPTION) << "data format value " << data_format << " is invalid, only support NCHW, NHWC and NCDHW";
  }
  return data_format;
  }
@@ -117,8 +117,8 @@ class CheckAndConvertUtils {
  template <typename T>
  static T CheckValue(const std::string &arg_name, T arg_value, CompareEnum compare_operator, T match_value,
      const std::string &prim_name) {
- auto iter = kCompareMap<float>.find(compare_operator);
- if (iter == kCompareMap<float>.end()) {
+ auto iter = kCompareMap<T>.find(compare_operator);
+ if (iter == kCompareMap<T>.end()) {
  MS_EXCEPTION(NotExistsError) << "compare_operator " << compare_operator << " cannot find in the compare map";
  }
  if (iter->second(arg_value, match_value)) {

@@ -142,8 +142,8 @@ class CheckAndConvertUtils {
  template <typename T>
  static void CheckInRange(const std::string &arg_name, T arg_value, CompareRange compare_operator,
      const std::pair<T, T> &range, const std::string &prim_name) {
- auto iter = kCompareRangeMap<float>.find(compare_operator);
- if (iter == kCompareRangeMap<float>.end()) {
+ auto iter = kCompareRangeMap<T>.find(compare_operator);
+ if (iter == kCompareRangeMap<T>.end()) {
  MS_EXCEPTION(NotExistsError) << "compare_operator " << compare_operator << " cannot find in the compare map";
  }
  if (range.first >= range.second) {
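The two hunks above change the comparator lookup inside `CheckValue` and `CheckInRange`: both function templates used to index the table with `kCompareMap<float>` / `kCompareRangeMap<float>` regardless of the type `T` they were instantiated with, which routes 64-bit comparisons through an implicit conversion to `float`. A minimal, self-contained sketch of the pattern (a hypothetical `compare_map`, not the real MindSpore tables):

```cpp
#include <cstdint>
#include <functional>
#include <iostream>
#include <map>

enum CompareOp { kLess, kGreaterEqual };

// Hypothetical analogue of kCompareMap: one comparator table per operand type T.
template <typename T>
const std::map<CompareOp, std::function<bool(T, T)>> compare_map = {
    {kLess, [](T a, T b) { return a < b; }},
    {kGreaterEqual, [](T a, T b) { return a >= b; }},
};

template <typename T>
bool CheckValue(T arg_value, CompareOp op, T match_value) {
  // Before the fix this read compare_map<float>, so int64_t operands were
  // narrowed to float before comparing; compare_map<T> keeps the caller's type.
  auto iter = compare_map<T>.find(op);
  return iter != compare_map<T>.end() && iter->second(arg_value, match_value);
}

int main() {
  // 16777216 and 16777217 collapse to the same float, so a <float> lookup would
  // report "not less"; with <T> the int64_t comparison stays exact.
  std::cout << CheckValue<int64_t>(16777216, kLess, 16777217) << "\n";  // prints 1
}
```

The same reasoning applies to `kCompareRangeMap<T>` inside `CheckInRange`.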
@@ -175,13 +175,12 @@ class CheckAndConvertUtils {
      const std::vector<AbstractBasePtr> &input_args, size_t index);
  static TypePtr GetTensorInputType(const std::string &prim_name, const std::vector<AbstractBasePtr> &input_args,
      size_t index);
- static void Check(const std::string &arg_name, int64_t arg_value, CompareEnum compare_type,
-     const std::string &value_name, int64_t value, const std::string &prim_name = "",
-     ExceptionType exception_type = ValueError);
+ static void Check(const std::string &arg_name, int64_t arg_value, CompareEnum compare_type, int64_t value,
+     const std::string &prim_name = "", ExceptionType exception_type = ValueError);

  template <typename T>
  static void Check(const std::string &arg_name, const std::vector<T> &arg_value, CompareEnum compare_type,
-     const std::string &value_name, const std::vector<T> &value, const std::string &prim_name = "",
+     const std::vector<T> &value, const std::string &prim_name = "",
      ExceptionType exception_type = ValueError) {
  if (compare_type != kEqual) {
  auto iter = kCompareToString.find(compare_type);
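The declaration change above is the thread running through most of this commit: `CheckAndConvertUtils::Check` drops its `value_name` string parameter in both the scalar and the `std::vector<T>` overloads, so every call site in the earlier hunks loses one argument. A minimal stand-in showing how the new calling convention lines up; the body here is a simplification and not the real MindSpore implementation:

```cpp
#include <cstdint>
#include <iostream>
#include <stdexcept>
#include <string>

enum CompareEnum { kEqual, kLessEqual };

// Hypothetical stand-in for the trimmed scalar overload declared above; the real
// CheckAndConvertUtils formats its error message differently.
void Check(const std::string &arg_name, int64_t arg_value, CompareEnum compare_type, int64_t value,
           const std::string &prim_name = "") {
  bool ok = (compare_type == kEqual) ? (arg_value == value) : (arg_value <= value);
  if (!ok) {
    throw std::invalid_argument("For " + prim_name + ", " + arg_name + " is " + std::to_string(arg_value) +
                                ", expected " + std::to_string(value));
  }
}

int main() {
  int64_t x_rank = 3;
  int64_t index_rank = 3;
  // Old style: Check("x_rank", x_rank, kEqual, "index_rank", index_rank, "GatherD");
  // New style after this commit: the descriptive name of the right-hand value is gone.
  Check("x_rank", x_rank, kEqual, index_rank, "GatherD");
  std::cout << "check passed\n";
}
```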
@@ -61,7 +61,7 @@ class CompactSet {
     return true;
   }

-  iterator erase(iterator pos) { return data_.erase(pos); }
+  iterator erase(const iterator &pos) { return data_.erase(pos); }

   void clear() { data_.clear(); }

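This hunk, and the `OrderedMap`/`OrderedSet` hunks later in the diff, apply the same convention: iterator parameters that are only read are taken by const reference. A minimal sketch of the pattern with an invented `CompactSetDemo` wrapper (not the real CompactSet):

```cpp
#include <iostream>
#include <vector>

class CompactSetDemo {
 public:
  using iterator = std::vector<int>::iterator;

  void push_back(int v) { data_.push_back(v); }
  iterator begin() { return data_.begin(); }
  iterator end() { return data_.end(); }

  // erase() only needs the position, so the iterator is taken by const reference
  // rather than by value, matching the convention enforced in this commit.
  iterator erase(const iterator &pos) { return data_.erase(pos); }

 private:
  std::vector<int> data_;
};

int main() {
  CompactSetDemo s;
  s.push_back(1);
  s.push_back(2);
  s.push_back(3);
  s.erase(s.begin());                      // removes the first element
  for (int v : s) std::cout << v << " ";   // prints "2 3 "
  std::cout << "\n";
  return 0;
}
```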
@@ -52,6 +52,9 @@ class Counter {

   Counter(const Counter &other) { *this = other; }
   Counter &operator=(const Counter &other) {
+    if (&other == this) {
+      return *this;
+    }
     map_.clear();
     list_ = other.list_;
     for (auto iter = list_.begin(); iter != list_.end(); ++iter) {
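The `&other == this` guard matters because this assignment operator clears its own storage before copying from `other`; without the early return, self-assignment would wipe the data it is about to copy. A small self-contained sketch of the hazard, using an invented `CounterDemo` that mimics only the clear-then-copy shape:

```cpp
#include <cstddef>
#include <iostream>
#include <list>

class CounterDemo {
 public:
  CounterDemo() = default;
  CounterDemo(const CounterDemo &other) { *this = other; }
  CounterDemo &operator=(const CounterDemo &other) {
    if (&other == this) {
      return *this;  // without this early return, list_ would be cleared and then copied from itself
    }
    list_.clear();
    list_ = other.list_;
    return *this;
  }
  void Add(int v) { list_.push_back(v); }
  std::size_t size() const { return list_.size(); }

 private:
  std::list<int> list_;
};

int main() {
  CounterDemo c;
  c.Add(1);
  c.Add(2);
  CounterDemo &alias = c;
  c = alias;                      // self-assignment is now a harmless no-op
  std::cout << c.size() << "\n";  // prints 2
  return 0;
}
```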
@@ -310,7 +310,7 @@ std::unique_ptr<Byte[]> Encrypt(size_t *encrypt_len, const Byte *plain_data, siz
   MS_EXCEPTION_IF_NULL(key);

   size_t block_enc_buf_len = MAX_BLOCK_SIZE + RESERVED_BYTE_PER_BLOCK;
-  size_t encrypt_buf_len = plain_len + (plain_len + MAX_BLOCK_SIZE) / MAX_BLOCK_SIZE * RESERVED_BYTE_PER_BLOCK;
+  size_t encrypt_buf_len = plain_len + ((plain_len + MAX_BLOCK_SIZE) / MAX_BLOCK_SIZE) * RESERVED_BYTE_PER_BLOCK;
   std::vector<Byte> int_buf(sizeof(int32_t));
   std::vector<Byte> block_buf;
   std::vector<Byte> block_enc_buf(block_enc_buf_len);
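The added parentheses do not change the arithmetic (division and multiplication already associate left to right); they only make the block count explicit. A tiny worked example of what the expression computes, with invented constants that are not the real MAX_BLOCK_SIZE / RESERVED_BYTE_PER_BLOCK values:

```cpp
#include <cstddef>
#include <iostream>

int main() {
  const std::size_t kMaxBlockSize = 64;          // assumed value, for illustration only
  const std::size_t kReservedBytePerBlock = 16;  // assumed value, for illustration only
  const std::size_t plain_len = 150;

  // Blocks the plaintext spans (rounded up with one block of slack), then one
  // reserved region per block added on top of the plaintext length.
  std::size_t blocks = (plain_len + kMaxBlockSize) / kMaxBlockSize;          // (150 + 64) / 64 = 3
  std::size_t encrypt_buf_len = plain_len + blocks * kReservedBytePerBlock;  // 150 + 3 * 16 = 198

  std::cout << blocks << " blocks, buffer of " << encrypt_buf_len << " bytes\n";
  return 0;
}
```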
@@ -128,7 +128,7 @@ LocationPtr GraphDebugInfo::location() const {
   // Function may have decorator which is included in its location.
   auto loc = DebugInfo::location();
   if (deco_loc_ != nullptr && loc != nullptr) {
-    auto loc_line = loc->line() + (deco_loc_->line_end() - deco_loc_->line() + 1);
+    auto loc_line = loc->line() + ((deco_loc_->line_end() - deco_loc_->line()) + 1);
     return std::make_shared<Location>(loc->file_name(), loc_line, loc->line_end(), loc->column(), loc->column_end());
   }
   return loc;
@@ -359,7 +359,7 @@ bool ParseLogLevel(const std::string &str_level, MsLogLevel *ptr_level) {
   if (str_level.size() == 1) {
     int ch = str_level.c_str()[0];
     ch = ch - number_start;  // subtract ASCII code of '0', which is 48
-    if (ch >= DEBUG && ch <= EXCEPTION) {
+    if (ch >= static_cast<int>(DEBUG) && ch <= static_cast<int>(EXCEPTION)) {
       if (ptr_level != nullptr) {
         *ptr_level = static_cast<MsLogLevel>(ch);
       }
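The casts make the int-vs-enum comparisons explicit instead of relying on implicit promotion; the same idea covers the `NUM_SUBMODUES` loop bounds and the glog/level assignments in the following hunks. A small self-contained sketch of the pattern, using an invented `DemoLogLevel` enum rather than MindSpore's `MsLogLevel`:

```cpp
#include <iostream>
#include <string>

enum DemoLogLevel { kDemoDebug = 0, kDemoInfo, kDemoWarning, kDemoError, kDemoException };

bool ParseDemoLogLevel(const std::string &str_level, DemoLogLevel *ptr_level) {
  if (str_level.size() != 1) {
    return false;
  }
  int ch = str_level[0] - '0';
  // Explicit static_casts keep the int-vs-enum range check intentional, which is
  // what the static analysis in this commit asks for.
  if (ch >= static_cast<int>(kDemoDebug) && ch <= static_cast<int>(kDemoException)) {
    if (ptr_level != nullptr) {
      *ptr_level = static_cast<DemoLogLevel>(ch);
    }
    return true;
  }
  return false;
}

int main() {
  DemoLogLevel level = kDemoDebug;
  std::cout << ParseDemoLogLevel("2", &level) << " " << level << "\n";  // prints "1 2"
  return 0;
}
```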
@@ -390,7 +390,7 @@ static MsLogLevel GetGlobalLogLevel() {
 void InitSubModulesLogLevel() {
   // initialize submodule's log level using global
   auto global_log_level = GetGlobalLogLevel();
-  for (int i = 0; i < NUM_SUBMODUES; ++i) {
+  for (int i = 0; i < static_cast<int>(NUM_SUBMODUES); ++i) {
     g_ms_submodule_log_levels[i] = global_log_level;
   }

@@ -401,7 +401,7 @@ void InitSubModulesLogLevel() {
   auto configs = parser.Parse();
   for (const auto &cfg : configs) {
     int mod_idx = -1;
-    for (int i = 0; i < NUM_SUBMODUES; ++i) {
+    for (int i = 0; i < static_cast<int>(NUM_SUBMODUES); ++i) {
       if (cfg.first == GetSubModuleName(static_cast<SubModuleId>(i))) {
         mod_idx = i;
         break;
@@ -416,7 +416,7 @@ void InitSubModulesLogLevel() {
       MS_LOG(WARNING) << "Illegal log level value " << cfg.second << " for " << cfg.first << ", ignore it.";
       continue;
     }
-    g_ms_submodule_log_levels[mod_idx] = submodule_log_level;
+    g_ms_submodule_log_levels[mod_idx] = static_cast<int>(submodule_log_level);
   }
 }
 }  // namespace mindspore
@@ -438,7 +438,7 @@ MS_CORE_API void common_log_init(void) {
   FLAGS_logbufsecs = 0;
   // Set default log level to WARNING
   if (mindspore::GetEnv("GLOG_v").empty()) {
-    FLAGS_v = mindspore::WARNING;
+    FLAGS_v = static_cast<int>(mindspore::WARNING);
   }

   // Set default log file mode to 0640
@@ -207,8 +207,10 @@ class LogWriter {
   /// \param[in] stream The input log stream.
   MS_CORE_API void operator^(const LogStream &stream) const __attribute__((noreturn));

-  static void set_exception_handler(ExceptionHandler exception_handler) { exception_handler_ = exception_handler; }
-  static void set_trace_provider(TraceProvider trace_provider) { trace_provider_ = trace_provider; }
+  static void set_exception_handler(const ExceptionHandler &exception_handler) {
+    exception_handler_ = exception_handler;
+  }
+  static void set_trace_provider(const TraceProvider &trace_provider) { trace_provider_ = trace_provider; }
   static TraceProvider trace_provider() { return trace_provider_; }

  private:
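These setters now take their handlers by const reference, which avoids an extra copy of the callable at the call boundary when the caller already holds one; the single necessary copy happens in the assignment to the stored member. A hedged sketch of the pattern, assuming (not confirmed here) that `ExceptionHandler` and `TraceProvider` are std::function-style aliases; all names below are invented:

```cpp
#include <functional>
#include <iostream>
#include <string>

class DemoLogWriter {
 public:
  using Handler = std::function<void(const std::string &)>;

  // Const reference: no pass-by-value copy of an existing Handler at the call;
  // the stored copy is made once, in the assignment below.
  static void set_exception_handler(const Handler &handler) { exception_handler_ = handler; }

  static void Fire(const std::string &msg) {
    if (exception_handler_) {
      exception_handler_(msg);
    }
  }

 private:
  static Handler exception_handler_;
};

DemoLogWriter::Handler DemoLogWriter::exception_handler_;

int main() {
  DemoLogWriter::Handler handler = [](const std::string &msg) { std::cout << "handled: " << msg << "\n"; };
  DemoLogWriter::set_exception_handler(handler);  // handler is not copied at the call boundary
  DemoLogWriter::Fire("boom");
  return 0;
}
```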
@@ -159,26 +159,26 @@ class MsContext {
   void CreateTensorPrintThread(const PrintThreadCrt &ctr);
   void DestroyTensorPrintThread();
 #endif
-  static void device_seter(DeviceSeter device) { seter_ = device; }
-  static void device_type_seter(DeviceTypeSeter device_type) { device_type_seter_ = device_type; }
+  static void device_seter(const DeviceSeter &device) { seter_ = device; }
+  static void device_type_seter(const DeviceTypeSeter &device_type) { device_type_seter_ = device_type; }

   template <typename T>
-  void set_param(MsCtxParam param, const T &value) {
+  void set_param(MsCtxParam, const T &value) {
     MS_LOG(EXCEPTION) << "Need to implement " << __FUNCTION__ << " for type " << typeid(T).name() << ".";
   }

   template <typename T>
-  const T &get_param(MsCtxParam param) const {
+  const T &get_param(MsCtxParam) const {
     MS_LOG(EXCEPTION) << "Need to implement " << __FUNCTION__ << " for type " << typeid(T).name() << ".";
   }

   template <typename T>
-  void increase_param(MsCtxParam param) {
+  void increase_param(MsCtxParam) {
     MS_LOG(EXCEPTION) << "Need to implement " << __FUNCTION__ << " for type " << typeid(T).name() << ".";
   }

   template <typename T>
-  void decrease_param(MsCtxParam param) {
+  void decrease_param(MsCtxParam) {
     MS_LOG(EXCEPTION) << "Need to implement " << __FUNCTION__ << " for type " << typeid(T).name() << ".";
   }

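The `param` names are removed because these primary templates never read the argument: their only job is to fail loudly until a specialization handles the type, and a named-but-unused parameter trips the unused-parameter check. A minimal, hypothetical sketch of that primary-template-plus-specialization shape (free functions and invented names, not MindSpore's real specializations):

```cpp
#include <iostream>
#include <stdexcept>
#include <string>
#include <typeinfo>

enum DemoParam { kDemoParamFoo };  // invented stand-in for MsCtxParam

// Primary template: parameters are intentionally unnamed because this overload
// only reports "not implemented"; naming them would trigger unused-parameter warnings.
template <typename T>
void demo_set_param(DemoParam, const T &) {
  throw std::logic_error(std::string("Need to implement demo_set_param for type ") + typeid(T).name());
}

bool g_demo_bool_param = false;

// Full specialization that actually uses its argument.
template <>
void demo_set_param<bool>(DemoParam, const bool &value) {
  g_demo_bool_param = value;
}

int main() {
  demo_set_param<bool>(kDemoParamFoo, true);  // handled by the specialization
  std::cout << g_demo_bool_param << "\n";     // prints 1
  try {
    demo_set_param<int>(kDemoParamFoo, 42);   // falls back to the primary template and throws
  } catch (const std::logic_error &e) {
    std::cout << e.what() << "\n";
  }
  return 0;
}
```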
@@ -23,6 +23,7 @@ namespace mindspore {
 class ExceptionListener {
  public:
   virtual void OnException() = 0;
+  virtual ~ExceptionListener() = default;
 };

 class MsException {
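Adding the virtual destructor matters because listeners are polymorphic and may be destroyed through a base-class pointer; deleting a derived object through a base pointer whose destructor is not virtual is undefined behavior. A small sketch of the rule, with invented `DemoListener`/`PrintingListener` names:

```cpp
#include <iostream>
#include <memory>

class DemoListener {
 public:
  virtual void OnException() = 0;
  virtual ~DemoListener() = default;  // without this, deleting via DemoListener* below would be undefined behavior
};

class PrintingListener : public DemoListener {
 public:
  void OnException() override { std::cout << "exception observed\n"; }
  ~PrintingListener() override { std::cout << "derived destructor runs\n"; }
};

int main() {
  std::unique_ptr<DemoListener> listener = std::make_unique<PrintingListener>();
  listener->OnException();
  // unique_ptr destroys the object through DemoListener*, which is only safe
  // because the base destructor is virtual.
  return 0;
}
```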
@@ -53,7 +54,7 @@ class MsException {

  private:
   MsException() = default;
-  ~MsException() = default;
+  ~MsException() { listener_ = nullptr; }
   DISABLE_COPY_AND_ASSIGN(MsException)
   ExceptionListener *listener_{nullptr};
   std::exception_ptr exception_ptr_{nullptr};
@@ -191,7 +191,7 @@ class OrderedMap {
   }

   // Remove the element given by Iterator.
-  iterator erase(iterator iter) {
+  iterator erase(const iterator &iter) {
     (void)map_data_.erase(&(iter->first));
     return sequential_data_.erase(iter);
   }
@@ -355,7 +355,7 @@ class OrderedMap<std::shared_ptr<T>, ValueT> {
   }

   // Remove the element given by Iterator.
-  iterator erase(iterator iter) {
+  iterator erase(const iterator &iter) {
     (void)map_data_.erase(iter->first.get());
     return sequential_data_.erase(iter);
   }
@@ -350,7 +350,7 @@ class OrderedSet<std::shared_ptr<T>> {

   OrderedSet &operator=(OrderedSet &&other) = default;

-  std::pair<iterator, bool> insert(iterator pos, const element_type &e) {
+  std::pair<iterator, bool> insert(const iterator &pos, const element_type &e) {
     auto [map_iter, inserted] = map_.emplace(e.get(), iterator{});
     if (inserted) {
       map_iter->second = ordered_data_.emplace(pos, e);
@@ -358,7 +358,7 @@ class OrderedSet<std::shared_ptr<T>> {
     return {map_iter->second, inserted};
   }

-  std::pair<iterator, bool> insert(iterator pos, element_type &&e) {
+  std::pair<iterator, bool> insert(const iterator &pos, element_type &&e) {
     auto [map_iter, inserted] = map_.emplace(e.get(), iterator{});
     if (inserted) {
       map_iter->second = ordered_data_.emplace(pos, std::move(e));
@@ -389,7 +389,7 @@ class OrderedSet<std::shared_ptr<T>> {
     return true;
   }

-  iterator erase(iterator pos) {
+  iterator erase(const iterator &pos) {
     (void)map_.erase(pos->get());
     return ordered_data_.erase(pos);
   }
@@ -335,7 +335,7 @@ static void PrintTimeStat(std::ostringstream &oss, const TimeInfoGroup &group, c
   oss << "------[" << prefix << "] " << std::setw(10) << std::fixed << std::setprecision(6) << group.total_time
       << std::setw(6) << group.total_count << "\n";
   for (const auto &iter : group.items) {
-    oss << std::setw(5) << std::fixed << std::setprecision(2) << iter->second.time_ / group.total_time * 100
+    oss << std::setw(5) << std::fixed << std::setprecision(2) << (iter->second.time_ / group.total_time) * 100
         << "% : " << std::setw(12) << std::fixed << std::setprecision(6) << iter->second.time_ << "s : " << std::setw(6)
         << iter->second.count_ << ": " << iter->first << "\n";
   }
@@ -121,8 +121,8 @@ class ProfTransaction {

 class NoProfTransaction {
  public:
-  explicit NoProfTransaction(ProfileBase *prof) {}
-  explicit NoProfTransaction(ProfContext *ctx) {}
+  explicit NoProfTransaction(ProfileBase *) {}
+  explicit NoProfTransaction(ProfContext *) {}
   ~NoProfTransaction() = default;

   template <class Function>