!20346 clean pclint
Merge pull request !20346 from wangnan39/pclint_clean
Commit 2c94b631a7
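The hunks below repeat two mechanical PC-lint fixes: a (void) cast on calls whose non-void return value is deliberately ignored, and explicit signed/unsigned conversions (via the project's SizeToLong/LongToSize/IntToSize helpers) in place of implicit ones. A minimal sketch of both patterns, with a hypothetical CheckEqual standing in for CheckAndConvertUtils::CheckInteger:

#include <cstddef>
#include <cstdint>
#include <iostream>
#include <vector>

// Hypothetical checker standing in for CheckAndConvertUtils::CheckInteger;
// it validates a value and also returns it, so callers that only want the
// side effect must discard the result explicitly.
int64_t CheckEqual(int64_t actual, int64_t expected) {
  if (actual != expected) {
    std::cerr << "expected " << expected << ", got " << actual << "\n";
  }
  return actual;
}

void Example(const std::vector<int64_t> &args, int64_t index) {
  // Pattern 1: cast a deliberately ignored return value to (void) so the
  // "return value ignored" diagnostic is silenced.
  (void)CheckEqual(static_cast<int64_t>(args.size()), 2);

  // Pattern 2: convert between signed and unsigned explicitly instead of
  // letting the comparison do it implicitly; the diff uses the project
  // helpers for the same purpose.
  if (args.size() < static_cast<size_t>(index + 1)) {
    return;
  }
}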
@@ -1591,13 +1591,13 @@ AnfNodePtr DfGraphConvertor::GetRealOpNode(AnfNodePtr node) {
     // make_tuple apply inputs:make_tuple, [tuple_items,]
     if (IsPrimitiveCNode(node_inputs[1], prim::kPrimMakeTuple)) {
       auto tuple_inputs = node->cast<CNodePtr>()->inputs();
-      if (tuple_inputs.size() < IntToSize(index + 1)) {
+      if (tuple_inputs.size() < LongToSize(index + 1L)) {
         MS_LOG(ERROR) << "make tuple input items node not correct! size:" << tuple_inputs.size()
                       << ", item index:" << index;
         error_ = FAILED;
         return node;
       }
-      return GetRealOpNode(tuple_inputs[IntToSize(index + 1)]);
+      return GetRealOpNode(tuple_inputs[LongToSize(index + 1L)]);
     }
     return GetRealOpNode(node_inputs[1]);
   }
@@ -1164,7 +1164,7 @@ AbstractBasePtr InferImplDynamicStitch(const AnalysisEnginePtr &, const Primitiv
                                        const AbstractBasePtrList &args_spec_list) {
   MS_EXCEPTION_IF_NULL(primitive);
   auto prim_name = primitive->name();
-  CheckAndConvertUtils::CheckInteger("input number", args_spec_list.size(), kEqual, 2, prim_name);
+  (void)CheckAndConvertUtils::CheckInteger("input number", args_spec_list.size(), kEqual, 2, prim_name);
   for (const auto &item : args_spec_list) {
     MS_EXCEPTION_IF_NULL(item);
   }
@@ -80,7 +80,7 @@ AbstractBasePtr InferImplPooling(const AnalysisEnginePtr &, const PrimitivePtr &
   auto pad_mode_ptr = primitive->GetAttr("pad_mode");
   if (pad_mode_ptr != nullptr) {
     int64_t pad_mode;
-    CheckAndConvertUtils::GetPadModEnumValue(pad_mode_ptr, &pad_mode, true);
+    (void)CheckAndConvertUtils::GetPadModEnumValue(pad_mode_ptr, &pad_mode, true);
     if (pad_mode == PadMode::VALID) {
       padding = 0;
     } else if (pad_mode == PadMode::SAME) {
@@ -322,7 +322,7 @@ AbstractBasePtr InferImplConv2D(const AnalysisEnginePtr &, const PrimitivePtr &p
   std::vector<int64_t> padding =
     CheckAttrIntOrTuple(op_name, primitive->GetAttr("pad"), padding_start_idx, padding_num_element);
   int64_t pad_mode;
-  CheckAndConvertUtils::GetPadModEnumValue(primitive->GetAttr("pad_mode"), &pad_mode);
+  (void)CheckAndConvertUtils::GetPadModEnumValue(primitive->GetAttr("pad_mode"), &pad_mode);
   std::vector<int64_t> output_hw;
   std::vector<int64_t> pad_list;
   std::vector<int64_t> output_hw_min;
@@ -533,7 +533,7 @@ bool MSANFModelParser::GetAttrValueForCNode(const PrimitivePtr &prim, const mind
       ValuePtr res = ObtainCNodeAttrInSingleScalarForm(attr_proto);
       const std::string &op_type = prim->name();
       if (!IsLite()) {
-        CheckAndConvertUtils::ConvertAttrValueInLoad(op_type, attr_name, &res);
+        (void)CheckAndConvertUtils::ConvertAttrValueInLoad(op_type, attr_name, &res);
       }
       prim->AddAttr(attr_name, res);
       break;
@@ -226,9 +226,9 @@ std::shared_ptr<FuncGraph> LoadMindIR(const std::string &file_name, bool is_lite
     return nullptr;
   }

-  int file_size = files.size();
+  size_t file_size = files.size();
   mind_ir::GraphProto *mod_graph = origin_model.mutable_graph();
-  for (auto file_index = 0; file_index < file_size; file_index++) {
+  for (size_t file_index = 0; file_index < file_size; file_index++) {
     mind_ir::GraphProto param_graph;
     if (!ParseGraphProto(&param_graph, files[file_index], dec_key, key_len, dec_mode)) {
       return nullptr;
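Note on the LoadMindIR hunk above: declaring both the count and the loop counter as size_t matches std::vector's size_type, so neither the initializer nor the loop condition mixes signed and unsigned operands. A generic sketch (Walk and its body are illustrative only):

#include <cstddef>
#include <iostream>
#include <string>
#include <vector>

void Walk(const std::vector<std::string> &files) {
  // size_t matches the container's size_type, so the comparison in the
  // loop condition involves no signed/unsigned conversion.
  size_t file_count = files.size();
  for (size_t i = 0; i < file_count; ++i) {
    std::cout << files[i] << "\n";
  }
}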
@@ -28,7 +28,7 @@ namespace {
 abstract::ShapePtr InferShape(const PrimitivePtr &primitive, const std::vector<AbstractBasePtr> &input_args) {
   MS_EXCEPTION_IF_NULL(primitive);
   auto prim_name = primitive->name();
-  CheckAndConvertUtils::CheckInteger("input numbers", input_args.size(), kEqual, 2, prim_name);
+  (void)CheckAndConvertUtils::CheckInteger("input numbers", SizeToLong(input_args.size()), kEqual, 2, prim_name);
   for (const auto &item : input_args) {
     MS_EXCEPTION_IF_NULL(item);
   }
@@ -40,7 +40,7 @@ TypePtr InferType(const PrimitivePtr &prim, const std::vector<AbstractBasePtr> &
     MS_EXCEPTION_IF_NULL(item);
   }
   auto op_name = prim->name();
-  CheckAndConvertUtils::CheckInteger("Add infer", input_args.size(), kGreaterEqual, 2, op_name);
+  (void)CheckAndConvertUtils::CheckInteger("Add infer", SizeToLong(input_args.size()), kGreaterEqual, 2, op_name);
   std::map<std::string, TypePtr> types;
   types.emplace("x", input_args[0]->BuildType());
   types.emplace("y", input_args[1]->BuildType());
@@ -33,14 +33,14 @@ void Adder::Init(const int64_t in_channel, const int64_t out_channel, const std:
   set_format(format);
 }

-void Adder::set_in_channel(const int64_t in_channel) { this->AddAttr(kInChannel, MakeValue(in_channel)); }
+void Adder::set_in_channel(const int64_t in_channel) { (void)this->AddAttr(kInChannel, MakeValue(in_channel)); }

 int64_t Adder::get_in_channel() const {
   auto value_ptr = GetAttr(kInChannel);
   return GetValue<int64_t>(value_ptr);
 }

-void Adder::set_out_channel(const int64_t out_channel) { this->AddAttr(kOutChannel, MakeValue(out_channel)); }
+void Adder::set_out_channel(const int64_t out_channel) { (void)this->AddAttr(kOutChannel, MakeValue(out_channel)); }

 int64_t Adder::get_out_channel() const {
   auto value_ptr = GetAttr(kOutChannel);

@@ -66,28 +66,28 @@ PadMode Adder::get_pad_mode() const {
   return PadMode(GetValue<int64_t>(value_ptr));
 }

-void Adder::set_stride(const std::vector<int64_t> &stride) { this->AddAttr(kStride, MakeValue(stride)); }
+void Adder::set_stride(const std::vector<int64_t> &stride) { (void)this->AddAttr(kStride, MakeValue(stride)); }

 std::vector<int64_t> Adder::get_stride() const {
   auto value_ptr = GetAttr(kStride);
   return GetValue<std::vector<int64_t>>(value_ptr);
 }

-void Adder::set_pad_list(const std::vector<int64_t> &pad_list) { this->AddAttr(kPadList, MakeValue(pad_list)); }
+void Adder::set_pad_list(const std::vector<int64_t> &pad_list) { (void)this->AddAttr(kPadList, MakeValue(pad_list)); }

 std::vector<int64_t> Adder::get_pad_list() const {
   auto value_ptr = GetAttr(kPadList);
   return GetValue<std::vector<int64_t>>(value_ptr);
 }

-void Adder::set_dilation(const std::vector<int64_t> &dilation) { this->AddAttr(kDilation, MakeValue(dilation)); }
+void Adder::set_dilation(const std::vector<int64_t> &dilation) { (void)this->AddAttr(kDilation, MakeValue(dilation)); }

 std::vector<int64_t> Adder::get_dilation() const {
   auto value_ptr = GetAttr(kDilation);
   return GetValue<std::vector<int64_t>>(value_ptr);
 }

-void Adder::set_group(const int64_t group) { this->AddAttr(kGroup, MakeValue(group)); }
+void Adder::set_group(const int64_t group) { (void)this->AddAttr(kGroup, MakeValue(group)); }

 int64_t Adder::get_group() const {
   auto value_ptr = GetAttr(kGroup);
@@ -70,7 +70,8 @@ TypePtr AddNInferType(const PrimitivePtr &prim, const std::vector<AbstractBasePt
   auto elements = input_args[0]->isa<abstract::AbstractTuple>()
                     ? input_args[0]->cast<abstract::AbstractTuplePtr>()->elements()
                     : input_args[0]->cast<abstract::AbstractListPtr>()->elements();
-  CheckAndConvertUtils::CheckInteger("concat element num", SizeToLong(elements.size()), kGreaterEqual, 1, prim->name());
+  (void)CheckAndConvertUtils::CheckInteger("concat element num", SizeToLong(elements.size()), kGreaterEqual, 1,
+                                           prim->name());
   std::map<std::string, TypePtr> types;
   types.emplace("element_0", elements[0]->BuildType());
   for (size_t i = 0; i < elements.size(); ++i) {

@@ -90,7 +91,7 @@ AbstractBasePtr AddNInfer(const abstract::AnalysisEnginePtr &, const PrimitivePt
                           const std::vector<AbstractBasePtr> &input_args) {
   MS_EXCEPTION_IF_NULL(primitive);
   auto prim_name = primitive->name();
-  CheckAndConvertUtils::CheckInteger("input number", SizeToLong(input_args.size()), kGreaterEqual, 1, prim_name);
+  (void)CheckAndConvertUtils::CheckInteger("input number", SizeToLong(input_args.size()), kGreaterEqual, 1, prim_name);
   for (const auto &item : input_args) {
     MS_EXCEPTION_IF_NULL(item);
   }
@@ -30,7 +30,7 @@ abstract::ShapePtr InferShape(const PrimitivePtr &primitive, const std::vector<A
   std::vector<int64_t> out_shape;
   for (size_t i = 0; i < x_shape.size(); ++i) {
     if (SizeToLong(i) != axis) {
-      out_shape.emplace_back(x_shape[i]);
+      (void)out_shape.emplace_back(x_shape[i]);
     }
   }
   return std::make_shared<abstract::Shape>(out_shape);

@@ -38,7 +38,7 @@ abstract::ShapePtr InferShape(const PrimitivePtr &primitive, const std::vector<A

 TypePtr InferType(const PrimitivePtr &prim, const std::vector<AbstractBasePtr> &input_args) {
   MS_EXCEPTION_IF_NULL(prim);
-  CheckAndConvertUtils::CheckInteger("input number", SizeToLong(input_args.size()), kEqual, 1, prim->name());
+  (void)CheckAndConvertUtils::CheckInteger("input number", SizeToLong(input_args.size()), kEqual, 1, prim->name());
   for (const auto &item : input_args) {
     MS_EXCEPTION_IF_NULL(item);
   }

@@ -51,8 +51,8 @@ void ArgMax::Init(const int64_t axis, const TypeId output_type) {
   set_output_type(output_type);
 }

-void ArgMax::set_axis(const int64_t axis) { this->AddAttr(kAxis, MakeValue(axis)); }
-void ArgMax::set_output_type(const TypeId output_type) { this->AddAttr(kOutputType, TypeIdToType(output_type)); }
+void ArgMax::set_axis(const int64_t axis) { (void)this->AddAttr(kAxis, MakeValue(axis)); }
+void ArgMax::set_output_type(const TypeId output_type) { (void)this->AddAttr(kOutputType, TypeIdToType(output_type)); }

 int64_t ArgMax::get_axis() const { return GetValue<int64_t>(GetAttr(kAxis)); }
 TypeId ArgMax::get_output_type() const {
@@ -24,8 +24,8 @@ void ArgMin::Init(const int64_t axis, const TypeId output_type) {
   set_output_type(output_type);
 }

-void ArgMin::set_axis(const int64_t axis) { this->AddAttr(kAxis, MakeValue(axis)); }
-void ArgMin::set_output_type(const TypeId output_type) { this->AddAttr(kOutputType, TypeIdToType(output_type)); }
+void ArgMin::set_axis(const int64_t axis) { (void)this->AddAttr(kAxis, MakeValue(axis)); }
+void ArgMin::set_output_type(const TypeId output_type) { (void)this->AddAttr(kOutputType, TypeIdToType(output_type)); }

 int64_t ArgMin::get_axis() const { return GetValue<int64_t>(GetAttr(kAxis)); }

@@ -51,7 +51,7 @@ AbstractBasePtr ArgMinInfer(const abstract::AnalysisEnginePtr &, const Primitive
   std::vector<int64_t> out_shape;
   for (int64_t i = 0; i < x_rank; i++) {
     if (i != axis) {
-      out_shape.push_back(x_shape[i]);
+      out_shape.push_back(x_shape[LongToSize(i)]);
     }
   }
@@ -26,7 +26,7 @@ AbstractBasePtr AsinInfer(const abstract::AnalysisEnginePtr &, const PrimitivePt
                           const std::vector<AbstractBasePtr> &input_args) {
   MS_EXCEPTION_IF_NULL(primitive);
   auto prim_name = primitive->name();
-  CheckAndConvertUtils::CheckInteger("Asin_infer", SizeToLong(input_args.size()), kEqual, 1, prim_name);
+  (void)CheckAndConvertUtils::CheckInteger("Asin_infer", SizeToLong(input_args.size()), kEqual, 1, prim_name);

   // Infer Shape
   auto x_shape = CheckAndConvertUtils::ConvertShapePtrToShapeMap(input_args[0]->BuildShape())[kShape];
@@ -40,19 +40,19 @@ AbstractBasePtr AssertInfer(const abstract::AnalysisEnginePtr &, const Primitive
   auto op_name = primitive->name();
   TypePtr condition;
   if (!(input_args[0]->BuildType()->type_id() == kObjectTypeTensorType)) {
-    auto condition_value = GetValue<std::vector<bool>>(input_args[0]->BuildValue());
-    CheckAndConvertUtils::CheckInteger("condition's rank", condition_value.size(), kLessEqual, 1, op_name);
-    if (condition_value.size() == 1) {
-      CheckAndConvertUtils::CheckInteger("condition[0]", condition_value[0], kEqual, 1, op_name);
+    auto condition_values = GetValue<std::vector<bool>>(input_args[0]->BuildValue());
+    CheckAndConvertUtils::CheckInteger("condition's rank", SizeToLong(condition_values.size()), kLessEqual, 1, op_name);
+    if (condition_values.size() == 1) {
+      CheckAndConvertUtils::CheckInteger("condition[0]", SizeToLong(condition_values[0]), kEqual, 1, op_name);
     }
     condition = TypeIdToType(kNumberTypeBool);
   } else {
     auto condition_shape = CheckAndConvertUtils::ConvertShapePtrToShapeMap(input_args[0]->BuildShape())[kShape];
-    CheckAndConvertUtils::CheckInteger("condition's rank", condition_shape[0], kLessEqual, 1, op_name);
+    (void)CheckAndConvertUtils::CheckInteger("condition's rank", condition_shape[0], kLessEqual, 1, op_name);
     if (condition_shape[0] == 1) {
       auto condition_value = reinterpret_cast<bool *>(input_args[0]->BuildValue()->cast<tensor::TensorPtr>()->data_c());
       MS_EXCEPTION_IF_NULL(condition_value);
-      CheckAndConvertUtils::CheckInteger("condition[0]", *condition_value, kEqual, 1, op_name);
+      (void)CheckAndConvertUtils::CheckInteger("condition[0]", *condition_value, kEqual, 1, op_name);
     }
     condition = input_args[0]->BuildType();
   }

@@ -63,7 +63,7 @@ AbstractBasePtr AssertInfer(const abstract::AnalysisEnginePtr &, const Primitive
   auto inputs_type = input_args[1]->BuildType()->cast<TuplePtr>()->elements();
   for (auto dtype : inputs_type) {
     std::set<TypePtr> template_types = {kTensorType};
-    CheckAndConvertUtils::CheckSubClass("input", dtype, template_types, op_name);
+    (void)CheckAndConvertUtils::CheckSubClass("input", dtype, template_types, op_name);
   }
   return std::make_shared<abstract::AbstractTensor>(kInt32, output_shape);
 }
@@ -24,7 +24,7 @@ AbstractBasePtr AtanInfer(const abstract::AnalysisEnginePtr &, const PrimitivePt
                           const std::vector<AbstractBasePtr> &input_args) {
   MS_EXCEPTION_IF_NULL(primitive);
   auto prim_name = primitive->name();
-  CheckAndConvertUtils::CheckInteger("Atan_infer", int64_t(input_args.size()), kEqual, 1, prim_name);
+  (void)CheckAndConvertUtils::CheckInteger("Atan_infer", SizeToLong(input_args.size()), kEqual, 1, prim_name);

   // Infer Shape
   auto x_shape = CheckAndConvertUtils::ConvertShapePtrToShapeMap(input_args[0]->BuildShape())[kShape];
@@ -73,7 +73,7 @@ int64_t AudioSpectrogram::get_window_size() const {
   return GetValue<int64_t>(value_ptr);
 }

-void AudioSpectrogram::set_stride(const int64_t stride) { this->AddAttr(kStride, MakeValue(stride)); }
+void AudioSpectrogram::set_stride(const int64_t stride) { (void)this->AddAttr(kStride, MakeValue(stride)); }
 int64_t AudioSpectrogram::get_stride() const {
   auto value_ptr = GetAttr(kStride);
   return GetValue<int64_t>(value_ptr);

@@ -100,7 +100,7 @@ int64_t GetFftLength(int64_t length) {
   return SizeToLong(1 << (unsigned int)shift);
 }

-void AudioSpectrogram::set_mag_square(const bool mag_square) { this->AddAttr(kMagSquare, MakeValue(mag_square)); }
+void AudioSpectrogram::set_mag_square(const bool mag_square) { (void)this->AddAttr(kMagSquare, MakeValue(mag_square)); }
 bool AudioSpectrogram::get_mag_square() const {
   auto value_ptr = GetAttr(kMagSquare);
   return GetValue<bool>(value_ptr);
@@ -107,7 +107,7 @@ void GetPadsByPadding(int64_t in_d, int64_t in_h, int64_t in_w, int64_t kernel_d
     pad_list->push_back(static_cast<int64_t>(std::floor(pad_w / 2)));
     pad_list->push_back(pad_w - pad_list->at(4));
   } else if (pad_mode == PadMode::PAD) {
-    (void)pad_list->assign(padding.begin(), padding.end());
+    pad_list->assign(padding.begin(), padding.end());
   }
 }
@@ -69,8 +69,8 @@ abstract::ShapePtr BatchMatmulInferShape(const PrimitivePtr &primitive,
   ShapeVector x_max_shape = x_shape_map[kMaxShape];
   ShapeVector y_min_shape = y_shape_map[kMinShape];
   ShapeVector y_max_shape = y_shape_map[kMaxShape];
-  (void)CheckAndConvertUtils::CheckMinMaxShape(x_shp, &x_min_shape, &x_max_shape);
-  (void)CheckAndConvertUtils::CheckMinMaxShape(y_shp, &y_min_shape, &y_max_shape);
+  CheckAndConvertUtils::CheckMinMaxShape(x_shp, &x_min_shape, &x_max_shape);
+  CheckAndConvertUtils::CheckMinMaxShape(y_shp, &y_min_shape, &y_max_shape);
   // Additional check for dynamic shape
   // Last infer will be real shape values
   bool x_not_dyn =
@@ -100,7 +100,7 @@ AbstractBasePtr BatchNormInfer(const abstract::AnalysisEnginePtr &, const Primit
                               TypeError);

   if (!GetValue<bool>(primitive->GetAttr(kIsTraining))) {
-    CheckAndConvertUtils::CheckInteger("mean rank", SizeToLong(mean.size()), kEqual, 1, prim_name);
+    (void)CheckAndConvertUtils::CheckInteger("mean rank", SizeToLong(mean.size()), kEqual, 1, prim_name);
     CheckAndConvertUtils::Check("mean shape", mean, kEqual, "variance shape", variance, prim_name, TypeError);
     CheckAndConvertUtils::Check("mean shape", mean, kEqual, "scale shape", scale, prim_name, TypeError);
   }
@@ -48,7 +48,7 @@ AbstractBasePtr BatchToSpaceInfer(const abstract::AnalysisEnginePtr &, const Pri
                                   const std::vector<AbstractBasePtr> &input_args) {
   MS_EXCEPTION_IF_NULL(primitive);
   auto prim_name = primitive->name();
-  CheckAndConvertUtils::CheckInteger("input number", SizeToLong(input_args.size()), kEqual, 1, prim_name);
+  (void)CheckAndConvertUtils::CheckInteger("input number", SizeToLong(input_args.size()), kEqual, 1, prim_name);
   for (const auto &item : input_args) {
     MS_EXCEPTION_IF_NULL(item);
   }

@@ -56,7 +56,7 @@ AbstractBasePtr BatchToSpaceInfer(const abstract::AnalysisEnginePtr &, const Pri
                                   prim_name);

   auto x_shape = CheckAndConvertUtils::ConvertShapePtrToShapeMap(input_args[0]->BuildShape())[kShape];
-  CheckAndConvertUtils::CheckInteger("x rank", SizeToLong(x_shape.size()), kEqual, 4, prim_name);
+  (void)CheckAndConvertUtils::CheckInteger("x rank", SizeToLong(x_shape.size()), kEqual, 4, prim_name);
   auto block_size = GetValue<std::vector<int64_t>>(primitive->GetAttr(kBlockSize));
   auto crops = GetValue<std::vector<std::vector<int64_t>>>(primitive->GetAttr(kCrops));
   auto out_shape = x_shape;

@@ -66,8 +66,8 @@ AbstractBasePtr BatchToSpaceInfer(const abstract::AnalysisEnginePtr &, const Pri
     CheckAndConvertUtils::Check("x block shape prod", x_block_prod, kGreaterThan, "crops sum", 4, prim_name);
     out_shape[i + 2] = x_block_prod - crops_sum;
   }
-  CheckAndConvertUtils::CheckInteger("x_shape[0] % (block_size[0]*block_size[1])",
-                                     out_shape[0] % (block_size[0] * block_size[1]), kEqual, 0, prim_name);
+  (void)CheckAndConvertUtils::CheckInteger("x_shape[0] % (block_size[0]*block_size[1])",
+                                           out_shape[0] % (block_size[0] * block_size[1]), kEqual, 0, prim_name);
   out_shape[0] /= block_size[0] * block_size[1];

   auto ret = input_args[0]->Broaden();
@@ -33,11 +33,11 @@ abstract::ShapePtr InferShape(const PrimitivePtr &primitive, const std::vector<A
   CheckAndConvertUtils::CheckInteger("input_x rank", SizeToLong(x_shape.size()), kEqual, 4, prim_name);
   auto out_shape = x_shape;
   int64_t block_shape_prod = 1;
-  int64_t offset = 2;
+  size_t offset = 2;
   auto block_shape = GetValue<std::vector<int64_t>>(primitive->GetAttr(kBlockShape));
   auto crops = GetValue<std::vector<std::vector<int64_t>>>(primitive->GetAttr(kCrops));
-  int64_t size = block_shape.size();
-  for (int64_t i = 0; i < size; i++) {
+  size_t size = block_shape.size();
+  for (size_t i = 0; i < size; i++) {
     block_shape_prod = block_shape_prod * block_shape[i];
     auto x_block_prod = out_shape[i + offset] * block_shape[i];
     auto crops_sum = crops[i][0] + crops[i][1];

@@ -62,14 +62,14 @@ TypePtr InferType(const std::vector<AbstractBasePtr> &input_args) {
 }  // namespace

 void BatchToSpaceND::set_crops(std::vector<std::vector<int64_t>> crops) {
-  CheckAndConvertUtils::CheckInteger(kCrops, SizeToLong(crops.size()), kEqual, 2, this->name());
-  int64_t h = crops.size();
-  int64_t w = crops[0].size();
-  std::vector<int64_t> temp_w = {2, 2};
+  (void)CheckAndConvertUtils::CheckInteger(kCrops, SizeToLong(crops.size()), kEqual, 2, this->name());
+  size_t h = crops.size();
+  size_t w = crops[0].size();
+  std::vector<size_t> temp_w = {2, 2};
   CheckAndConvertUtils::Check(kCrops, {h, w}, kEqual, "paddings_shape(2,2)", temp_w, this->name());
-  for (int64_t i = 0; i < h; i++) {
-    for (int64_t j = 0; j < w; j++) {
-      CheckAndConvertUtils::CheckInteger(kCrops, crops[i][j], kGreaterEqual, 0, this->name());
+  for (size_t i = 0; i < h; i++) {
+    for (size_t j = 0; j < w; j++) {
+      (void)CheckAndConvertUtils::CheckInteger(kCrops, crops[i][j], kGreaterEqual, 0, this->name());
     }
   }
   this->AddAttr(kCrops, MakeValue(crops));

@@ -81,8 +81,8 @@ std::vector<std::vector<int64_t>> BatchToSpaceND::get_crops() const {
 }
 void BatchToSpaceND::set_block_shape(std::vector<int64_t> block_shape) {
   CheckAndConvertUtils::CheckInteger(kBlockShape, SizeToLong(block_shape.size()), kEqual, 2, this->name());
-  for (int64_t i = 0; i < (int64_t)block_shape.size(); i++) {
-    CheckAndConvertUtils::CheckInteger(kBlockShape, block_shape[i], kGreaterEqual, 1, this->name());
+  for (size_t i = 0; i < block_shape.size(); i++) {
+    (void)CheckAndConvertUtils::CheckInteger(kBlockShape, block_shape[i], kGreaterEqual, 1, this->name());
   }
   this->AddAttr(kBlockShape, MakeValue(block_shape));
 }
@@ -35,15 +35,15 @@ abstract::ShapePtr InferShape(const PrimitivePtr &primitive, const std::vector<A
   auto bias = CheckAndConvertUtils::CheckArgs<abstract::AbstractTensor>(prim_name, input_args, 1);
   MS_EXCEPTION_IF_NULL(x);
   MS_EXCEPTION_IF_NULL(bias);
-  CheckAndConvertUtils::CheckInteger("arg size", input_args.size(), kEqual, 2, prim_name);
+  CheckAndConvertUtils::CheckInteger("arg size", SizeToLong(input_args.size()), kEqual, 2, prim_name);
   auto shape_map = CheckAndConvertUtils::ConvertShapePtrToShapeMap(input_args[0]->BuildShape());
   auto input_shape = shape_map[kShape];
   auto min_shape = shape_map[kMinShape];
   auto max_shape = shape_map[kMaxShape];
   CheckAndConvertUtils::CheckInRange("bias_add_infer", input_shape.size(), kIncludeBoth, {2, 5}, prim_name);
   auto bias_shape = CheckAndConvertUtils::ConvertShapePtrToShapeMap(input_args[1]->BuildShape())[kShape];
-  CheckAndConvertUtils::CheckInteger("bias rank", bias_shape.size(), kEqual, 1, prim_name);
-  CheckAndConvertUtils::CheckInteger("x rank", input_shape.size(), kGreaterEqual, 2, prim_name);
+  CheckAndConvertUtils::CheckInteger("bias rank", SizeToLong(bias_shape.size()), kEqual, 1, prim_name);
+  CheckAndConvertUtils::CheckInteger("x rank", SizeToLong(input_shape.size()), kGreaterEqual, 2, prim_name);
   auto data_format_ptr = primitive->GetAttr("format");
   int64_t data_format = Format::NCHW;
   if (data_format_ptr != nullptr) {

@@ -71,7 +71,7 @@ abstract::ShapePtr InferShape(const PrimitivePtr &primitive, const std::vector<A
 TypePtr InferType(const PrimitivePtr &prim, const std::vector<AbstractBasePtr> &input_args) {
   MS_EXCEPTION_IF_NULL(prim);
   auto prim_name = prim->name();
-  CheckAndConvertUtils::CheckInteger("biasadd_infer", input_args.size(), kEqual, 2, prim_name);
+  CheckAndConvertUtils::CheckInteger("biasadd_infer", SizeToLong(input_args.size()), kEqual, 2, prim_name);
   for (const auto &item : input_args) {
     MS_EXCEPTION_IF_NULL(item);
   }
@@ -50,7 +50,8 @@ abstract::ShapePtr BinaryCrossEntroyInferShape(const PrimitivePtr &primitive,
 }

 TypePtr BinaryCrossEntroyInferType(const PrimitivePtr &prim, const std::vector<AbstractBasePtr> &input_args) {
-  CheckAndConvertUtils::CheckInteger("binary_cross_entropy_infer", input_args.size(), kEqual, 3, prim->name());
+  (void)CheckAndConvertUtils::CheckInteger("binary_cross_entropy_infer", SizeToLong(input_args.size()), kEqual, 3,
+                                           prim->name());
   for (const auto &item : input_args) {
     MS_EXCEPTION_IF_NULL(item);
   }
@@ -35,7 +35,8 @@ abstract::ShapePtr BroadcastToInferShape(const PrimitivePtr &primitive,
   } else {
     flag = true;
   }
-  if (flag == true) {
+
+  if (flag) {
     for (size_t i = 0; i < input_x.size(); i++) {
       if (input_x[i] == -1) {
         if (i < outer_dim_offset) {
@@ -26,14 +26,14 @@ void Clip::Init(const float max, const float min) {
   this->set_min(min);
 }

-void Clip::set_max(const float max) { this->AddAttr(kMax, MakeValue(max)); }
+void Clip::set_max(const float max) { (void)this->AddAttr(kMax, MakeValue(max)); }

 float Clip::get_max() const {
   auto value_ptr = this->GetAttr(kMax);
   return GetValue<float>(value_ptr);
 }

-void Clip::set_min(const float min) { this->AddAttr(kMin, MakeValue(min)); }
+void Clip::set_min(const float min) { (void)this->AddAttr(kMin, MakeValue(min)); }

 float Clip::get_min() const {
   auto value_ptr = this->GetAttr(kMin);
@@ -27,7 +27,7 @@ int64_t Concat::get_axis() const {
   return GetValue<int64_t>(value_ptr);
 }

-void Concat::set_axis(const int64_t axis) { this->AddAttr(kAxis, MakeValue(axis)); }
+void Concat::set_axis(const int64_t axis) { (void)this->AddAttr(kAxis, MakeValue(axis)); }

 AbstractBasePtr ConcatInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive,
                             const std::vector<AbstractBasePtr> &input_args) {

@@ -40,7 +40,8 @@ AbstractBasePtr ConcatInfer(const abstract::AnalysisEnginePtr &, const Primitive
   auto input_tuple = input_args[0]->cast<abstract::AbstractTuplePtr>();
   MS_EXCEPTION_IF_NULL(input_tuple);
   auto elements = input_tuple->elements();
-  (void)CheckAndConvertUtils::CheckInteger("concat element num", elements.size(), kGreaterEqual, 1, prim_name);
+  (void)CheckAndConvertUtils::CheckInteger("concat element num", SizeToLong(elements.size()), kGreaterEqual, 1,
+                                           prim_name);
   auto element0 = elements[0]->cast<abstract::AbstractTensorPtr>();
   MS_EXCEPTION_IF_NULL(element0);
   auto element0_shape = CheckAndConvertUtils::ConvertShapePtrToShapeMap(element0->BuildShape())[kShape];
@@ -24,7 +24,8 @@ namespace ops {
 namespace {
 abstract::ShapePtr InferShape(const PrimitivePtr &primitive, const std::vector<AbstractBasePtr> &input_args) {
   MS_EXCEPTION_IF_NULL(primitive);
-  CheckAndConvertUtils::CheckInteger("input args size", SizeToLong(input_args.size()), kEqual, 1, "ConstantOfShape");
+  (void)CheckAndConvertUtils::CheckInteger("input args size", SizeToLong(input_args.size()), kEqual, 1,
+                                           "ConstantOfShape");
   auto input_shape = CheckAndConvertUtils::ConvertShapePtrToShapeMap(input_args[0]->BuildShape())[kShape];
   return std::make_shared<abstract::Shape>(input_shape);
 }

@@ -41,14 +42,14 @@ void ConstantOfShape::Init(int64_t data_type, const std::vector<float> &value) {
   this->set_value(value);
 }

-void ConstantOfShape::set_data_type(int64_t data_type) { this->AddAttr(kDataType, MakeValue(data_type)); }
+void ConstantOfShape::set_data_type(int64_t data_type) { (void)this->AddAttr(kDataType, MakeValue(data_type)); }

 int64_t ConstantOfShape::get_data_type() const {
   auto value_ptr = this->GetAttr(kDataType);
   return GetValue<int64_t>(value_ptr);
 }

-void ConstantOfShape::set_value(const std::vector<float> &value) { this->AddAttr(kValue, MakeValue(value)); }
+void ConstantOfShape::set_value(const std::vector<float> &value) { (void)this->AddAttr(kValue, MakeValue(value)); }

 std::vector<float> ConstantOfShape::get_value() const {
   auto value_ptr = this->GetAttr(kValue);
@@ -106,8 +106,8 @@ abstract::ShapePtr Conv2dInferShape(const PrimitivePtr &primitive, const std::ve
   auto w_shape_map = CheckAndConvertUtils::ConvertShapePtrToShapeMap(input_args[1]->BuildShape());
   auto x_shape = x_shape_map[kShape];
   auto w_shape = w_shape_map[kShape];
-  CheckAndConvertUtils::CheckInteger("x shape size", x_shape.size(), kEqual, 4, primitive->name());
-  CheckAndConvertUtils::CheckInteger("w shape size", w_shape.size(), kEqual, 4, primitive->name());
+  CheckAndConvertUtils::CheckInteger("x shape size", SizeToLong(x_shape.size()), kEqual, 4, primitive->name());
+  CheckAndConvertUtils::CheckInteger("w shape size", SizeToLong(w_shape.size()), kEqual, 4, primitive->name());
   auto x_min_shape = x_shape_map[kMinShape];
   auto x_max_shape = x_shape_map[kMaxShape];
   auto w_min_shape = w_shape_map[kMinShape];

@@ -313,11 +313,12 @@ Format Conv2D::get_format() const {

 AbstractBasePtr Conv2dInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive,
                             const std::vector<AbstractBasePtr> &input_args) {
-  CheckAndConvertUtils::CheckInteger("Conv2d infer", input_args.size(), kGreaterEqual, 2, primitive->name());
+  CheckAndConvertUtils::CheckInteger("Conv2d infer", SizeToLong(input_args.size()), kGreaterEqual, 2,
+                                     primitive->name());
   const std::set<TypePtr> valid_types = {kInt8, kInt32, kInt64, kFloat16, kFloat32};
   std::map<std::string, TypePtr> types;
-  (void)types.emplace("x", input_args[0]->BuildType());
-  (void)types.emplace("w", input_args[1]->BuildType());
+  types.emplace("x", input_args[0]->BuildType());
+  types.emplace("w", input_args[1]->BuildType());
   CheckAndConvertUtils::CheckTensorTypeSame(types, valid_types, primitive->name());
   return abstract::MakeAbstract(Conv2dInferShape(primitive, input_args), Conv2dInferType(primitive, input_args));
 }
@@ -54,7 +54,7 @@ void Conv2DTranspose::set_out_channel(int64_t out_channel) {
 void Conv2DTranspose::set_kernel_size(const std::vector<int64_t> &kernel_size) {
   CheckAndConvertUtils::CheckInteger(kKernelSize, SizeToLong(kernel_size.size()), kEqual, 2, name());
   for (int64_t item : kernel_size) {
-    CheckAndConvertUtils::CheckInteger(kKernelSize, item, kGreaterEqual, 1, name());
+    (void)CheckAndConvertUtils::CheckInteger(kKernelSize, item, kGreaterEqual, 1, name());
   }
   AddAttr(kKernelSize, MakeValue(kernel_size));
 }

@@ -62,7 +62,7 @@ void Conv2DTranspose::set_kernel_size(const std::vector<int64_t> &kernel_size) {
 void Conv2DTranspose::set_stride(const std::vector<int64_t> &stride) {
   CheckAndConvertUtils::CheckInteger(kStride, SizeToLong(stride.size()), kEqual, 2, name());
   for (int64_t item : stride) {
-    CheckAndConvertUtils::CheckInteger(kStride, item, kGreaterEqual, 1, name());
+    (void)CheckAndConvertUtils::CheckInteger(kStride, item, kGreaterEqual, 1, name());
   }
   AddAttr(kStride, MakeValue(stride));
 }
@@ -13,11 +13,11 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-#include "ops/cos.h"

 #include <set>
 #include <map>
 #include <string>
+#include "ops/cos.h"

 namespace mindspore {
 namespace ops {

@@ -44,7 +44,7 @@ TypePtr InferType(const PrimitivePtr &prim, const std::vector<AbstractBasePtr> &
 AbstractBasePtr CosInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive,
                          const std::vector<AbstractBasePtr> &input_args) {
   return std::make_shared<abstract::AbstractTensor>(InferType(primitive, input_args),
-                                                    InferShape(primitive, input_args)->shape());
+                                                    InferShape(primitive, input_args));
 }
 REGISTER_PRIMITIVE_C(kNameCos, Cos);
 }  // namespace ops
@@ -27,14 +27,14 @@ void Crop::Init(const int64_t axis, const std::vector<int64_t> &offsets) {
   this->set_offsets(offsets);
 }

-void Crop::set_axis(const int64_t axis) { this->AddAttr(kAxis, MakeValue(axis)); }
+void Crop::set_axis(const int64_t axis) { (void)this->AddAttr(kAxis, MakeValue(axis)); }

 int64_t Crop::get_axis() const {
   auto value_ptr = this->GetAttr(kAxis);
   return GetValue<int64_t>(value_ptr);
 }

-void Crop::set_offsets(const std::vector<int64_t> &offsets) { this->AddAttr(kOffsets, MakeValue(offsets)); }
+void Crop::set_offsets(const std::vector<int64_t> &offsets) { (void)this->AddAttr(kOffsets, MakeValue(offsets)); }

 std::vector<int64_t> Crop::get_offsets() const {
   auto value_ptr = this->GetAttr(kOffsets);

@@ -44,7 +44,7 @@ AbstractBasePtr CropInfer(const abstract::AnalysisEnginePtr &, const PrimitivePt
                           const std::vector<AbstractBasePtr> &input_args) {
   MS_EXCEPTION_IF_NULL(primitive);
   auto prim_name = primitive->name();
-  CheckAndConvertUtils::CheckInteger("input number", SizeToLong(input_args.size()), kEqual, 2, prim_name);
+  (void)CheckAndConvertUtils::CheckInteger("input number", SizeToLong(input_args.size()), kEqual, 2, prim_name);
   for (const auto &item : input_args) {
     MS_EXCEPTION_IF_NULL(item);
   }
@@ -35,10 +35,10 @@ std::string Custom::get_type() const {
 void Custom::set_attr(const std::map<std::string, std::vector<uint8_t>> &attrs) {
   ValuePtrList value_ptr_list;
   for (const auto &attr : attrs) {
-    value_ptr_list.emplace_back(MakeValue<std::string>(attr.first));
-    value_ptr_list.emplace_back(MakeValue<std::vector<uint8_t>>(attr.second));
+    (void)value_ptr_list.emplace_back(MakeValue<std::string>(attr.first));
+    (void)value_ptr_list.emplace_back(MakeValue<std::vector<uint8_t>>(attr.second));
   }
-  this->AddAttr(kAttr, MakeValue(value_ptr_list));
+  (void)this->AddAttr(kAttr, MakeValue(value_ptr_list));
 }

 std::map<std::string, std::vector<uint8_t>> Custom::get_attr() const {
@@ -26,7 +26,7 @@ void CustomPredict::Init(const int64_t output_num, const float weight_threshold)
   this->set_weight_threshold(weight_threshold);
 }

-void CustomPredict::set_output_num(const int64_t output_num) { this->AddAttr(kOutputNum, MakeValue(output_num)); }
+void CustomPredict::set_output_num(const int64_t output_num) { (void)this->AddAttr(kOutputNum, MakeValue(output_num)); }

 int64_t CustomPredict::get_output_num() const {
   auto value_ptr = this->GetAttr(kOutputNum);
@@ -43,7 +43,7 @@ int64_t DetectionPostProcess::get_input_size() const {
   return GetValue<int64_t>(value_ptr);
 }

-void DetectionPostProcess::set_scale(const std::vector<float> &scale) { this->AddAttr(kScale, MakeValue(scale)); }
+void DetectionPostProcess::set_scale(const std::vector<float> &scale) { (void)this->AddAttr(kScale, MakeValue(scale)); }
 std::vector<float> DetectionPostProcess::get_scale() const {
   auto value_ptr = this->GetAttr(kScale);
   return GetValue<std::vector<float>>(value_ptr);

@@ -113,8 +113,8 @@ AbstractBasePtr DetectionPostProcessInfer(const abstract::AnalysisEnginePtr &, c
                                           const std::vector<AbstractBasePtr> &input_args) {
   MS_EXCEPTION_IF_NULL(primitive);
   auto prim_name = primitive->name();
-  CheckAndConvertUtils::CheckInteger("detection_post_process_infer", SizeToLong(input_args.size()), kEqual, 3,
-                                     prim_name);
+  (void)CheckAndConvertUtils::CheckInteger("detection_post_process_infer", SizeToLong(input_args.size()), kEqual, 3,
+                                           prim_name);
   MS_EXCEPTION_IF_NULL(input_args[0]);
   MS_EXCEPTION_IF_NULL(input_args[1]);
   MS_EXCEPTION_IF_NULL(input_args[2]);
@@ -30,7 +30,7 @@ namespace {
 abstract::ShapePtr InferShape(const PrimitivePtr &primitive, const std::vector<AbstractBasePtr> &input_args) {
   MS_EXCEPTION_IF_NULL(primitive);
   auto op_name = primitive->name();
-  CheckAndConvertUtils::CheckInteger("input number", input_args.size(), kEqual, 2, op_name);
+  (void)CheckAndConvertUtils::CheckInteger("input number", SizeToLong(input_args.size()), kEqual, 2, op_name);
   for (const auto &item : input_args) {
     MS_EXCEPTION_IF_NULL(item);
   }

@@ -40,7 +40,7 @@ abstract::ShapePtr InferShape(const PrimitivePtr &primitive, const std::vector<A
 TypePtr InferType(const PrimitivePtr &prim, const std::vector<AbstractBasePtr> &input_args) {
   MS_EXCEPTION_IF_NULL(prim);
   auto op_name = prim->name();
-  CheckAndConvertUtils::CheckInteger("input number", input_args.size(), kGreaterEqual, 2, op_name);
+  (void)CheckAndConvertUtils::CheckInteger("input number", SizeToLong(input_args.size()), kGreaterEqual, 2, op_name);
   for (const auto &item : input_args) {
     MS_EXCEPTION_IF_NULL(item);
   }

@@ -46,15 +46,14 @@ abstract::ShapePtr InferShape(const PrimitivePtr &primitive, const std::vector<A

 TypePtr InferType(const PrimitivePtr &prim, const std::vector<AbstractBasePtr> &input_args) {
   MS_EXCEPTION_IF_NULL(prim);
-  auto op_name = prim->name();
-  CheckAndConvertUtils::CheckInteger("input number", input_args.size(), kEqual, 1, op_name);
+  (void)CheckAndConvertUtils::CheckInteger("input number", SizeToLong(input_args.size()), kEqual, 1, prim->name());
   for (const auto &item : input_args) {
     MS_EXCEPTION_IF_NULL(item);
   }
   std::map<std::string, TypePtr> types;
   types.emplace("x", input_args[0]->BuildType());
   std::set<TypePtr> valid_params_types = {kTensorType};
-  CheckAndConvertUtils::CheckSubClass("x_type", input_args[0]->BuildType(), valid_params_types, op_name);
+  CheckAndConvertUtils::CheckSubClass("x_type", input_args[0]->BuildType(), valid_params_types, prim->name());
   return CheckAndConvertUtils::CheckTensorTypeSame(types, common_valid_types, prim->name());
 }
 }  // namespace
@@ -31,7 +31,7 @@ AbstractBasePtr ExpandDimsInfer(const abstract::AnalysisEnginePtr &, const Primi
                                 const std::vector<AbstractBasePtr> &input_args) {
   MS_EXCEPTION_IF_NULL(primitive);
   auto prim_name = primitive->name();
-  CheckAndConvertUtils::CheckInteger("input numbers", input_args.size(), kEqual, 2, prim_name);
+  (void)CheckAndConvertUtils::CheckInteger("input numbers", SizeToLong(input_args.size()), kEqual, 2, prim_name);
   for (const auto &item : input_args) {
     MS_EXCEPTION_IF_NULL(item);
   }
@@ -28,7 +28,7 @@ AbstractBasePtr FftRealInfer(const abstract::AnalysisEnginePtr &, const Primitiv
                              const std::vector<AbstractBasePtr> &input_args) {
   MS_EXCEPTION_IF_NULL(primitive);
   auto prim_name = primitive->name();
-  CheckAndConvertUtils::CheckInteger("input number", input_args.size(), kEqual, 1, prim_name);
+  (void)CheckAndConvertUtils::CheckInteger("input number", SizeToLong(input_args.size()), kEqual, 1, prim_name);
   for (const auto &item : input_args) {
     MS_EXCEPTION_IF_NULL(item);
   }
@@ -26,7 +26,7 @@ AbstractBasePtr FillInfer(const abstract::AnalysisEnginePtr &, const PrimitivePt
                           const std::vector<AbstractBasePtr> &input_args) {
   MS_EXCEPTION_IF_NULL(primitive);
   auto prim_name = primitive->name();
-  CheckAndConvertUtils::CheckInteger("input number", SizeToLong(input_args.size()), kEqual, 3, prim_name);
+  (void)CheckAndConvertUtils::CheckInteger("input number", SizeToLong(input_args.size()), kEqual, 3, prim_name);
   for (const auto &item : input_args) {
     MS_EXCEPTION_IF_NULL(item);
   }

@@ -47,11 +47,11 @@ AbstractBasePtr FillInfer(const abstract::AnalysisEnginePtr &, const PrimitivePt
   tensor::TensorPtr tensor = std::make_shared<tensor::Tensor>(x_type_id, out_shape);
   auto mem_size = IntToSize(tensor->ElementsNum());
   if (x_type_id == kNumberTypeInt) {
-    auto num = GetValue<int>(x_value);
-    SetTensorData(tensor->data_c(), num, mem_size);
+    auto int_value = GetValue<int>(x_value);
+    SetTensorData(tensor->data_c(), int_value, mem_size);
   } else if (x_type_id == kNumberTypeFloat || x_type_id == kNumberTypeFloat32) {
-    auto num = GetValue<float>(x_value);
-    SetTensorData(tensor->data_c(), num, mem_size);
+    auto float_value = GetValue<float>(x_value);
+    SetTensorData(tensor->data_c(), float_value, mem_size);
   } else {
     MS_LOG(ERROR) << " Fill not supported to flod the constant type " << input_args[2]->ToString();
   }
@@ -24,7 +24,8 @@ namespace {
 abstract::ShapePtr InferShape(const PrimitivePtr &primitive, const std::vector<AbstractBasePtr> &input_args) {
   MS_EXCEPTION_IF_NULL(primitive);
   auto prim_name = primitive->name();
-  CheckAndConvertUtils::CheckInteger("input args size", input_args.size(), kGreaterEqual, 1, prim_name);
+  (void)CheckAndConvertUtils::CheckInteger("input args size", SizeToLong(input_args.size()), kGreaterEqual, 1,
+                                           prim_name);
   auto x_shape = CheckAndConvertUtils::ConvertShapePtrToShapeMap(input_args[0]->BuildShape())[kShape];
   auto prod = 1;
   int64_t size = x_shape.size();
@@ -27,11 +27,11 @@ void FusedBatchNorm::Init(const int64_t mode, const float epsilon, const float m
   this->set_momentum(momentum);
 }

-void FusedBatchNorm::set_mode(const int64_t mode) { this->AddAttr(kMode, MakeValue(mode)); }
+void FusedBatchNorm::set_mode(const int64_t mode) { (void)this->AddAttr(kMode, MakeValue(mode)); }

-void FusedBatchNorm::set_epsilon(const float epsilon) { this->AddAttr(kEpsilon, MakeValue(epsilon)); }
+void FusedBatchNorm::set_epsilon(const float epsilon) { (void)this->AddAttr(kEpsilon, MakeValue(epsilon)); }

-void FusedBatchNorm::set_momentum(const float momentum) { this->AddAttr(kMomentum, MakeValue(momentum)); }
+void FusedBatchNorm::set_momentum(const float momentum) { (void)this->AddAttr(kMomentum, MakeValue(momentum)); }

 int64_t FusedBatchNorm::get_mode() const {
   auto value_ptr = this->GetAttr(kMode);
@@ -23,16 +23,15 @@

 namespace mindspore {
 namespace ops {
-void Activation::set_alpha(const float alpha) { this->AddAttr(kAlpha, MakeValue(alpha)); }
+void Activation::set_alpha(const float alpha) { (void)this->AddAttr(kAlpha, MakeValue(alpha)); }

-void Activation::set_min_val(const float min_val) { this->AddAttr(kMinVal, MakeValue(min_val)); }
+void Activation::set_min_val(const float min_val) { (void)this->AddAttr(kMinVal, MakeValue(min_val)); }

-void Activation::set_max_val(const float max_val) { this->AddAttr(kMaxVal, MakeValue(max_val)); }
+void Activation::set_max_val(const float max_val) { (void)this->AddAttr(kMaxVal, MakeValue(max_val)); }

 void Activation::set_activation_type(const ActivationType &activation_type) {
-  int64_t swi;
-  swi = activation_type;
-  this->AddAttr(kActivationType, MakeValue(swi));
+  int64_t swi = activation_type;
+  (void)this->AddAttr(kActivationType, MakeValue(swi));
 }

 float Activation::get_alpha() const {
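Note on the set_activation_type hunks here and below: initializing swi at its declaration removes the declare-then-assign sequence that lint flags, and the unscoped enum value widens to int64_t implicitly so it can be wrapped by MakeValue. A standalone sketch with a hypothetical enum:

#include <cstdint>

enum ActivationType : int64_t { NO_ACTIVATION = 0, RELU = 1 };

void StoreActivation(ActivationType activation_type) {
  // Declare and initialize in one statement; lint flags the window in
  // which a separately declared variable is still uninitialized.
  int64_t swi = activation_type;  // unscoped enum converts implicitly
  (void)swi;  // stands in for AddAttr(kActivationType, MakeValue(swi))
}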
@@ -26,9 +26,8 @@
 namespace mindspore {
 namespace ops {
 void AddFusion::set_activation_type(const ActivationType activation_type) {
-  int64_t swi;
-  swi = activation_type;
-  this->AddAttr(kActivationType, MakeValue(swi));
+  int64_t swi = activation_type;
+  (void)this->AddAttr(kActivationType, MakeValue(swi));
 }
 ActivationType AddFusion::get_activation_type() const {
   auto value_ptr = GetAttr(kActivationType);
@@ -37,7 +37,7 @@ void AdderFusion::Init(const int64_t in_channel, const int64_t out_channel, cons

 void AdderFusion::set_activation_type(const ActivationType activation_type) {
   int64_t swi = activation_type;
-  this->AddAttr(kActivationType, MakeValue(swi));
+  (void)this->AddAttr(kActivationType, MakeValue(swi));
 }

 ActivationType AdderFusion::get_activation_type() const {
@@ -25,11 +25,11 @@ void ArgMaxFusion::Init(const bool keep_dims, const bool out_max_value, const in
   set_top_k(top_k);
 }

-void ArgMaxFusion::set_keep_dims(const bool keep_dims) { this->AddAttr(kKeepDims, MakeValue(keep_dims)); }
+void ArgMaxFusion::set_keep_dims(const bool keep_dims) { (void)this->AddAttr(kKeepDims, MakeValue(keep_dims)); }
 void ArgMaxFusion::set_out_max_value(const bool out_max_value) {
-  this->AddAttr(kOutMaxValue, MakeValue(out_max_value));
+  (void)this->AddAttr(kOutMaxValue, MakeValue(out_max_value));
 }
-void ArgMaxFusion::set_top_k(const int64_t top_k) { this->AddAttr(kTopK, MakeValue(top_k)); }
+void ArgMaxFusion::set_top_k(const int64_t top_k) { (void)this->AddAttr(kTopK, MakeValue(top_k)); }

 bool ArgMaxFusion::get_keep_dims() const { return GetValue<bool>(GetAttr(kKeepDims)); }
 bool ArgMaxFusion::get_out_max_value() const { return GetValue<bool>(GetAttr(kOutMaxValue)); }
@@ -25,9 +25,9 @@ void ArgMinFusion::Init(bool keep_dims, bool out_max_value, int64_t top_k, int64
   set_top_k(top_k);
 }

-void ArgMinFusion::set_keep_dims(const bool keep_dims) { this->AddAttr(kKeepDims, MakeValue(keep_dims)); }
-void ArgMinFusion::set_out_max_value(bool out_max_value) { AddAttr(kOutMaxValue, MakeValue(out_max_value)); }
-void ArgMinFusion::set_top_k(int64_t top_k) { this->AddAttr(kTopK, MakeValue(top_k)); }
+void ArgMinFusion::set_keep_dims(const bool keep_dims) { (void)this->AddAttr(kKeepDims, MakeValue(keep_dims)); }
+void ArgMinFusion::set_out_max_value(bool out_max_value) { (void)AddAttr(kOutMaxValue, MakeValue(out_max_value)); }
+void ArgMinFusion::set_top_k(int64_t top_k) { (void)this->AddAttr(kTopK, MakeValue(top_k)); }

 bool ArgMinFusion::get_keep_dims() const { return GetValue<bool>(GetAttr(kKeepDims)); }
 bool ArgMinFusion::get_out_max_value() const {
@@ -31,12 +31,11 @@ void AvgPoolFusion::Init(const std::vector<int64_t> &kernel_size, const std::vec
   this->set_activation_type(activation_type);
 }

-void AvgPoolFusion::set_global(const bool global) { AddAttr(kGlobal, MakeValue(global)); }
+void AvgPoolFusion::set_global(const bool global) { (void)AddAttr(kGlobal, MakeValue(global)); }

 void AvgPoolFusion::set_activation_type(ActivationType activation_type) {
-  int64_t swi;
-  swi = activation_type;
-  this->AddAttr(kActivationType, MakeValue(swi));
+  int64_t swi = activation_type;
+  (void)this->AddAttr(kActivationType, MakeValue(swi));
 }

 bool AvgPoolFusion::get_global() const {

@@ -58,7 +57,7 @@ abstract::ShapePtr InferShape(const PrimitivePtr &primitive, const std::vector<A
   if (format == NHWC) {
     in_shape = {in_shape[0], in_shape[3], in_shape[1], in_shape[2]};
   }
-  CheckAndConvertUtils::CheckInteger("x_rank", SizeToLong(in_shape.size()), kEqual, 4, op_name);
+  (void)CheckAndConvertUtils::CheckInteger("x_rank", SizeToLong(in_shape.size()), kEqual, 4, op_name);
   auto kernel_size = GetValue<std::vector<int64_t>>(primitive->GetAttr(kKernelSize));
   auto pad_mode = PadMode(GetValue<int64_t>(primitive->GetAttr(kPadMode)));
   auto batch = in_shape[0];
@@ -43,11 +43,11 @@ void Conv2DBackpropFilterFusion::Init(const int64_t out_channel, const std::vect

 void Conv2DBackpropFilterFusion::set_activation_type(const ActivationType activation_type) {
   int64_t swi = activation_type;
-  this->AddAttr(kActivationType, MakeValue(swi));
+  (void)this->AddAttr(kActivationType, MakeValue(swi));
 }

 void Conv2DBackpropFilterFusion::set_in_channel(const int64_t in_channel) {
-  this->AddAttr(kInChannel, MakeValue(in_channel));
+  (void)this->AddAttr(kInChannel, MakeValue(in_channel));
 }

 ActivationType Conv2DBackpropFilterFusion::get_activation_type() const {
@@ -41,12 +41,13 @@ void Conv2DBackpropInputFusion::Init(int64_t in_channel, int64_t out_channel, co
   this->set_activation_type(activation_type);
 }

-void Conv2DBackpropInputFusion::set_in_channel(int64_t in_channel) { this->AddAttr(kInChannel, MakeValue(in_channel)); }
+void Conv2DBackpropInputFusion::set_in_channel(int64_t in_channel) {
+  (void)this->AddAttr(kInChannel, MakeValue(in_channel));
+}

 void Conv2DBackpropInputFusion::set_activation_type(const ActivationType &activation_type) {
-  int64_t swi;
-  swi = activation_type;
-  this->AddAttr(kActivationType, MakeValue(swi));
+  int64_t swi = activation_type;
+  (void)this->AddAttr(kActivationType, MakeValue(swi));
 }
 int64_t Conv2DBackpropInputFusion::get_in_channel() const {
   auto value_ptr = GetAttr(kInChannel);
@@ -38,12 +38,13 @@ void Conv2DFusion::Init(int64_t in_channel, int64_t out_channel, const std::vect
   this->set_pad_list(pad_list);
   this->set_activation_type(activation_type);
 }
-void Conv2DFusion::set_in_channel(const int64_t in_channel) { this->AddAttr(kInChannel, MakeValue(in_channel)); }
-void Conv2DFusion::set_pad_list(const std::vector<int64_t> &pad_list) { this->AddAttr(kPadList, MakeValue(pad_list)); }
+void Conv2DFusion::set_in_channel(const int64_t in_channel) { (void)this->AddAttr(kInChannel, MakeValue(in_channel)); }
+void Conv2DFusion::set_pad_list(const std::vector<int64_t> &pad_list) {
+  (void)this->AddAttr(kPadList, MakeValue(pad_list));
+}
 void Conv2DFusion::set_activation_type(const ActivationType &activation_type) {
-  int64_t swi;
-  swi = activation_type;
-  this->AddAttr(kActivationType, MakeValue(swi));
+  int64_t swi = activation_type;
+  (void)this->AddAttr(kActivationType, MakeValue(swi));
 }
 int64_t Conv2DFusion::get_in_channel() const {
   auto value_ptr = GetAttr(kInChannel);
@@ -40,32 +40,33 @@ void Conv2dTransposeFusion::Init(int64_t in_channel, int64_t out_channel, const
 }

 void Conv2dTransposeFusion::set_kernel_size(const std::vector<int64_t> &kernel_size) {
-  CheckAndConvertUtils::CheckInteger(kKernelSize, SizeToLong(kernel_size.size()), kEqual, 2, name());
+  (void)CheckAndConvertUtils::CheckInteger(kKernelSize, SizeToLong(kernel_size.size()), kEqual, 2, name());
   for (int64_t item : kernel_size) {
-    CheckAndConvertUtils::CheckInteger(kKernelSize, item, kGreaterEqual, 1, name());
+    (void)CheckAndConvertUtils::CheckInteger(kKernelSize, item, kGreaterEqual, 1, name());
   }
-  AddAttr(kKernelSize, MakeValue(kernel_size));
+  (void)AddAttr(kKernelSize, MakeValue(kernel_size));
 }

 void Conv2dTransposeFusion::set_dilation(const std::vector<int64_t> &dilation) {
-  CheckAndConvertUtils::CheckInteger(kDilation, SizeToLong(dilation.size()), kEqual, 2, name());
+  (void)CheckAndConvertUtils::CheckInteger(kDilation, SizeToLong(dilation.size()), kEqual, 2, name());
   for (int64_t item : dilation) {
-    CheckAndConvertUtils::CheckInteger(kDilation, item, kGreaterEqual, 1, name());
+    (void)CheckAndConvertUtils::CheckInteger(kDilation, item, kGreaterEqual, 1, name());
   }
-  AddAttr(kDilation, MakeValue(dilation));
+  (void)AddAttr(kDilation, MakeValue(dilation));
 }

 void Conv2dTransposeFusion::set_output_paddings(const std::vector<int64_t> &output_paddings) {
-  CheckAndConvertUtils::CheckInteger(kOutputPaddings, output_paddings.size(), kGreaterEqual, 1, name());
+  (void)CheckAndConvertUtils::CheckInteger(kOutputPaddings, SizeToLong(output_paddings.size()), kGreaterEqual, 1,
+                                           name());
   for (int64_t item : output_paddings) {
-    CheckAndConvertUtils::CheckInteger(kOutputPaddings, item, kGreaterEqual, 0, name());
+    (void)CheckAndConvertUtils::CheckInteger(kOutputPaddings, item, kGreaterEqual, 0, name());
   }
-  AddAttr(kOutputPaddings, MakeValue(output_paddings));
+  (void)AddAttr(kOutputPaddings, MakeValue(output_paddings));
 }

 void Conv2dTransposeFusion::set_activation_type(ActivationType activation_type) {
   int64_t swi = activation_type;
-  this->AddAttr(kActivationType, MakeValue(swi));
+  (void)this->AddAttr(kActivationType, MakeValue(swi));
 }

 std::vector<int64_t> Conv2dTransposeFusion::get_output_paddings() const {
@@ -23,9 +23,8 @@ namespace ops {
 void DivFusion::Init(const ActivationType &activation_type) { this->set_activation_type(activation_type); }

 void DivFusion::set_activation_type(const ActivationType &activation_type) {
-  int64_t swi;
-  swi = activation_type;
-  this->AddAttr(kActivationType, MakeValue(swi));
+  int64_t swi = activation_type;
+  (void)this->AddAttr(kActivationType, MakeValue(swi));
 }

 ActivationType DivFusion::get_activation_type() const {
@@ -20,7 +20,7 @@

 namespace mindspore {
 namespace ops {
-void EmbeddingLookupFusion::set_max_norm(const float max_norm) { this->AddAttr(kMaxNorm, MakeValue(max_norm)); }
+void EmbeddingLookupFusion::set_max_norm(const float max_norm) { (void)this->AddAttr(kMaxNorm, MakeValue(max_norm)); }
 float EmbeddingLookupFusion::get_max_norm() const {
   auto value_ptr = GetAttr(kMaxNorm);
   return GetValue<float>(value_ptr);
@@ -29,11 +29,11 @@ void ExpFusion::Init(const float base, const float scale, const float shift) {
   this->set_shift(shift);
 }

-void ExpFusion::set_base(const float base) { this->AddAttr(kBase, MakeValue(base)); }
+void ExpFusion::set_base(const float base) { (void)this->AddAttr(kBase, MakeValue(base)); }

-void ExpFusion::set_scale(const float scale) { this->AddAttr(kScale, MakeValue(scale)); }
+void ExpFusion::set_scale(const float scale) { (void)this->AddAttr(kScale, MakeValue(scale)); }

-void ExpFusion::set_shift(const float shift) { this->AddAttr(kShift, MakeValue(shift)); }
+void ExpFusion::set_shift(const float shift) { (void)this->AddAttr(kShift, MakeValue(shift)); }

 float ExpFusion::get_base() const {
   auto value_ptr = GetAttr(kBase);
@ -20,19 +20,19 @@
|
|||
|
||||
namespace mindspore {
|
||||
namespace ops {
|
||||
void FullConnection::set_has_bias(const bool has_bias) { this->AddAttr(kHasBias, MakeValue(has_bias)); }
|
||||
void FullConnection::set_has_bias(const bool has_bias) { (void)this->AddAttr(kHasBias, MakeValue(has_bias)); }
|
||||
|
||||
bool FullConnection::get_has_bias() const { return GetValue<bool>(GetAttr(kHasBias)); }
|
||||
|
||||
void FullConnection::set_axis(const int64_t axis) { this->AddAttr(kAxis, MakeValue(axis)); }
|
||||
void FullConnection::set_axis(const int64_t axis) { (void)this->AddAttr(kAxis, MakeValue(axis)); }
|
||||
int64_t FullConnection::get_axis() const { return GetValue<int64_t>(GetAttr(kAxis)); }
|
||||
|
||||
void FullConnection::set_use_axis(const bool use_axis) { this->AddAttr(kUseAxis, MakeValue(use_axis)); }
|
||||
void FullConnection::set_use_axis(const bool use_axis) { (void)this->AddAttr(kUseAxis, MakeValue(use_axis)); }
|
||||
bool FullConnection::get_use_axis() const { return GetValue<bool>(GetAttr(kUseAxis)); }
|
||||
|
||||
void FullConnection::set_activation_type(const ActivationType &activation_type) {
|
||||
int64_t swi;
|
||||
swi = activation_type;
|
||||
this->AddAttr(kActivationType, MakeValue(swi));
|
||||
int64_t swi = activation_type;
|
||||
(void)this->AddAttr(kActivationType, MakeValue(swi));
|
||||
}
|
||||
ActivationType FullConnection::get_activation_type() const {
|
||||
auto value_ptr = GetAttr(kActivationType);
|
||||
|
@ -58,9 +58,9 @@ AbstractBasePtr FullConnectionInfer(const abstract::AnalysisEnginePtr &, const P
|
|||
auto prim_axis = GetValue<int64_t>(primitive->GetAttr(kAxis));
|
||||
auto has_bias = GetValue<bool>(primitive->GetAttr(kHasBias));
|
||||
if (has_bias) {
|
||||
CheckAndConvertUtils::CheckInteger("input_args.size()", input_args.size(), kEqual, 3, prim_name);
|
||||
(void)CheckAndConvertUtils::CheckInteger("input_args.size()", input_args.size(), kEqual, 3, prim_name);
|
||||
} else {
|
||||
CheckAndConvertUtils::CheckInteger("input_args.size()", input_args.size(), kEqual, 2, prim_name);
|
||||
(void)CheckAndConvertUtils::CheckInteger("input_args.size()", input_args.size(), kEqual, 2, prim_name);
|
||||
}
|
||||
auto use_axis = GetValue<bool>(primitive->GetAttr(kUseAxis));
|
||||
if (use_axis && (prim_axis < 1 || prim_axis > (int64_t)input0_shape.size())) {
|
||||
|
@ -68,7 +68,7 @@ AbstractBasePtr FullConnectionInfer(const abstract::AnalysisEnginePtr &, const P
|
|||
}
|
||||
int64_t new_k = 1;
|
||||
if (use_axis) {
|
||||
for (size_t t = prim_axis; t < input0_shape.size(); t++) {
|
||||
for (size_t t = LongToSize(prim_axis); t < input0_shape.size(); t++) {
|
||||
new_k *= input0_shape[t];
|
||||
}
|
||||
if (new_k != input1_shape[1]) {
|
||||
|
@ -85,8 +85,8 @@ AbstractBasePtr FullConnectionInfer(const abstract::AnalysisEnginePtr &, const P
|
|||
}
|
||||
std::vector<int64_t> out_shape = {(int64_t)input0_shape.size()};
|
||||
if (use_axis) {
|
||||
out_shape.resize(prim_axis + 1);
|
||||
out_shape[prim_axis] = input1_shape[0];
|
||||
out_shape.resize(LongToSize(prim_axis) + 1);
|
||||
out_shape[LongToSize(prim_axis)] = input1_shape[0];
|
||||
} else {
|
||||
int64_t total = 1;
|
||||
for (size_t i = 0; i < input0_shape.size(); i++) {
|
||||
|
|
|
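The LongToSize/SizeToLong swaps in the FullConnection hunks make every conversion between size_t and int64_t explicit instead of implicit, which is what the lint rule about sign conversions wants. A hedged sketch of what helpers of this shape do; the real MindSpore utilities may handle errors differently.

#include <cstddef>
#include <cstdint>
#include <limits>
#include <stdexcept>

// Sketch only: checked conversions in the spirit of SizeToLong / LongToSize.
inline int64_t SizeToLongSketch(size_t v) {
  if (v > static_cast<size_t>(std::numeric_limits<int64_t>::max())) {
    throw std::out_of_range("size_t value does not fit in int64_t");
  }
  return static_cast<int64_t>(v);
}

inline size_t LongToSizeSketch(int64_t v) {
  if (v < 0) throw std::out_of_range("negative value cannot become a size_t index");
  return static_cast<size_t>(v);
}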
@@ -28,9 +28,8 @@ void L2NormalizeFusion::Init(const std::vector<int64_t> &axis, const float epsil
}

void L2NormalizeFusion::set_activation_type(const ActivationType &activation_type) {
int64_t swi;
swi = activation_type;
this->AddAttr(kActivationType, MakeValue(swi));
int64_t swi = activation_type;
(void)this->AddAttr(kActivationType, MakeValue(swi));
}

ActivationType L2NormalizeFusion::get_activation_type() const {

@@ -27,7 +27,7 @@ void LayerNormFusion::Init(const int64_t begin_norm_axis, const int64_t begin_pa
}

void LayerNormFusion::set_elementwise_affine(const bool elementwise_affine) {
AddAttr(kElementwiseAffine, MakeValue(elementwise_affine));
(void)AddAttr(kElementwiseAffine, MakeValue(elementwise_affine));
}

bool LayerNormFusion::get_elementwise_affine() const {

@@ -31,12 +31,11 @@ void MaxPoolFusion::Init(const std::vector<int64_t> &kernel_size, const std::vec
this->set_activation_type(activation_type);
}

void MaxPoolFusion::set_global(const bool global) { AddAttr(kGlobal, MakeValue(global)); }
void MaxPoolFusion::set_global(const bool global) { (void)AddAttr(kGlobal, MakeValue(global)); }

void MaxPoolFusion::set_activation_type(ActivationType activation_type) {
int64_t swi;
swi = activation_type;
this->AddAttr(kActivationType, MakeValue(swi));
int64_t swi = activation_type;
(void)this->AddAttr(kActivationType, MakeValue(swi));
}

bool MaxPoolFusion::get_global() const {

@@ -58,7 +57,7 @@ abstract::ShapePtr InferShape(const PrimitivePtr &primitive, const std::vector<A
if (format == NHWC) {
in_shape = {in_shape[0], in_shape[3], in_shape[1], in_shape[2]};
}
CheckAndConvertUtils::CheckInteger("x_rank", in_shape.size(), kEqual, 4, op_name);
(void)CheckAndConvertUtils::CheckInteger("x_rank", SizeToLong(in_shape.size()), kEqual, 4, op_name);
auto kernel_size = GetValue<std::vector<int64_t>>(primitive->GetAttr(kKernelSize));
auto pad_mode = PadMode(GetValue<int64_t>(primitive->GetAttr(kPadMode)));
auto batch = in_shape[0];

@@ -24,9 +24,8 @@
namespace mindspore {
namespace ops {
void MulFusion::set_activation_type(const ActivationType &activation_type) {
int64_t swi;
swi = activation_type;
this->AddAttr(kActivationType, MakeValue(swi));
int64_t swi = activation_type;
(void)this->AddAttr(kActivationType, MakeValue(swi));
}
ActivationType MulFusion::get_activation_type() const {
auto value_ptr = GetAttr(kActivationType);

@@ -29,13 +29,12 @@ void PadFusion::Init(const PaddingMode &padding_mode, const float constant_value
}

void PadFusion::set_padding_mode(const PaddingMode &padding_mode) {
int64_t swi;
swi = padding_mode;
this->AddAttr(kPaddingMode, MakeValue(swi));
int64_t swi = padding_mode;
(void)this->AddAttr(kPaddingMode, MakeValue(swi));
}

void PadFusion::set_constant_value(const float constant_value) {
this->AddAttr(kConstantValue, MakeValue(constant_value));
(void)this->AddAttr(kConstantValue, MakeValue(constant_value));
}

PaddingMode PadFusion::get_padding_mode() const {

@@ -21,7 +21,7 @@ namespace mindspore {
namespace ops {
void PartialFusion::Init(const int64_t sub_graph_index) { this->set_sub_graph_index(sub_graph_index); }
void PartialFusion::set_sub_graph_index(const int64_t sub_graph_index) {
this->AddAttr(kSubGraphIndex, MakeValue(sub_graph_index));
(void)this->AddAttr(kSubGraphIndex, MakeValue(sub_graph_index));
}
int64_t PartialFusion::get_sub_graph_index() const {
auto value_ptr = GetAttr(kSubGraphIndex);

@@ -28,8 +28,8 @@ void PowFusion::Init(const float &scale, const float &shift) {
this->set_shift(shift);
}

void PowFusion::set_scale(const float &scale) { this->AddAttr(kScale, MakeValue(scale)); }
void PowFusion::set_shift(const float &shift) { this->AddAttr(kShift, MakeValue(shift)); }
void PowFusion::set_scale(const float &scale) { (void)this->AddAttr(kScale, MakeValue(scale)); }
void PowFusion::set_shift(const float &shift) { (void)this->AddAttr(kShift, MakeValue(shift)); }

float PowFusion::get_scale() const { return GetValue<float>(GetAttr(kScale)); }
float PowFusion::get_shift() const { return GetValue<float>(GetAttr(kShift)); }

@@ -29,10 +29,10 @@ void PReLUFusion::Init(const bool channel_shared, const std::vector<float> &slop
}

void PReLUFusion::set_channel_shared(const bool channel_shared) {
this->AddAttr(kChannelShared, MakeValue(channel_shared));
(void)this->AddAttr(kChannelShared, MakeValue(channel_shared));
}

void PReLUFusion::set_slope(const std::vector<float> &slope) { this->AddAttr(kSlope, MakeValue(slope)); }
void PReLUFusion::set_slope(const std::vector<float> &slope) { (void)this->AddAttr(kSlope, MakeValue(slope)); }

bool PReLUFusion::get_channel_shared() const {
auto value_ptr = GetAttr(kChannelShared);

@@ -26,19 +26,18 @@

namespace mindspore {
namespace ops {
void ReduceFusion::set_keep_dims(const bool keep_dims) { this->AddAttr(kKeepDims, MakeValue(keep_dims)); }
void ReduceFusion::set_keep_dims(const bool keep_dims) { (void)this->AddAttr(kKeepDims, MakeValue(keep_dims)); }

void ReduceFusion::set_mode(const ReduceMode mode) {
int64_t swi;
swi = mode;
this->AddAttr(kMode, MakeValue(swi));
int64_t swi = mode;
(void)this->AddAttr(kMode, MakeValue(swi));
}

void ReduceFusion::set_reduce_to_end(const bool reduce_to_end) {
this->AddAttr(kReduceToEnd, MakeValue(reduce_to_end));
(void)this->AddAttr(kReduceToEnd, MakeValue(reduce_to_end));
}

void ReduceFusion::set_coeff(const float coeff) { this->AddAttr(kCoeff, MakeValue(coeff)); }
void ReduceFusion::set_coeff(const float coeff) { (void)this->AddAttr(kCoeff, MakeValue(coeff)); }

bool ReduceFusion::get_keep_dims() const {
auto value_ptr = GetAttr(kKeepDims);

@@ -26,9 +26,8 @@ void ScaleFusion::Init(const int64_t axis, const ActivationType &activation_type
}

void ScaleFusion::set_activation_type(const ActivationType &activation_type) {
int64_t swi;
swi = activation_type;
this->AddAttr(kActivationType, MakeValue(swi));
int64_t swi = activation_type;
(void)this->AddAttr(kActivationType, MakeValue(swi));
}

ActivationType ScaleFusion::get_activation_type() const {

@@ -22,7 +22,7 @@ namespace mindspore {
namespace ops {
void SliceFusion::Init(const std::vector<int64_t> &axes) { this->set_axes(axes); }

void SliceFusion::set_axes(const std::vector<int64_t> &axes) { this->AddAttr(kAxes, MakeValue(axes)); }
void SliceFusion::set_axes(const std::vector<int64_t> &axes) { (void)this->AddAttr(kAxes, MakeValue(axes)); }

std::vector<int64_t> SliceFusion::get_axes() const {
auto value_ptr = GetAttr(kAxes);

@@ -52,7 +52,7 @@ AbstractBasePtr SliceFusionInfer(const abstract::AnalysisEnginePtr &, const Prim
CheckAndConvertUtils::Check("len of size", (int64_t)size.size(), kEqual, "len x's dim", SizeToLong(x_shape_len));

for (size_t i = 0; i < x_shape_len; i++) {
CheckAndConvertUtils::CheckInteger("input size[" + std::to_string(i) + "]", size[i], kGreaterThan, 0, "");
(void)CheckAndConvertUtils::CheckInteger("input size[" + std::to_string(i) + "]", size[i], kGreaterThan, 0, "");
if (x_shape[i] < (begin[i] + size[i])) {
auto y = begin[i] + size[i];
MS_EXCEPTION(ValueError) << "For " + op_name + "slice shape can't bigger than origin shape " +

@@ -23,9 +23,8 @@ namespace ops {
void SubFusion::Init(const ActivationType &activation_type) { this->set_activation_type(activation_type); }

void SubFusion::set_activation_type(const ActivationType &activation_type) {
int64_t swi;
swi = activation_type;
this->AddAttr(kActivationType, MakeValue(swi));
int64_t swi = activation_type;
(void)this->AddAttr(kActivationType, MakeValue(swi));
}

ActivationType SubFusion::get_activation_type() const {

@@ -22,7 +22,7 @@ namespace mindspore {
namespace ops {
void TileFusion::Init(const std::vector<int64_t> &dims) { this->set_dims(dims); }

void TileFusion::set_dims(const std::vector<int64_t> &dims) { this->AddAttr(kDims, MakeValue(dims)); }
void TileFusion::set_dims(const std::vector<int64_t> &dims) { (void)this->AddAttr(kDims, MakeValue(dims)); }

std::vector<int64_t> TileFusion::get_dims() const {
auto value_ptr = GetAttr(kDims);

@@ -26,9 +26,9 @@ void TopKFusion::Init(const bool sorted, const int64_t axis, const int64_t large
this->set_sorted(sorted);
}

void TopKFusion::set_axis(const int64_t axis) { this->AddAttr(kAxis, MakeValue(axis)); }
void TopKFusion::set_axis(const int64_t axis) { (void)this->AddAttr(kAxis, MakeValue(axis)); }

void TopKFusion::set_largest(const int64_t largest) { this->AddAttr(kLargest, MakeValue(largest)); }
void TopKFusion::set_largest(const int64_t largest) { (void)this->AddAttr(kLargest, MakeValue(largest)); }

int64_t TopKFusion::get_axis() const {
auto value_ptr = GetAttr(kAxis);

@@ -28,7 +28,7 @@ namespace {
abstract::ShapePtr InferShape(const PrimitivePtr &primitive, const std::vector<AbstractBasePtr> &input_args) {
MS_EXCEPTION_IF_NULL(primitive);
auto prim_name = primitive->name();
CheckAndConvertUtils::CheckInteger("input number", SizeToLong(input_args.size()), kEqual, 2, prim_name);
(void)CheckAndConvertUtils::CheckInteger("input number", SizeToLong(input_args.size()), kEqual, 2, prim_name);
for (const auto &item : input_args) {
MS_EXCEPTION_IF_NULL(item);
}

@@ -36,13 +36,13 @@ abstract::ShapePtr InferShape(const PrimitivePtr &primitive, const std::vector<A
auto indices_shape = CheckAndConvertUtils::ConvertShapePtrToShapeMap(input_args[1]->BuildShape())[kShape];
auto input_rank = input_shape.size();
auto indices_rank = indices_shape.size();
CheckAndConvertUtils::CheckInteger("Input of indices data", input_rank, kGreaterEqual,
indices_shape[indices_rank - 1], prim_name);
(void)CheckAndConvertUtils::CheckInteger("Input of indices data", SizeToLong(input_rank), kGreaterEqual,
indices_shape[indices_rank - 1], prim_name);
std::vector<int64_t> output_shape;
for (size_t i = 0; i < indices_rank - 1; i++) {
output_shape.push_back(indices_shape[i]);
}
for (size_t i = indices_shape[indices_rank - 1]; i < input_rank; ++i) {
for (size_t i = LongToSize(indices_shape[indices_rank - 1]); i < input_rank; ++i) {
output_shape.push_back(input_shape[i]);
}
return std::make_shared<abstract::Shape>(output_shape);
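The infer function in the hunk above appears to implement the GatherNd-style shape rule: the output shape is the indices shape minus its last axis, followed by whatever input dimensions the indices leave unaddressed. A standalone sketch of that rule; the function name is illustrative, not the project's.

#include <cstddef>
#include <cstdint>
#include <vector>

// GatherNd-style rule: output = indices_shape[:-1] ++ input_shape[indices_shape[-1]:].
// Assumes indices_shape is non-empty and its last entry is non-negative.
std::vector<int64_t> GatherNdOutShapeSketch(const std::vector<int64_t> &input_shape,
                                            const std::vector<int64_t> &indices_shape) {
  std::vector<int64_t> out;
  for (size_t i = 0; i + 1 < indices_shape.size(); ++i) {
    out.push_back(indices_shape[i]);  // batch dimensions of the indices
  }
  for (size_t i = static_cast<size_t>(indices_shape.back()); i < input_shape.size(); ++i) {
    out.push_back(input_shape[i]);    // trailing input dimensions left unindexed
  }
  return out;  // e.g. input [4, 5, 6] with indices [2, 2] -> output [2, 6]
}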
@@ -30,7 +30,7 @@ namespace {
abstract::ShapePtr InferShape(const PrimitivePtr &primitive, const std::vector<AbstractBasePtr> &input_args) {
MS_EXCEPTION_IF_NULL(primitive);
auto op_name = primitive->name();
CheckAndConvertUtils::CheckInteger("gelu infer", input_args.size(), kEqual, 1, op_name);
(void)CheckAndConvertUtils::CheckInteger("gelu infer", SizeToLong(input_args.size()), kEqual, 1, op_name);
MS_EXCEPTION_IF_NULL(input_args[0]);
auto shape_map = CheckAndConvertUtils::ConvertShapePtrToShapeMap(input_args[0]->BuildShape());
auto in_shape = shape_map[kShape];

@@ -44,7 +44,7 @@ abstract::ShapePtr InferShape(const PrimitivePtr &primitive, const std::vector<A
TypePtr InferType(const PrimitivePtr &prim, const std::vector<AbstractBasePtr> &input_args) {
MS_EXCEPTION_IF_NULL(prim);
auto op_name = prim->name();
CheckAndConvertUtils::CheckInteger("gelu infer", input_args.size(), kEqual, 1, op_name);
(void)CheckAndConvertUtils::CheckInteger("gelu infer", SizeToLong(input_args.size()), kEqual, 1, op_name);
std::map<std::string, TypePtr> types;
const std::set<TypePtr> valid_types = {kFloat16, kFloat32};
MS_EXCEPTION_IF_NULL(input_args[0]);

@@ -33,7 +33,7 @@ void ActivationGrad::Init(const ActivationType &type, const float alpha) {

void ActivationGrad::set_activation_type(const ActivationType &type) {
int64_t swi = type;
this->AddAttr(kActivationType, MakeValue(swi));
(void)this->AddAttr(kActivationType, MakeValue(swi));
}

ActivationType ActivationGrad::get_activation_type() const {

@@ -41,7 +41,7 @@ ActivationType ActivationGrad::get_activation_type() const {
return ActivationType(GetValue<int64_t>(value_ptr));
}

void ActivationGrad::set_alpha(const float alpha) { this->AddAttr(kAlpha, MakeValue(alpha)); }
void ActivationGrad::set_alpha(const float alpha) { (void)this->AddAttr(kAlpha, MakeValue(alpha)); }

float ActivationGrad::get_alpha() const {
auto value_ptr = GetAttr(kAlpha);

@@ -27,10 +27,7 @@ void BatchNormGrad::Init(const bool is_training, const float epsilon) {
this->set_epsilon(epsilon);
}

void BatchNormGrad::set_epsilon(const float epsilon) {
// CheckAndConvertUtils::CheckInRange(kEpsilon, epsilon, kIncludeRight, {0, 1}, this->name());
this->AddAttr(kEpsilon, MakeValue(epsilon));
}
void BatchNormGrad::set_epsilon(const float epsilon) { (void)this->AddAttr(kEpsilon, MakeValue(epsilon)); }

float BatchNormGrad::get_epsilon() const {
auto value_ptr = this->GetAttr(kEpsilon);

@@ -40,7 +40,7 @@ std::vector<int64_t> GetFormatShape(const int64_t &format, const std::vector<int
abstract::ShapePtr InferShape(const PrimitivePtr &primitive, const std::vector<AbstractBasePtr> &input_args) {
MS_EXCEPTION_IF_NULL(primitive);
auto prim_name = primitive->name();
CheckAndConvertUtils::CheckInteger("input numbers", input_args.size(), kEqual, 1, prim_name);
(void)CheckAndConvertUtils::CheckInteger("input numbers", SizeToLong(input_args.size()), kEqual, 1, prim_name);
for (const auto &item : input_args) {
MS_EXCEPTION_IF_NULL(item);
}

@@ -60,7 +60,7 @@ abstract::ShapePtr InferShape(const PrimitivePtr &primitive, const std::vector<A
TypePtr InferType(const PrimitivePtr &prim, const std::vector<AbstractBasePtr> &input_args) {
MS_EXCEPTION_IF_NULL(prim);
auto prim_name = prim->name();
CheckAndConvertUtils::CheckInteger("BiasAddGrad infer", input_args.size(), kEqual, 1, prim_name);
(void)CheckAndConvertUtils::CheckInteger("BiasAddGrad infer", SizeToLong(input_args.size()), kEqual, 1, prim_name);
MS_EXCEPTION_IF_NULL(input_args[0]);
auto x_type_map = input_args[0]->BuildType();
MS_EXCEPTION_IF_NULL(x_type_map);

@@ -30,9 +30,9 @@ abstract::ShapePtr BinaryCrossEntroyGradInferShape(const PrimitivePtr &primitive
auto x_shape = CheckAndConvertUtils::ConvertShapePtrToShapeMap(input_args[0]->BuildShape())[kShape];
auto y_shape = CheckAndConvertUtils::ConvertShapePtrToShapeMap(input_args[1]->BuildShape())[kShape];
auto weight_shape = CheckAndConvertUtils::ConvertShapePtrToShapeMap(input_args[2]->BuildShape())[kShape];
CheckAndConvertUtils::Check("x shape", x_shape, kEqual, "y shape", y_shape, prim_name);
(void)CheckAndConvertUtils::Check("x shape", x_shape, kEqual, "y shape", y_shape, prim_name);
if (weight_shape.size() < 1) {
CheckAndConvertUtils::Check("y shape", y_shape, kEqual, "weight shape", weight_shape, prim_name);
(void)CheckAndConvertUtils::Check("y shape", y_shape, kEqual, "weight shape", weight_shape, prim_name);
}
return std::make_shared<abstract::Shape>(x_shape);
}

@@ -58,7 +58,7 @@ void BinaryCrossEntropyGrad::Init(const Reduction &reduction) { set_reduction(re

void BinaryCrossEntropyGrad::set_reduction(const Reduction &reduction) {
int64_t swi = reduction;
this->AddAttr(kReduction, MakeValue(swi));
(void)this->AddAttr(kReduction, MakeValue(swi));
}
Reduction BinaryCrossEntropyGrad::get_reduction() const {
auto value_ptr = GetAttr(kReduction);

@@ -26,14 +26,14 @@ void BNGrad::Init(const float eps, const float momentum) {
this->set_momentum(momentum);
}

void BNGrad::set_eps(const float eps) { this->AddAttr(kEps, MakeValue(eps)); }
void BNGrad::set_eps(const float eps) { (void)this->AddAttr(kEps, MakeValue(eps)); }

float BNGrad::get_eps() const {
auto value_ptr = this->GetAttr(kEps);
return GetValue<float>(value_ptr);
}

void BNGrad::set_momentum(const float momentum) { this->AddAttr(kMomentum, MakeValue(momentum)); }
void BNGrad::set_momentum(const float momentum) { (void)this->AddAttr(kMomentum, MakeValue(momentum)); }

float BNGrad::get_momentum() const {
auto value_ptr = this->GetAttr(kMomentum);

@@ -66,7 +66,7 @@ void Conv2DBackpropFilter::Init(const int64_t out_channel, const std::vector<int
}

void Conv2DBackpropFilter::set_out_channel(const int64_t out_channel) {
this->AddAttr(kOutChannel, MakeValue(out_channel));
(void)this->AddAttr(kOutChannel, MakeValue(out_channel));
}

int64_t Conv2DBackpropFilter::get_out_channel() const {

@@ -75,7 +75,7 @@ int64_t Conv2DBackpropFilter::get_out_channel() const {
}

void Conv2DBackpropFilter::set_kernel_size(const std::vector<int64_t> &kernel_size) {
this->AddAttr(kKernelSize, MakeValue(kernel_size));
(void)this->AddAttr(kKernelSize, MakeValue(kernel_size));
}

std::vector<int64_t> Conv2DBackpropFilter::get_kernel_size() const {

@@ -85,7 +85,7 @@ std::vector<int64_t> Conv2DBackpropFilter::get_kernel_size() const {

void Conv2DBackpropFilter::set_pad_mode(const PadMode &pad_mode) {
int64_t swi = pad_mode;
this->AddAttr(kPadMode, MakeValue(swi));
(void)this->AddAttr(kPadMode, MakeValue(swi));
}

PadMode Conv2DBackpropFilter::get_pad_mode() const {

@@ -94,7 +94,7 @@ PadMode Conv2DBackpropFilter::get_pad_mode() const {
}

void Conv2DBackpropFilter::set_pad_list(const std::vector<int64_t> &pad_list) {
this->AddAttr(kPadList, MakeValue(pad_list));
(void)this->AddAttr(kPadList, MakeValue(pad_list));
}

std::vector<int64_t> Conv2DBackpropFilter::get_pad_list() const {

@@ -102,7 +102,7 @@ std::vector<int64_t> Conv2DBackpropFilter::get_pad_list() const {
return GetValue<std::vector<int64_t>>(value_ptr);
}

void Conv2DBackpropFilter::set_mode(const int64_t mode) { this->AddAttr(kMode, MakeValue(mode)); }
void Conv2DBackpropFilter::set_mode(const int64_t mode) { (void)this->AddAttr(kMode, MakeValue(mode)); }

int64_t Conv2DBackpropFilter::get_mode() const {
auto value_ptr = GetAttr(kMode);

@@ -125,7 +125,7 @@ std::vector<int64_t> Conv2DBackpropFilter::get_dilation() const {
return GetValue<std::vector<int64_t>>(value_ptr);
}

void Conv2DBackpropFilter::set_group(const int64_t group) { this->AddAttr(kGroup, MakeValue(group)); }
void Conv2DBackpropFilter::set_group(const int64_t group) { (void)this->AddAttr(kGroup, MakeValue(group)); }

int64_t Conv2DBackpropFilter::get_group() const {
auto value_ptr = GetAttr(kGroup);

@@ -147,7 +147,7 @@ AbstractBasePtr Conv2DBackpropFilterInfer(const abstract::AnalysisEnginePtr &, c
MS_EXCEPTION_IF_NULL(primitive);
auto prim_name = primitive->name();
// check
CheckAndConvertUtils::CheckInteger("input size", input_args.size(), kGreaterEqual, 3, prim_name);
(void)CheckAndConvertUtils::CheckInteger("input size", SizeToLong(input_args.size()), kGreaterEqual, 3, prim_name);
for (const auto &item : input_args) {
MS_EXCEPTION_IF_NULL(item);
}

@@ -37,7 +37,7 @@ void SetPadList(const PrimitivePtr &primitive, const std::vector<int64_t> &dout_
// default pad mode is valid
auto attr_pad_list_prt = primitive->GetAttr(kPadList);
int64_t pad_mode;
CheckAndConvertUtils::GetPadModEnumValue(primitive->GetAttr(kPadMode), &pad_mode, true);
(void)CheckAndConvertUtils::GetPadModEnumValue(primitive->GetAttr(kPadMode), &pad_mode, true);
ShapeVector pad_list = {0, 0, 0, 0};
if (!attr_pad_list_prt->isa<None>()) {
pad_list = GetValue<ShapeVector>(attr_pad_list_prt);

@@ -60,7 +60,7 @@ void SetPadList(const PrimitivePtr &primitive, const std::vector<int64_t> &dout_
} else if (pad_mode == PAD) {
pad_list = GetValue<std::vector<int64_t>>(primitive->GetAttr(kPad));
}
primitive->AddAttr(kPadList, MakeValue(pad_list));
(void)primitive->AddAttr(kPadList, MakeValue(pad_list));
}

abstract::ShapePtr Conv2DBackpropInputInferShape(const PrimitivePtr &primitive,

@@ -93,7 +93,7 @@ AbstractBasePtr Conv2DBackpropInputInfer(const abstract::AnalysisEnginePtr &, co
MS_EXCEPTION_IF_NULL(primitive);
auto prim_name = primitive->name();
// check
CheckAndConvertUtils::CheckInteger("input size", input_args.size(), kGreaterEqual, 3, prim_name);
(void)CheckAndConvertUtils::CheckInteger("input size", input_args.size(), kGreaterEqual, 3, prim_name);
for (const auto &item : input_args) {
MS_EXCEPTION_IF_NULL(item);
}
@@ -119,55 +119,55 @@ void Conv2DBackpropInput::Init(int64_t out_channel, const std::vector<int64_t> &
}

void Conv2DBackpropInput::set_out_channel(int64_t out_channel) {
AddAttr(kOutChannel,
MakeValue(CheckAndConvertUtils::CheckInteger(kOutChannel, out_channel, kGreaterThan, 0, name())));
(void)AddAttr(kOutChannel,
MakeValue(CheckAndConvertUtils::CheckInteger(kOutChannel, out_channel, kGreaterThan, 0, name())));
}

void Conv2DBackpropInput::set_kernel_size(const std::vector<int64_t> &kernel_size) {
AddAttr(kKernelSize, MakeValue(CheckAndConvertUtils::CheckPositiveVector(kKernelSize, kernel_size, name())));
(void)AddAttr(kKernelSize, MakeValue(CheckAndConvertUtils::CheckPositiveVector(kKernelSize, kernel_size, name())));
}

void Conv2DBackpropInput::set_stride(const std::vector<int64_t> &stride) {
AddAttr(kStride, MakeValue(CheckAndConvertUtils::CheckPositiveVector(kStride, stride, name())));
(void)AddAttr(kStride, MakeValue(CheckAndConvertUtils::CheckPositiveVector(kStride, stride, name())));
}

void Conv2DBackpropInput::set_dilation(const std::vector<int64_t> &dilation) {
AddAttr(kDilation, MakeValue(CheckAndConvertUtils::CheckPositiveVector(kDilation, dilation, name())));
(void)AddAttr(kDilation, MakeValue(CheckAndConvertUtils::CheckPositiveVector(kDilation, dilation, name())));
}

void Conv2DBackpropInput::set_pad_mode(const PadMode &pad_mode) {
std::vector<int64_t> pad = get_pad();
if (pad_mode == PAD) {
for (auto item : pad) {
CheckAndConvertUtils::Check(kPadItem, item, kGreaterEqual, "zeros_list", 0, name());
(void)CheckAndConvertUtils::Check(kPadItem, item, kGreaterEqual, "zeros_list", 0, name());
}
} else {
CheckAndConvertUtils::Check(kPad, pad, kEqual, "zeros_list", {0, 0, 0, 0}, name());
(void)CheckAndConvertUtils::Check(kPad, pad, kEqual, "zeros_list", {0, 0, 0, 0}, name());
}
int64_t swi = pad_mode;
AddAttr(kPadMode, MakeValue(swi));
(void)AddAttr(kPadMode, MakeValue(swi));
}

void Conv2DBackpropInput::set_pad(const std::vector<int64_t> &pad) {
CheckAndConvertUtils::CheckInteger("pad_size", SizeToLong(pad.size()), kEqual, 4, name());
AddAttr(kPad, MakeValue(CheckAndConvertUtils::CheckPositiveVector(kPad, pad, name())));
(void)CheckAndConvertUtils::CheckInteger("pad_size", SizeToLong(pad.size()), kEqual, 4, name());
(void)AddAttr(kPad, MakeValue(CheckAndConvertUtils::CheckPositiveVector(kPad, pad, name())));
}

void Conv2DBackpropInput::set_mode(int64_t mode) {
AddAttr(kMode, MakeValue(CheckAndConvertUtils::CheckInteger(kMode, mode, kEqual, 1, name())));
(void)AddAttr(kMode, MakeValue(CheckAndConvertUtils::CheckInteger(kMode, mode, kEqual, 1, name())));
}

void Conv2DBackpropInput::set_group(int64_t group) {
AddAttr(kGroup, MakeValue(CheckAndConvertUtils::CheckInteger(kGroup, group, kGreaterThan, 0, name())));
(void)AddAttr(kGroup, MakeValue(CheckAndConvertUtils::CheckInteger(kGroup, group, kGreaterThan, 0, name())));
}

void Conv2DBackpropInput::set_format(const Format &format) {
int64_t f = format;
AddAttr(kFormat, MakeValue(f));
(void)AddAttr(kFormat, MakeValue(f));
}

void Conv2DBackpropInput::set_pad_list(const std::vector<int64_t> &pad_list) {
this->AddAttr(kPadList, MakeValue(pad_list));
(void)this->AddAttr(kPadList, MakeValue(pad_list));
}

int64_t Conv2DBackpropInput::get_out_channel() const {
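The Conv2DBackpropInput setters above chain validation and storage in one expression: CheckInteger returns the validated value, MakeValue wraps it, AddAttr stores it, and the (void) applies only to AddAttr's unused return. A small sketch of that design, with hypothetical stand-ins for the real helpers.

#include <cstdint>
#include <stdexcept>
#include <string>

// Hypothetical checker: returning the value is what makes inline chaining work.
int64_t CheckEqualSketch(const std::string &name, int64_t v, int64_t expect) {
  if (v != expect) throw std::invalid_argument(name + " must equal " + std::to_string(expect));
  return v;
}

struct OpSketch {
  int64_t mode_ = 0;
  bool StoreMode(int64_t mode) {  // stand-in for AddAttr; returns a status nobody reads
    mode_ = mode;
    return true;
  }
};

void SetModeSketch(OpSketch &op, int64_t mode) {
  (void)op.StoreMode(CheckEqualSketch("mode", mode, 1));  // validate and store in one line
}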
@@ -39,7 +39,9 @@ void DeConv2DGradFilter::Init(const int64_t in_channel, const int64_t out_channe
set_has_bias(has_bias);
}

void DeConv2DGradFilter::set_in_channel(const int64_t in_channel) { this->AddAttr(kInChannel, MakeValue(in_channel)); }
void DeConv2DGradFilter::set_in_channel(const int64_t in_channel) {
(void)this->AddAttr(kInChannel, MakeValue(in_channel));
}

int64_t DeConv2DGradFilter::get_in_channel() const {
auto value_ptr = GetAttr(kInChannel);

@@ -47,7 +49,7 @@ int64_t DeConv2DGradFilter::get_in_channel() const {
}

void DeConv2DGradFilter::set_out_channel(const int64_t out_channel) {
this->AddAttr(kOutChannel, MakeValue(out_channel));
(void)this->AddAttr(kOutChannel, MakeValue(out_channel));
}

int64_t DeConv2DGradFilter::get_out_channel() const {

@@ -56,7 +58,7 @@ int64_t DeConv2DGradFilter::get_out_channel() const {
}

void DeConv2DGradFilter::set_kernel_size(const std::vector<int64_t> &kernel_size) {
this->AddAttr(kKernelSize, MakeValue(kernel_size));
(void)this->AddAttr(kKernelSize, MakeValue(kernel_size));
}

std::vector<int64_t> DeConv2DGradFilter::get_kernel_size() const {

@@ -66,7 +68,7 @@ std::vector<int64_t> DeConv2DGradFilter::get_kernel_size() const {

void DeConv2DGradFilter::set_pad_mode(const PadMode &pad_mode) {
int64_t swi = pad_mode;
this->AddAttr(kPadMode, MakeValue(swi));
(void)this->AddAttr(kPadMode, MakeValue(swi));
}

PadMode DeConv2DGradFilter::get_pad_mode() const {

@@ -75,7 +77,7 @@ PadMode DeConv2DGradFilter::get_pad_mode() const {
}

void DeConv2DGradFilter::set_pad_list(const std::vector<int64_t> &pad_list) {
this->AddAttr(kPadList, MakeValue(pad_list));
(void)this->AddAttr(kPadList, MakeValue(pad_list));
}

std::vector<int64_t> DeConv2DGradFilter::get_pad_list() const {

@@ -83,7 +85,9 @@ std::vector<int64_t> DeConv2DGradFilter::get_pad_list() const {
return GetValue<std::vector<int64_t>>(value_ptr);
}

void DeConv2DGradFilter::set_stride(const std::vector<int64_t> &stride) { this->AddAttr(kStride, MakeValue(stride)); }
void DeConv2DGradFilter::set_stride(const std::vector<int64_t> &stride) {
(void)this->AddAttr(kStride, MakeValue(stride));
}

std::vector<int64_t> DeConv2DGradFilter::get_stride() const {
auto value_ptr = GetAttr(kStride);

@@ -91,7 +95,7 @@ std::vector<int64_t> DeConv2DGradFilter::get_stride() const {
}

void DeConv2DGradFilter::set_dilation(const std::vector<int64_t> &dilation) {
this->AddAttr(kDilation, MakeValue(dilation));
(void)this->AddAttr(kDilation, MakeValue(dilation));
}

std::vector<int64_t> DeConv2DGradFilter::get_dilation() const {

@@ -99,7 +103,7 @@ std::vector<int64_t> DeConv2DGradFilter::get_dilation() const {
return GetValue<std::vector<int64_t>>(value_ptr);
}

void DeConv2DGradFilter::set_group(const int64_t group) { this->AddAttr(kGroup, MakeValue(group)); }
void DeConv2DGradFilter::set_group(const int64_t group) { (void)this->AddAttr(kGroup, MakeValue(group)); }

int64_t DeConv2DGradFilter::get_group() const {
auto value_ptr = GetAttr(kGroup);

@@ -108,7 +112,7 @@ int64_t DeConv2DGradFilter::get_group() const {

void DeConv2DGradFilter::set_format(const Format &format) {
int64_t swi = format;
this->AddAttr(kFormat, MakeValue(swi));
(void)this->AddAttr(kFormat, MakeValue(swi));
}

Format DeConv2DGradFilter::get_format() const {

@@ -118,7 +122,7 @@ Format DeConv2DGradFilter::get_format() const {

void DeConv2DGradFilter::set_activation_type(const ActivationType &activation_type) {
int64_t swi = activation_type;
this->AddAttr(kActivationType, MakeValue(swi));
(void)this->AddAttr(kActivationType, MakeValue(swi));
}

ActivationType DeConv2DGradFilter::get_activation_type() const {

@@ -126,7 +130,7 @@ ActivationType DeConv2DGradFilter::get_activation_type() const {
return ActivationType(GetValue<int64_t>(value_ptr));
}

void DeConv2DGradFilter::set_has_bias(const bool has_bias) { this->AddAttr(kHasBias, MakeValue(has_bias)); }
void DeConv2DGradFilter::set_has_bias(const bool has_bias) { (void)this->AddAttr(kHasBias, MakeValue(has_bias)); }

bool DeConv2DGradFilter::get_has_bias() const {
auto value_ptr = GetAttr(kHasBias);

@@ -23,7 +23,7 @@ void DropoutGrad::Init(const float keep_prob) { this->set_keep_prob(keep_prob);

void DropoutGrad::set_keep_prob(const float keep_prob) {
CheckAndConvertUtils::CheckInRange<float>(kKeepProb, keep_prob, kIncludeRight, {0.0, 1.0}, this->name());
this->AddAttr(kKeepProb, MakeValue(keep_prob));
(void)this->AddAttr(kKeepProb, MakeValue(keep_prob));
}

float DropoutGrad::get_keep_prob() const {

@@ -44,8 +44,8 @@ TypePtr DropoutGradInferType(const PrimitivePtr &prim, const std::vector<Abstrac
auto op_name = prim->name();
auto mask_dtype = input_args[1]->BuildType();
auto dy_dtype = input_args[0]->BuildType();
CheckAndConvertUtils::CheckTensorTypeValid("mask", mask_dtype, {kTensorType}, op_name);
CheckAndConvertUtils::CheckTensorTypeValid("dy", dy_dtype, {kFloat16, kFloat32}, op_name);
(void)CheckAndConvertUtils::CheckTensorTypeValid("mask", mask_dtype, {kTensorType}, op_name);
(void)CheckAndConvertUtils::CheckTensorTypeValid("dy", dy_dtype, {kFloat16, kFloat32}, op_name);
auto tensor_type = dy_dtype->cast<TensorTypePtr>();
MS_EXCEPTION_IF_NULL(tensor_type);
auto data_type = tensor_type->element();

@@ -22,7 +22,7 @@ AbstractBasePtr FlattenGradInfer(const abstract::AnalysisEnginePtr &, const Prim
const std::vector<AbstractBasePtr> &input_args) {
MS_EXCEPTION_IF_NULL(primitive);
auto prim_name = primitive->name();
CheckAndConvertUtils::CheckInteger("input number", SizeToLong(input_args.size()), kEqual, 2, prim_name);
(void)CheckAndConvertUtils::CheckInteger("input number", SizeToLong(input_args.size()), kEqual, 2, prim_name);
for (const auto &item : input_args) {
MS_EXCEPTION_IF_NULL(item);
}

@@ -102,7 +102,7 @@ std::vector<int64_t> GroupConv2DGradInput::get_dilation() const {
return GetValue<std::vector<int64_t>>(value_ptr);
}

void GroupConv2DGradInput::set_group(const int64_t &group) { this->AddAttr(kGroup, MakeValue(group)); }
void GroupConv2DGradInput::set_group(const int64_t &group) { (void)this->AddAttr(kGroup, MakeValue(group)); }

int64_t GroupConv2DGradInput::get_group() const {
auto value_ptr = GetAttr(kGroup);

@@ -137,7 +137,7 @@ ActivationType GroupConv2DGradInput::get_activation_type() const {
return ActivationType(GetValue<int64_t>(value_ptr));
}

void GroupConv2DGradInput::set_has_bias(const bool has_bias) { this->AddAttr(kHasBias, MakeValue(has_bias)); }
void GroupConv2DGradInput::set_has_bias(const bool has_bias) { (void)this->AddAttr(kHasBias, MakeValue(has_bias)); }

bool GroupConv2DGradInput::get_has_bias() const {
auto value_ptr = GetAttr(kHasBias);

@@ -147,7 +147,8 @@ AbstractBasePtr GroupConv2DGradInputInfer(const abstract::AnalysisEnginePtr &, c
const std::vector<AbstractBasePtr> &input_args) {
MS_EXCEPTION_IF_NULL(primitive);
auto prim_name = primitive->name();
CheckAndConvertUtils::CheckInteger("group_conv_2D_infer", input_args.size(), kGreaterEqual, 2, prim_name);
(void)CheckAndConvertUtils::CheckInteger("group_conv_2D_infer", SizeToLong(input_args.size()), kGreaterEqual, 2,
prim_name);
MS_EXCEPTION_IF_NULL(input_args[0]);

// Infer shape

@@ -26,7 +26,7 @@ AbstractBasePtr LayerNormGradInfer(const abstract::AnalysisEnginePtr &, const Pr
// Outputs: x_backprob, gamma_backprob, beta_backprob
MS_EXCEPTION_IF_NULL(primitive);
auto op_name = primitive->name();
CheckAndConvertUtils::CheckInteger("input numbers", input_args.size(), kEqual, 5, op_name);
(void)CheckAndConvertUtils::CheckInteger("input numbers", SizeToLong(input_args.size()), kEqual, 5, op_name);
auto x_backprob = input_args[0]->Broaden();
auto gamma_backprob = input_args[4]->Broaden();
auto beta_backprob = input_args[4]->Broaden();

@@ -41,10 +41,10 @@ void LayerNormGrad::Init(const int64_t begin_norm_axis, const int64_t begin_para
this->set_begin_params_axis(begin_params_axis);
}
void LayerNormGrad::set_begin_norm_axis(const int64_t begin_norm_axis) {
this->AddAttr(kBeginNormAxis, MakeValue(begin_norm_axis));
(void)this->AddAttr(kBeginNormAxis, MakeValue(begin_norm_axis));
}
void LayerNormGrad::set_begin_params_axis(const int64_t begin_params_axis) {
this->AddAttr(kBeginParamsAxis, MakeValue(begin_params_axis));
(void)this->AddAttr(kBeginParamsAxis, MakeValue(begin_params_axis));
}
int64_t LayerNormGrad::get_begin_norm_axis() const {
auto value_ptr = this->GetAttr(kBeginNormAxis);

@@ -24,9 +24,9 @@ void MaximumGrad::Init(const bool grad_x, const bool grad_y) {
set_grad_y(grad_y);
}

void MaximumGrad::set_grad_x(const bool grad_x) { this->AddAttr(kGradX, MakeValue(grad_x)); }
void MaximumGrad::set_grad_x(const bool grad_x) { (void)this->AddAttr(kGradX, MakeValue(grad_x)); }

void MaximumGrad::set_grad_y(const bool grad_y) { this->AddAttr(kGradY, MakeValue(grad_y)); }
void MaximumGrad::set_grad_y(const bool grad_y) { (void)this->AddAttr(kGradY, MakeValue(grad_y)); }

bool MaximumGrad::get_grad_x() const {
auto value_ptr = GetAttr(kGradX);

@@ -24,9 +24,9 @@ void MinimumGrad::Init(const bool grad_x, const bool grad_y) {
set_grad_y(grad_y);
}

void MinimumGrad::set_grad_x(const bool grad_x) { this->AddAttr(kGradX, MakeValue(grad_x)); }
void MinimumGrad::set_grad_x(const bool grad_x) { (void)this->AddAttr(kGradX, MakeValue(grad_x)); }

void MinimumGrad::set_grad_y(const bool grad_y) { this->AddAttr(kGradY, MakeValue(grad_y)); }
void MinimumGrad::set_grad_y(const bool grad_y) { (void)this->AddAttr(kGradY, MakeValue(grad_y)); }

bool MinimumGrad::get_grad_x() const {
auto value_ptr = GetAttr(kGradX);

@@ -43,14 +43,14 @@ PoolMode PoolingGrad::get_pool_mode() const {
return PoolMode(GetValue<int64_t>(value_ptr));
}

void PoolingGrad::set_window(const std::vector<int64_t> &window) { this->AddAttr(kWindow, MakeValue(window)); }
void PoolingGrad::set_window(const std::vector<int64_t> &window) { (void)this->AddAttr(kWindow, MakeValue(window)); }

std::vector<int64_t> PoolingGrad::get_window() const {
auto value_ptr = GetAttr(kWindow);
return GetValue<std::vector<int64_t>>(value_ptr);
}

void PoolingGrad::set_stride(const std::vector<int64_t> &stride) { this->AddAttr(kStride, MakeValue(stride)); }
void PoolingGrad::set_stride(const std::vector<int64_t> &stride) { (void)this->AddAttr(kStride, MakeValue(stride)); }

std::vector<int64_t> PoolingGrad::get_stride() const {
auto value_ptr = GetAttr(kStride);

@@ -94,7 +94,7 @@ Format PoolingGrad::get_format() const {
return Format(GetValue<int64_t>(value_ptr));
}

void PoolingGrad::set_global(const bool global) { this->AddAttr(kGlobal, MakeValue(global)); }
void PoolingGrad::set_global(const bool global) { (void)this->AddAttr(kGlobal, MakeValue(global)); }

bool PoolingGrad::get_global() const {
auto value_ptr = GetAttr(kGlobal);

@@ -26,19 +26,19 @@

namespace mindspore {
namespace ops {
void PowerGrad::set_power(const float power) { this->AddAttr(kPower, MakeValue(power)); }
void PowerGrad::set_power(const float power) { (void)this->AddAttr(kPower, MakeValue(power)); }
float PowerGrad::get_power() const {
auto value_ptr = GetAttr(kPower);
return GetValue<float>(value_ptr);
}

void PowerGrad::set_scale(const float scale) { this->AddAttr(kScale, MakeValue(scale)); }
void PowerGrad::set_scale(const float scale) { (void)this->AddAttr(kScale, MakeValue(scale)); }
float PowerGrad::get_scale() const {
auto value_ptr = GetAttr(kScale);
return GetValue<float>(value_ptr);
}

void PowerGrad::set_shift(const float shift) { this->AddAttr(kShift, MakeValue(shift)); }
void PowerGrad::set_shift(const float shift) { (void)this->AddAttr(kShift, MakeValue(shift)); }
float PowerGrad::get_shift() const {
auto value_ptr = GetAttr(kShift);
return GetValue<float>(value_ptr);

@@ -32,10 +32,12 @@ void ResizeGrad::Init(const ResizeMethod method, const bool align_corners) {

void ResizeGrad::set_method(const ResizeMethod method) {
auto swi = (int64_t)method;
this->AddAttr(kMethod, MakeValue(swi));
(void)this->AddAttr(kMethod, MakeValue(swi));
}

void ResizeGrad::set_align_corners(const bool align_corners) { this->AddAttr(kAlignCorners, MakeValue(align_corners)); }
void ResizeGrad::set_align_corners(const bool align_corners) {
(void)this->AddAttr(kAlignCorners, MakeValue(align_corners));
}

ResizeMethod ResizeGrad::get_method() const {
auto value_ptr = GetAttr(kMethod);

@@ -31,15 +31,15 @@ AbstractBasePtr SigmoidCrossEntropyWithLogitsGradInfer(const abstract::AnalysisE
const std::vector<AbstractBasePtr> &input_args) {
MS_EXCEPTION_IF_NULL(primitive);
auto prim_name = primitive->name();
CheckAndConvertUtils::CheckInteger("sigmoid_cross_entropy_with_logits_grad_infer", SizeToLong(input_args.size()),
kEqual, 3, prim_name);
(void)CheckAndConvertUtils::CheckInteger("sigmoid_cross_entropy_with_logits_grad_infer",
SizeToLong(input_args.size()), kEqual, 3, prim_name);

// Infer Shape
auto x_shape = CheckAndConvertUtils::ConvertShapePtrToShapeMap(input_args[0]->BuildShape())[kShape];
auto y_shape = CheckAndConvertUtils::ConvertShapePtrToShapeMap(input_args[1]->BuildShape())[kShape];
auto dout_shape = CheckAndConvertUtils::ConvertShapePtrToShapeMap(input_args[2]->BuildShape())[kShape];
CheckAndConvertUtils::Check("x_shape", x_shape, kEqual, "y_shape", y_shape, prim_name, TypeError);
CheckAndConvertUtils::Check("x_shape", x_shape, kEqual, "dout_shape", dout_shape, prim_name, TypeError);
(void)CheckAndConvertUtils::Check("x_shape", x_shape, kEqual, "y_shape", y_shape, prim_name, TypeError);
(void)CheckAndConvertUtils::Check("x_shape", x_shape, kEqual, "dout_shape", dout_shape, prim_name, TypeError);

// Infer type
const std::set<TypePtr> valid_types = {kBool, kInt, kInt8, kInt16, kInt32, kInt64, kUInt, kUInt8,

@@ -26,7 +26,7 @@ namespace mindspore {
namespace ops {
void SmoothL1LossGrad::Init(const float beta) { this->set_beta(beta); }

void SmoothL1LossGrad::set_beta(const float beta) { this->AddAttr(kBeta, MakeValue(beta)); }
void SmoothL1LossGrad::set_beta(const float beta) { (void)this->AddAttr(kBeta, MakeValue(beta)); }

float SmoothL1LossGrad::get_beta() const {
auto value_ptr = this->GetAttr(kBeta);

@@ -37,7 +37,8 @@ AbstractBasePtr SmoothL1LossGradInfer(const abstract::AnalysisEnginePtr &, const
const std::vector<AbstractBasePtr> &input_args) {
MS_EXCEPTION_IF_NULL(primitive);
auto prim_name = primitive->name();
CheckAndConvertUtils::CheckInteger("smooth_l1_loss_grad_infer", input_args.size(), kEqual, 3, prim_name);
(void)CheckAndConvertUtils::CheckInteger("smooth_l1_loss_grad_infer", SizeToLong(input_args.size()), kEqual, 3,
prim_name);

// Infer shape
auto prediction = CheckAndConvertUtils::ConvertShapePtrToShapeMap(input_args[0]->BuildShape())[kShape];

@@ -33,46 +33,46 @@ void StridedSliceGrad::Init(int64_t begin_mask, int64_t end_mask, int64_t ellips
}

void StridedSliceGrad::set_begin_mask(int64_t begin_mask) {
CheckAndConvertUtils::CheckInteger(kBeginMask, begin_mask, kGreaterEqual, 0, this->name());
this->AddAttr(kBeginMask, MakeValue(begin_mask));
(void)CheckAndConvertUtils::CheckInteger(kBeginMask, begin_mask, kGreaterEqual, 0, this->name());
(void)this->AddAttr(kBeginMask, MakeValue(begin_mask));
}
int64_t StridedSliceGrad::get_begin_mask() const {
auto value_ptr = GetAttr(kBeginMask);
return GetValue<int64_t>(value_ptr);
}
void StridedSliceGrad::set_end_mask(int64_t end_mask) {
CheckAndConvertUtils::CheckInteger(kEndMask, end_mask, kGreaterEqual, 0, this->name());
this->AddAttr(kEndMask, MakeValue(end_mask));
(void)CheckAndConvertUtils::CheckInteger(kEndMask, end_mask, kGreaterEqual, 0, this->name());
(void)this->AddAttr(kEndMask, MakeValue(end_mask));
}
int64_t StridedSliceGrad::get_end_mask() const {
auto value_ptr = GetAttr(kEndMask);
return GetValue<int64_t>(value_ptr);
}
void StridedSliceGrad::set_ellipsis_mask(int64_t ellipsis_mask) {
CheckAndConvertUtils::CheckInteger(kEllipsisMask, ellipsis_mask, kGreaterEqual, 0, this->name());
(void)CheckAndConvertUtils::CheckInteger(kEllipsisMask, ellipsis_mask, kGreaterEqual, 0, this->name());
std::bitset<sizeof(int64_t) * 8> bs(ellipsis_mask);
std::ostringstream buffer;
if (bs.count() > 1) {
buffer << "For" << this->name() << ", only support one ellipsis in the index, but got " << this->get_end_mask();
MS_EXCEPTION(ValueError) << buffer.str();
}
this->AddAttr(kEllipsisMask, MakeValue(ellipsis_mask));
(void)this->AddAttr(kEllipsisMask, MakeValue(ellipsis_mask));
}
int64_t StridedSliceGrad::get_ellipsis_mask() const {
auto value_ptr = GetAttr(kEllipsisMask);
return GetValue<int64_t>(value_ptr);
}
void StridedSliceGrad::set_new_axis_mask(int64_t new_axis_mask) {
CheckAndConvertUtils::CheckInteger(kNewAxisMask, new_axis_mask, kGreaterEqual, 0, this->name());
this->AddAttr(kNewAxisMask, MakeValue(new_axis_mask));
(void)CheckAndConvertUtils::CheckInteger(kNewAxisMask, new_axis_mask, kGreaterEqual, 0, this->name());
(void)this->AddAttr(kNewAxisMask, MakeValue(new_axis_mask));
}
int64_t StridedSliceGrad::get_new_axis_mask() const {
auto value_ptr = GetAttr(kNewAxisMask);
return GetValue<int64_t>(value_ptr);
}
void StridedSliceGrad::set_shrink_axis_mask(int64_t shrink_axis_mask) {
CheckAndConvertUtils::CheckInteger(kShrinkAxisMask, shrink_axis_mask, kGreaterEqual, 0, this->name());
this->AddAttr(kShrinkAxisMask, MakeValue(shrink_axis_mask));
(void)CheckAndConvertUtils::CheckInteger(kShrinkAxisMask, shrink_axis_mask, kGreaterEqual, 0, this->name());
(void)this->AddAttr(kShrinkAxisMask, MakeValue(shrink_axis_mask));
}
int64_t StridedSliceGrad::get_shrink_axis_mask() const {
auto value_ptr = GetAttr(kShrinkAxisMask);
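StridedSliceGrad's mask setters first check each mask is non-negative and then store it; set_ellipsis_mask additionally uses std::bitset::count to reject a mask with more than one set bit, since an index may contain at most one ellipsis (the diagnostic in the original prints get_end_mask(), which reads like a copy-paste slip). A sketch of just the bit-count check:

#include <bitset>
#include <cstdint>
#include <stdexcept>

// At-most-one-ellipsis check: each set bit marks one dimension of the index.
void CheckEllipsisMaskSketch(int64_t ellipsis_mask) {
  std::bitset<sizeof(int64_t) * 8> bs(static_cast<unsigned long long>(ellipsis_mask));
  if (bs.count() > 1) {  // more than one '1' bit means more than one ellipsis
    throw std::invalid_argument("only one ellipsis is supported in the index");
  }
}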
@@ -33,33 +33,33 @@ void GRU::Init(bool bidirectional, int64_t cell_depth, float keep_prob, float ce
this->set_gate_order(gate_order);
}

void GRU::set_bidirectional(bool bidirectional) { AddAttr(kBidirectional, MakeValue(bidirectional)); }
void GRU::set_bidirectional(bool bidirectional) { (void)AddAttr(kBidirectional, MakeValue(bidirectional)); }

void GRU::set_cell_depth(int64_t cell_depth) { AddAttr(kCellDepth, MakeValue(cell_depth)); }
void GRU::set_cell_depth(int64_t cell_depth) { (void)AddAttr(kCellDepth, MakeValue(cell_depth)); }

void GRU::set_keep_prob(float keep_prob) { AddAttr(kKeepProb, MakeValue(keep_prob)); }
void GRU::set_keep_prob(float keep_prob) { (void)AddAttr(kKeepProb, MakeValue(keep_prob)); }

void GRU::set_cell_clip(float cell_clip) { AddAttr(kCellClip, MakeValue(cell_clip)); }
void GRU::set_cell_clip(float cell_clip) { (void)AddAttr(kCellClip, MakeValue(cell_clip)); }

void GRU::set_num_proj(int64_t num_proj) {
CheckAndConvertUtils::CheckInteger(kNumProj, num_proj, kGreaterThan, 0, this->name());
AddAttr(kNumProj, MakeValue(num_proj));
(void)CheckAndConvertUtils::CheckInteger(kNumProj, num_proj, kGreaterThan, 0, this->name());
(void)AddAttr(kNumProj, MakeValue(num_proj));
}

void GRU::set_time_major(bool time_major) { AddAttr(kTimeMajor, MakeValue(time_major)); }
void GRU::set_time_major(bool time_major) { (void)AddAttr(kTimeMajor, MakeValue(time_major)); }

void GRU::set_reset_after(bool reset_after) { AddAttr(kResetAfter, MakeValue(reset_after)); }
void GRU::set_reset_after(bool reset_after) { (void)AddAttr(kResetAfter, MakeValue(reset_after)); }

void GRU::set_is_training(bool is_training) { AddAttr(kIsTraining, MakeValue(is_training)); }
void GRU::set_is_training(bool is_training) { (void)AddAttr(kIsTraining, MakeValue(is_training)); }

void GRU::set_activation(ActivationType activation) {
int64_t swi = activation;
AddAttr(kActivation, MakeValue(swi));
(void)AddAttr(kActivation, MakeValue(swi));
}

void GRU::set_gate_order(GateOrderMode gate_order) {
int64_t swi = gate_order;
AddAttr(kGateOrder, MakeValue(swi));
(void)AddAttr(kGateOrder, MakeValue(swi));
}

bool GRU::get_bidirectional() const {

@@ -99,13 +99,15 @@ AbstractBasePtr LayerNormInfer(const abstract::AnalysisEnginePtr &, const Primit
auto input_min_shape = input_shape->min_shape();
auto input_max_shape = input_shape->max_shape();
if (input_min_shape.empty() || input_max_shape.empty()) {
shapes_list.emplace_back(std::make_shared<abstract::Shape>(mean_var_shape));
shapes_list.emplace_back(std::make_shared<abstract::Shape>(mean_var_shape));
(void)shapes_list.emplace_back(std::make_shared<abstract::Shape>(mean_var_shape));
(void)shapes_list.emplace_back(std::make_shared<abstract::Shape>(mean_var_shape));
} else {
auto mean_var_shape_min = CalLayerNormMeanAndVarShape(begin_norm_axis, input_min_shape);
auto mean_var_shape_max = CalLayerNormMeanAndVarShape(begin_norm_axis, input_min_shape);
shapes_list.emplace_back(std::make_shared<abstract::Shape>(mean_var_shape, mean_var_shape_min, mean_var_shape_max));
shapes_list.emplace_back(std::make_shared<abstract::Shape>(mean_var_shape, mean_var_shape_min, mean_var_shape_max));
(void)shapes_list.emplace_back(
std::make_shared<abstract::Shape>(mean_var_shape, mean_var_shape_min, mean_var_shape_max));
(void)shapes_list.emplace_back(
std::make_shared<abstract::Shape>(mean_var_shape, mean_var_shape_min, mean_var_shape_max));
}
return abstract::MakeAbstract(std::make_shared<abstract::TupleShape>(shapes_list),
std::make_shared<Tuple>(types_list));
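The LayerNormInfer hunk void-casts even emplace_back: since C++17, std::vector::emplace_back returns a reference to the newly inserted element, so strict lint treats an untouched call as yet another discarded return. A minimal illustration:

#include <memory>
#include <vector>

struct ShapeSketch {
  explicit ShapeSketch(int rank) : rank_(rank) {}
  int rank_;
};

void DemoEmplaceDiscard() {
  std::vector<std::shared_ptr<ShapeSketch>> shapes_list;
  // Since C++17 emplace_back returns a reference to the new element
  // (here a shared_ptr&); the cast marks the discard as deliberate.
  (void)shapes_list.emplace_back(std::make_shared<ShapeSketch>(2));
}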
@ -29,7 +29,7 @@ abstract::ShapePtr InferShape(const PrimitivePtr &primitive, const std::vector<A
|
|||
|
||||
TypePtr InferType(const PrimitivePtr &prim, const std::vector<AbstractBasePtr> &input_args) {
|
||||
MS_EXCEPTION_IF_NULL(prim);
|
||||
CheckAndConvertUtils::CheckInteger("input number", input_args.size(), kEqual, 1, prim->name());
|
||||
(void)CheckAndConvertUtils::CheckInteger("input number", SizeToLong(input_args.size()), kEqual, 1, prim->name());
|
||||
for (const auto &item : input_args) {
|
||||
MS_EXCEPTION_IF_NULL(item);
|
||||
}
|
||||
|
@ -48,7 +48,7 @@ float LeakyRelu::get_negative_slope() const { return GetValue<float>(GetAttr(kNe
|
|||
AbstractBasePtr LeakyReluInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive,
|
||||
const std::vector<AbstractBasePtr> &input_args) {
|
||||
return std::make_shared<abstract::AbstractTensor>(InferType(primitive, input_args),
|
||||
InferShape(primitive, input_args)->shape());
|
||||
InferShape(primitive, input_args));
|
||||
}
|
||||
REGISTER_PRIMITIVE_C(kNameLeakyRelu, LeakyRelu);
|
||||
} // namespace ops
|
||||
|
|
|
@ -30,7 +30,7 @@ namespace {
|
|||
abstract::ShapePtr InferShape(const PrimitivePtr &primitive, const std::vector<AbstractBasePtr> &input_args) {
|
||||
MS_EXCEPTION_IF_NULL(primitive);
|
||||
auto prim_name = primitive->name();
|
||||
CheckAndConvertUtils::CheckInteger("input numbers", input_args.size(), kEqual, 1, prim_name);
|
||||
(void)CheckAndConvertUtils::CheckInteger("input numbers", input_args.size(), kEqual, 1, prim_name);
|
||||
for (const auto &item : input_args) {
|
||||
MS_EXCEPTION_IF_NULL(item);
|
||||
}
|
||||
|
@ -47,7 +47,7 @@ abstract::ShapePtr InferShape(const PrimitivePtr &primitive, const std::vector<A
|
|||
TypePtr InferType(const PrimitivePtr &prim, const std::vector<AbstractBasePtr> &input_args) {
|
||||
MS_EXCEPTION_IF_NULL(prim);
|
||||
auto op_name = prim->name();
|
||||
CheckAndConvertUtils::CheckInteger("input number", input_args.size(), kEqual, 1, op_name);
|
||||
(void)CheckAndConvertUtils::CheckInteger("input number", input_args.size(), kEqual, 1, op_name);
|
||||
for (const auto &item : input_args) {
|
||||
MS_EXCEPTION_IF_NULL(item);
|
||||
}
|
||||
|
|
|
@ -14,9 +14,10 @@
|
|||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#include "ops/logical_not.h"
|
||||
|
||||
#include <set>
|
||||
|
||||
#include "ops/logical_not.h"
|
||||
#include "ops/op_utils.h"
|
||||
|
||||
namespace mindspore {
|
||||
|
|
|
@ -27,7 +27,7 @@
|
|||
namespace mindspore {
|
||||
namespace ops {
|
||||
void LRN::set_depth_radius(const int64_t depth_radius) {
|
||||
CheckAndConvertUtils::CheckInteger(kDepthRadius, depth_radius, kGreaterEqual, 0, this->name());
|
||||
(void)CheckAndConvertUtils::CheckInteger(kDepthRadius, depth_radius, kGreaterEqual, 0, this->name());
|
||||
this->AddAttr(kDepthRadius, MakeValue(depth_radius));
|
||||
}
|
||||
|
||||
|
@ -79,7 +79,8 @@ abstract::ShapePtr InferShape(const PrimitivePtr &primitive, const std::vector<A
|
|||
MS_EXCEPTION_IF_NULL(primitive);
|
||||
auto prim_name = primitive->name();
|
||||
auto in_shape = CheckAndConvertUtils::ConvertShapePtrToShapeMap(input_args[0]->BuildShape())[kShape];
|
||||
CheckAndConvertUtils::CheckInteger("input shape", in_shape.size(), kEqual, 4, prim_name);
|
||||
(void)CheckAndConvertUtils::CheckInteger("input shape", SizeToLong(in_shape.size()), kEqual, 4, prim_name);
|
||||
|
||||
return std::make_shared<abstract::Shape>(in_shape);
|
||||
}
|
||||
|
||||
|
|
|
@@ -34,14 +34,14 @@ AbstractBasePtr LshProjectionInfer(const abstract::AnalysisEnginePtr &, const Pr
  auto op_name = primitive->name();
  auto input0 = CheckAndConvertUtils::ConvertShapePtrToShapeMap(input_args[0]->BuildShape())[kShape];
  auto input1 = CheckAndConvertUtils::ConvertShapePtrToShapeMap(input_args[1]->BuildShape())[kShape];
  CheckAndConvertUtils::CheckInteger("input0_shape", SizeToLong(input0.size()), kEqual, 2, op_name);
  CheckAndConvertUtils::CheckInteger("input0_shape_dimen_1", input0[1], kLessEqual, 32, op_name);
  CheckAndConvertUtils::CheckInteger("input1_shape", SizeToLong(input1.size()), kGreaterEqual, 1, op_name);
  (void)CheckAndConvertUtils::CheckInteger("input0_shape", SizeToLong(input0.size()), kEqual, 2, op_name);
  (void)CheckAndConvertUtils::CheckInteger("input0_shape_dimen_1", input0[1], kLessEqual, 32, op_name);
  (void)CheckAndConvertUtils::CheckInteger("input1_shape", SizeToLong(input1.size()), kGreaterEqual, 1, op_name);

  if (input_args.size() == 3) {
    auto input2 = CheckAndConvertUtils::ConvertShapePtrToShapeMap(input_args[2]->BuildShape())[kShape];
    CheckAndConvertUtils::CheckInteger("input2_shape", SizeToLong(input2.size()), kEqual, 1, op_name);
    CheckAndConvertUtils::CheckInteger("input2_shape_dimen_0", input2[0], kEqual, input1[0], op_name);
    (void)CheckAndConvertUtils::CheckInteger("input2_shape", SizeToLong(input2.size()), kEqual, 1, op_name);
    (void)CheckAndConvertUtils::CheckInteger("input2_shape_dimen_0", input2[0], kEqual, input1[0], op_name);
  }

  std::vector<int64_t> out_shape;

@@ -31,7 +31,7 @@ AbstractBasePtr LstmInfer(const PrimitivePtr &primitive, const std::vector<Abstr
  // infer shape
  MS_EXCEPTION_IF_NULL(primitive);
  auto prim_name = primitive->name();
  CheckAndConvertUtils::CheckInteger("lstm_prim_infer", SizeToLong(input_args.size()), kEqual, 4, prim_name);
  (void)CheckAndConvertUtils::CheckInteger("lstm_prim_infer", SizeToLong(input_args.size()), kEqual, 4, prim_name);
  auto x_input_shape = CheckAndConvertUtils::ConvertShapePtrToShapeMap(input_args[0]->BuildShape())[kShape];
  auto h_input_shape = CheckAndConvertUtils::ConvertShapePtrToShapeMap(input_args[1]->BuildShape())[kShape];
  auto c_input_shape = CheckAndConvertUtils::ConvertShapePtrToShapeMap(input_args[2]->BuildShape())[kShape];

@@ -47,9 +47,10 @@ AbstractBasePtr LstmInfer(const PrimitivePtr &primitive, const std::vector<Abstr
  int64_t num_directions = GetValue<int64_t>(primitive->GetAttr(kNumDirections));
  int64_t hidden_size = GetValue<int64_t>(primitive->GetAttr(kHidden_size));
  int64_t input_size = input_x_size;
  CheckAndConvertUtils::CheckInteger("h_shape[0]", h_input_shape[0], kEqual, num_layers * num_directions, prim_name);
  CheckAndConvertUtils::CheckInteger("h_shape[1]", h_input_shape[1], kEqual, x_input_shape[1], prim_name);
  CheckAndConvertUtils::CheckInteger("h_shape[2]", h_input_shape[2], kEqual, hidden_size, prim_name);
  (void)CheckAndConvertUtils::CheckInteger("h_shape[0]", h_input_shape[0], kEqual, num_layers * num_directions,
                                           prim_name);
  (void)CheckAndConvertUtils::CheckInteger("h_shape[1]", h_input_shape[1], kEqual, x_input_shape[1], prim_name);
  (void)CheckAndConvertUtils::CheckInteger("h_shape[2]", h_input_shape[2], kEqual, hidden_size, prim_name);

  std::vector<int64_t> y_shape = {x_input_shape[0], x_input_shape[1], hidden_size * num_directions};

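The three h_shape checks above encode the standard LSTM hidden-state invariant: h must have shape (num_layers * num_directions, batch_size, hidden_size), with the batch dimension taken from x. A small self-contained illustration of that invariant (the concrete numbers are made up for the example):

#include <cstdint>
#include <vector>

int main() {
  const int64_t num_layers = 2, num_directions = 2, hidden_size = 16;
  const std::vector<int64_t> x_input_shape = {10, 32, 8};  // (seq_len, batch, input_size)
  // A hidden state that would pass the diff's three equality checks:
  const std::vector<int64_t> h_input_shape = {num_layers * num_directions,  // h_shape[0]
                                              x_input_shape[1],             // h_shape[1] == batch
                                              hidden_size};                 // h_shape[2]
  return (h_input_shape[0] == 4 && h_input_shape[1] == 32 && h_input_shape[2] == 16) ? 0 : 1;
}
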
@@ -74,7 +75,7 @@ AbstractBasePtr LstmInfer(const PrimitivePtr &primitive, const std::vector<Abstr
  current_offset += ws_diff_states_size;
  current_offset = ((current_offset / page_size - 1) / page_size) * page_size;
  std::vector<int64_t> x_shape = {x_input_shape};
  // std::vector<int64_t> h_shape = {h_input_shape};

  std::vector<int64_t> c_shape = {c_input_shape};
  std::vector<int64_t> reverse_shape = {current_offset, 1};
  std::vector<int64_t> state_shape = {1, 1};

@@ -63,8 +63,8 @@ abstract::ShapePtr MatMulInferShape(const PrimitivePtr &primitive, const std::ve
  ShapeVector x_max_shape = x_shape_map[kMaxShape];
  ShapeVector y_min_shape = y_shape_map[kMinShape];
  ShapeVector y_max_shape = y_shape_map[kMaxShape];
  (void)CheckAndConvertUtils::CheckMinMaxShape(x_shp, &x_min_shape, &x_max_shape);
  (void)CheckAndConvertUtils::CheckMinMaxShape(y_shp, &y_min_shape, &y_max_shape);
  CheckAndConvertUtils::CheckMinMaxShape(x_shp, &x_min_shape, &x_max_shape);
  CheckAndConvertUtils::CheckMinMaxShape(y_shp, &y_min_shape, &y_max_shape);
  // Additional check for dynamic shape
  // Last infer will be real shape values
  bool x_not_dyn =

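In this hunk the (void) casts are dropped rather than added, which reads as consistent with CheckMinMaxShape returning void (casting an already-void expression to void is itself lint-flagged as redundant); that is an inference from the diff, not something the PR states. A hedged sketch of the min/max normalization such a helper plausibly performs for dynamic-shape inference:

#include <cstdint>
#include <vector>

using ShapeVector = std::vector<int64_t>;

// Assumed behavior, not the real implementation: when no dynamic-shape
// bounds were recorded, fall back to the static shape so the bounds math
// downstream always has concrete values to compare against.
void NormalizeMinMaxShape(const ShapeVector &shape, ShapeVector *min_shape, ShapeVector *max_shape) {
  if (min_shape->empty()) *min_shape = shape;
  if (max_shape->empty()) *max_shape = shape;
}

int main() {
  const ShapeVector x_shp = {4, 8};
  ShapeVector x_min_shape;  // empty: no bounds recorded
  ShapeVector x_max_shape;
  NormalizeMinMaxShape(x_shp, &x_min_shape, &x_max_shape);
  return (x_min_shape == x_shp && x_max_shape == x_shp) ? 0 : 1;  // both now {4, 8}
}
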
@@ -98,8 +98,8 @@ TypePtr MatMulInferType(const PrimitivePtr &prim, const std::vector<AbstractBase
  MS_EXCEPTION_IF_NULL(prim);
  const std::set<TypePtr> valid_types = {kInt8, kInt16, kInt32, kInt64, kFloat16, kFloat32, kFloat64};
  std::map<std::string, TypePtr> types;
  (void)types.emplace("x", input_args[0]->BuildType());
  (void)types.emplace("w", input_args[1]->BuildType());
  types.emplace("x", input_args[0]->BuildType());
  types.emplace("w", input_args[1]->BuildType());
  return CheckAndConvertUtils::CheckTensorTypeSame(types, valid_types, prim->name());
}
}  // namespace

@@ -125,7 +125,8 @@ bool MatMul::get_transpose_b() const {

AbstractBasePtr MatMulInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive,
                            const std::vector<AbstractBasePtr> &input_args) {
  CheckAndConvertUtils::CheckInteger("MatMul infer", input_args.size(), kGreaterEqual, 2, primitive->name());
  (void)CheckAndConvertUtils::CheckInteger("MatMul infer", SizeToLong(input_args.size()), kGreaterEqual, 2,
                                           primitive->name());
  return abstract::MakeAbstract(MatMulInferShape(primitive, input_args), MatMulInferType(primitive, input_args));
}
// Add

@@ -87,7 +87,8 @@ abstract::ShapePtr InferShape(const PrimitivePtr &primitive, const std::vector<A
  if (format == NHWC) {
    in_shape = {in_shape[0], in_shape[3], in_shape[1], in_shape[2]};
  }
  CheckAndConvertUtils::CheckInteger("x_rank", in_shape.size(), kEqual, 4, op_name);
  (void)CheckAndConvertUtils::CheckInteger("x_rank", SizeToLong(in_shape.size()), kEqual, 4, op_name);

  auto kernel_size = GetValue<std::vector<int64_t>>(primitive->GetAttr(kKernelSize));
  auto pad_mode_value = (primitive->GetAttr(kPadMode));
  auto pad_mode = PadMode(GetValue<int64_t>(pad_mode_value));

@@ -36,7 +36,7 @@ AbstractBasePtr MergeInfer(const abstract::AnalysisEnginePtr &, const PrimitiveP
    args.insert({"input[" + std::to_string(i) + "]", inputs_type[i]});
  }
  std::set<TypePtr> template_type = common_valid_types;
  (void)template_type.emplace(kBool);
  template_type.emplace(kBool);
  auto infered_type = CheckAndConvertUtils::CheckScalarOrTensorTypesSame(args, template_type, op_name);
  std::vector<int64_t> in_shape0 = inputs_shape[0]->cast<abstract::ShapePtr>()->shape();