forked from mindspore-Ecosystem/mindspore
clear the alarm information of master branch
This commit is contained in:
parent db4669f3d1
commit 3894c4d3d7
@@ -80,9 +80,9 @@ AbstractBasePtr ApplyMomentumInfer(const abstract::AnalysisEnginePtr &, const Pr
   (void)CheckAndConvertUtils::CheckTensorTypeValid("v_type", v_tensor_type, valid_types, prim_name);
   (void)CheckAndConvertUtils::CheckTensorTypeValid("a_type", a_tensor_type, valid_types, prim_name);
   std::map<std::string, TypePtr> args;
-  args.insert(std::make_pair("l_type", l_type));
-  args.insert(std::make_pair("g_type", g_type));
-  args.insert(std::make_pair("m_type", m_type));
+  (void)args.insert(std::make_pair("l_type", l_type));
+  (void)args.insert(std::make_pair("g_type", g_type));
+  (void)args.insert(std::make_pair("m_type", m_type));
   CheckAndConvertUtils::CheckScalarOrTensorTypesSame(args, valid_types, prim_name);
   auto g_type_tensor = g_type->cast<TensorTypePtr>();
   auto element = g_type_tensor->element();
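Note: most hunks in this commit apply one idiom for clearing "unused return value" alarms: cast the deliberately discarded result to (void). A minimal standalone sketch of the idiom follows; check_positive is a hypothetical stand-in for the CheckAndConvertUtils checkers, which validate and also return the checked value.

#include <map>
#include <stdexcept>
#include <string>
#include <utility>

// Hypothetical checker in the spirit of CheckAndConvertUtils: it validates the
// input and also returns it, although many call sites never need the result.
int check_positive(int v) {
  if (v <= 0) {
    throw std::invalid_argument("expected a positive value");
  }
  return v;
}

int main() {
  // Without the cast, analyzers report that the return value is discarded.
  (void)check_positive(3);
  // std::map::insert returns std::pair<iterator, bool>; the cast documents
  // that the caller does not care whether the key was already present.
  std::map<std::string, int> args;
  (void)args.insert(std::make_pair("l_type", 1));
  return 0;
}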
@@ -78,7 +78,7 @@ int64_t Log2Ceil(int64_t length) {
   int64_t floor = 0;
   for (int64_t i = 4; i >= 0; --i) {
     const int64_t shift = static_cast<int64_t>(1UL << static_cast<unsigned>(i));
-    int64_t tmp = SizeToLong(length >> shift);
+    int64_t tmp = SizeToLong(static_cast<uint64_t>(length) >> static_cast<uint64_t>(shift));
     if (tmp != 0) {
       length = tmp;
       floor += shift;
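Note: the Log2Ceil change moves the right shift into the unsigned domain. Right-shifting a signed integer is what the analyzer flags here; for negative operands it was implementation-defined before C++20. A standalone sketch of the same loop (an illustrative rewrite, not the MindSpore source; SizeToLong is replaced by a plain static_cast so the sketch is self-contained):

#include <cstdint>
#include <iostream>

int64_t log2_floor(int64_t length) {
  int64_t floor = 0;
  for (int64_t i = 4; i >= 0; --i) {
    const int64_t shift = static_cast<int64_t>(1UL << static_cast<unsigned>(i));
    // Shift in the unsigned domain: well-defined for any bit pattern, and
    // equal to the signed shift whenever length >= 0.
    int64_t tmp = static_cast<int64_t>(static_cast<uint64_t>(length) >> static_cast<uint64_t>(shift));
    if (tmp != 0) {
      length = tmp;
      floor += shift;
    }
  }
  return floor;
}

int main() {
  std::cout << log2_floor(1024) << '\n';  // prints 10
  return 0;
}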
@@ -111,11 +111,11 @@ abstract::ShapePtr InferShape(const PrimitivePtr &primitive, const std::vector<A
   int64_t out_h = abstract::Shape::SHP_ANY;
   int64_t out_w = abstract::Shape::SHP_ANY;
   if (pad_mode == VALID) {
-    out_h = static_cast<int64_t>(ceil((in_h - (kernel_h - 1)) / stride_h));
-    out_w = static_cast<int64_t>(ceil((in_w - (kernel_w - 1)) / stride_w));
+    out_h = static_cast<int64_t>(std::ceil((in_h - (kernel_h - 1)) / static_cast<float>(stride_h)));
+    out_w = static_cast<int64_t>(std::ceil((in_w - (kernel_w - 1)) / static_cast<float>(stride_w)));
   } else if (pad_mode == SAME) {
-    out_h = static_cast<int64_t>(ceil(in_h / stride_h));
-    out_w = static_cast<int64_t>(ceil(in_w / stride_w));
+    out_h = static_cast<int64_t>(std::ceil(in_h / static_cast<float>(stride_h)));
+    out_w = static_cast<int64_t>(std::ceil(in_w / static_cast<float>(stride_w)));
   }
   std::vector<int64_t> out_shape = {batch, channel, out_h, out_w};
   if (format == NHWC) {
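Note: this hunk fixes a classic trap, not just a style alarm: in_h / stride_h divides two integers, truncating before ceil ever runs, so the ceil is a no-op. Casting the divisor to float first keeps the fraction. A small self-contained illustration with assumed values:

#include <cmath>
#include <cstdint>
#include <iostream>

int main() {
  int64_t in_h = 7, stride_h = 2;
  // Integer division truncates first; std::ceil then sees 3.0, not 3.5.
  auto wrong = static_cast<int64_t>(std::ceil(in_h / stride_h));  // 3
  // Promoting the divisor keeps the fraction, so ceil rounds up as intended.
  auto right = static_cast<int64_t>(std::ceil(in_h / static_cast<float>(stride_h)));  // 4
  std::cout << wrong << " vs " << right << '\n';
  return 0;
}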
@@ -26,7 +26,7 @@
 namespace mindspore {
 namespace ops {
 namespace {
-constexpr size_t k5DInputDims = 5;
+constexpr int64_t k5DInputDims = 5;
 constexpr size_t kKernelDims = 3;
 constexpr size_t kStridesDims = 3;
 constexpr size_t kPadDims = 6;
@@ -49,8 +49,8 @@ TypePtr InferType(const PrimitivePtr &primitive, const std::vector<AbstractBaseP
   }
   const std::set<TypePtr> valid_types = {kFloat32, kFloat16};
   std::map<std::string, TypePtr> types;
-  types.emplace("input_x", input_args[0]->BuildType());
-  types.emplace("input_y", input_args[1]->BuildType());
+  (void)types.emplace("input_x", input_args[0]->BuildType());
+  (void)types.emplace("input_y", input_args[1]->BuildType());
   return CheckAndConvertUtils::CheckTensorTypeSame(types, valid_types, primitive->name());
 }
 } // namespace
@@ -77,13 +77,15 @@ void Conv2DPadFunction(std::vector<int64_t> *output_hw, std::vector<int64_t> *pa
   int64_t out_h = -1;
   int64_t out_w = -1;
   if (x_h != Shape::SHP_ANY) {
-    out_h = static_cast<int64_t>(std::ceil(((x_h * 1.0) - dilation[0] * (kernel[0] - 1)) / stride[0]));
+    out_h =
+      static_cast<int64_t>(std::ceil(((x_h * 1.0) - static_cast<float>(dilation[0] * (kernel[0] - 1))) / stride[0]));
     if (is_min_shape && out_h < 1) {
       out_h = 1L;
     }
   }
   if (x_w != Shape::SHP_ANY) {
-    out_w = static_cast<int64_t>(std::ceil(((x_w * 1.0) - dilation[1] * (kernel[1] - 1)) / stride[1]));
+    out_w =
+      static_cast<int64_t>(std::ceil(((x_w * 1.0) - static_cast<float>(dilation[1] * (kernel[1] - 1))) / stride[1]));
     if (is_min_shape && out_w < 1) {
       out_w = 1L;
     }
@@ -120,9 +122,9 @@ void Conv2DPadFunction(std::vector<int64_t> *output_hw, std::vector<int64_t> *pa
   int64_t out_h = -1;
   int64_t out_w = -1;
   if (x_h != Shape::SHP_ANY) {
-    out_h = static_cast<int64_t>(std::floor(
-      1 + ((x_h * 1.0) + pad_list->at(0) + pad_list->at(1) - kernel[0] - (kernel[0] - 1) * (dilation[0] - 1)) /
-            stride[0]));
+    out_h = static_cast<int64_t>(std::floor(1 + ((x_h * 1.0) + pad_list->at(0) + pad_list->at(1) - kernel[0] -
+                                                 static_cast<float>((kernel[0] - 1) * (dilation[0] - 1))) /
+                                                  stride[0]));
     if (is_min_shape && out_h < 1) {
       out_h = 1L;
     }
@@ -130,7 +132,7 @@ void Conv2DPadFunction(std::vector<int64_t> *output_hw, std::vector<int64_t> *pa
   if (x_w != Shape::SHP_ANY) {
     out_w =
       static_cast<int64_t>(std::floor(1 + ((x_w * 1.0) + pad_list->at(kInputIndex2) + pad_list->at(kInputIndex3) -
-                                           kernel[1] - (kernel[1] - 1) * (dilation[1] - 1)) /
+                                           kernel[1] - static_cast<float>((kernel[1] - 1) * (dilation[1] - 1))) /
                                          stride[1]));
     if (is_min_shape && out_w < 1) {
       out_w = 1L;
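Note: the two PAD-mode hunks above implement the standard dilated-convolution output-size formula, out = floor(1 + (x + pad_begin + pad_end - kernel - (kernel - 1) * (dilation - 1)) / stride), with the float cast applied to the int64_t subexpression to silence the implicit-conversion alarm. A hedged worked example (ConvOutputSize is an illustrative name, not a MindSpore function):

#include <cmath>
#include <cstdint>
#include <iostream>

// Output size of a dilated convolution with explicit padding; the
// static_cast<float> mirrors the fix above.
int64_t ConvOutputSize(int64_t x, int64_t pad0, int64_t pad1, int64_t kernel, int64_t dilation, int64_t stride) {
  return static_cast<int64_t>(
      std::floor(1 + ((x * 1.0) + pad0 + pad1 - kernel - static_cast<float>((kernel - 1) * (dilation - 1))) / stride));
}

int main() {
  // 32-wide input, 3-wide kernel, padding 1, dilation 1, stride 1 -> 32 (shape preserved).
  std::cout << ConvOutputSize(32, 1, 1, 3, 1, 1) << '\n';
  return 0;
}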
@@ -30,7 +30,7 @@ namespace {
 constexpr size_t kLenLogProbs = 3;
 constexpr size_t kLenTarget = 2;
 constexpr int64_t kMulti = 2;
-constexpr size_t kInputSize = 4;
+constexpr int64_t kInputSize = 4;
 abstract::TupleShapePtr CTCLossV2InferShape(const PrimitivePtr &primitive,
                                             const std::vector<AbstractBasePtr> &input_args) {
   MS_EXCEPTION_IF_NULL(primitive);
@@ -27,7 +27,7 @@ namespace mindspore {
 namespace ops {
 namespace {
 constexpr size_t kLenLogProbs = 3;
-constexpr size_t kInputSize = 7;
+constexpr int64_t kInputSize = 7;
 constexpr size_t kIdx2 = 2;
 abstract::ShapePtr CTCLossV2GradInferShape(const PrimitivePtr &primitive,
                                            const std::vector<AbstractBasePtr> &input_args) {
@@ -40,13 +40,13 @@ int64_t CheckInputsAndGetShape(const AbstractBasePtr &input_arg, const string &p
       if (max_shape.empty()) {
         MS_LOG(EXCEPTION) << prim_name << " input shape is dynamic, but max shape is empty.";
       }
-      return static_cast<size_t>(max_shape[0]);
+      return max_shape[0];
     }
-    return static_cast<size_t>(input_shape[0]);
+    return input_shape[0];
   } else if (input_arg->isa<abstract::AbstractTuple>()) {
     auto x_shape = dyn_cast<abstract::AbstractTuple>(input_arg);
     auto x_shape_data = x_shape->elements();
-    return x_shape_data.size();
+    return SizeToLong(x_shape_data.size());
   } else {
     MS_EXCEPTION(TypeError) << prim_name << " input must be a tuple or Tensor.";
   }
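Note: many hunks route size_t/int64_t conversions through SizeToLong and LongToSize rather than raw casts, making each narrowing or sign change explicit and auditable. A sketch of what such helpers can look like; MindSpore's real helpers live in its utility headers and may differ in error handling:

#include <cstdint>
#include <limits>
#include <stdexcept>

// Checked conversions in the spirit of SizeToLong / LongToSize; illustrative
// only, the real implementations may report errors differently.
int64_t SizeToLong(size_t v) {
  if (v > static_cast<size_t>(std::numeric_limits<int64_t>::max())) {
    throw std::out_of_range("size_t value does not fit in int64_t");
  }
  return static_cast<int64_t>(v);
}

size_t LongToSize(int64_t v) {
  if (v < 0) {
    throw std::out_of_range("negative value cannot be used as a size or index");
  }
  return static_cast<size_t>(v);
}

int main() {
  size_t n = 42;
  int64_t len = SizeToLong(n);   // explicit widening with a range check
  size_t idx = LongToSize(len);  // round-trips safely for non-negative values
  return static_cast<int>(idx - n);  // 0
}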
@@ -40,7 +40,7 @@ AbstractBasePtr ExpandDimsInfer(const abstract::AnalysisEnginePtr &, const Primi
   auto x_shape = CheckAndConvertUtils::ConvertShapePtrToShapeMap(input_args[0]->BuildShape())[kShape];
   auto dim_val = GetValue<int64_t>(input_args[1]->BuildValue());
   auto rank = x_shape.size();
-  CheckAndConvertUtils::CheckInRange<int64_t>("axis", dim_val, kIncludeBoth, {-rank - 1, rank}, prim_name);
+  (void)CheckAndConvertUtils::CheckInRange<int64_t>("axis", dim_val, kIncludeBoth, {-rank - 1, rank}, prim_name);
   if (dim_val < 0) {
     dim_val += SizeToLong(x_shape.size()) + 1;
   }
@@ -27,12 +27,12 @@ abstract::ShapePtr InferShape(const PrimitivePtr &primitive, const std::vector<A
   (void)CheckAndConvertUtils::CheckInteger("input args size", SizeToLong(input_args.size()), kGreaterEqual, 1,
                                            prim_name);
   auto x_shape = CheckAndConvertUtils::ConvertShapePtrToShapeMap(input_args[0]->BuildShape())[kShape];
-  size_t prod = 1;
+  int64_t prod = 1;
   size_t size = x_shape.size();
   for (size_t i = 1; i < size; i++) {
     prod = prod * x_shape[i];
   }
-  std::vector<int64_t> out_shape = {x_shape[0], SizeToLong(prod)};
+  std::vector<int64_t> out_shape = {x_shape[0], prod};
   return std::make_shared<abstract::Shape>(out_shape);
 }
 
@@ -40,7 +40,7 @@ void Conv2dTransposeFusion::Init(int64_t in_channel, int64_t out_channel, const
 }
 
 void Conv2dTransposeFusion::set_kernel_size(const std::vector<int64_t> &kernel_size) {
-  const size_t kernel_len = 2;
+  const int64_t kernel_len = 2;
   (void)CheckAndConvertUtils::CheckInteger(kKernelSize, SizeToLong(kernel_size.size()), kEqual, kernel_len, name());
   for (int64_t item : kernel_size) {
     (void)CheckAndConvertUtils::CheckInteger(kKernelSize, item, kGreaterEqual, 1, name());
@@ -49,7 +49,7 @@ void Conv2dTransposeFusion::set_kernel_size(const std::vector<int64_t> &kernel_s
 }
 
 void Conv2dTransposeFusion::set_dilation(const std::vector<int64_t> &dilation) {
-  const size_t dilation_size = 2;
+  const int64_t dilation_size = 2;
   (void)CheckAndConvertUtils::CheckInteger(kDilation, SizeToLong(dilation.size()), kEqual, dilation_size, name());
   for (int64_t item : dilation) {
     (void)CheckAndConvertUtils::CheckInteger(kDilation, item, kGreaterEqual, 1, name());
@@ -25,7 +25,7 @@
 namespace mindspore {
 namespace ops {
 namespace {
-constexpr size_t k5DInputDims = 5;
+constexpr int64_t k5DInputDims = 5;
 
 abstract::ShapePtr InferShape(const PrimitivePtr &primitive, const std::vector<AbstractBasePtr> &input_args) {
   MS_EXCEPTION_IF_NULL(primitive);
@@ -50,10 +50,10 @@ TypePtr InferType(const PrimitivePtr &primitive, const std::vector<AbstractBaseP
   }
   const std::set<TypePtr> valid_types = {kFloat32, kFloat16};
   std::map<std::string, TypePtr> types;
-  types.emplace("grad", input_args[0]->BuildType());
-  types.emplace("input_x", input_args[1]->BuildType());
-  types.emplace("input_y", input_args[2]->BuildType());
-  types.emplace("cdist", input_args[3]->BuildType());
+  (void)types.emplace("grad", input_args[0]->BuildType());
+  (void)types.emplace("input_x", input_args[1]->BuildType());
+  (void)types.emplace("input_y", input_args[2]->BuildType());
+  (void)types.emplace("cdist", input_args[3]->BuildType());
   return CheckAndConvertUtils::CheckTensorTypeSame(types, valid_types, primitive->name());
 }
 } // namespace
@@ -24,9 +24,9 @@
 namespace mindspore {
 namespace ops {
 namespace {
-constexpr size_t kDoutIndex = 0;
-constexpr size_t kInputIndex = 1;
-constexpr size_t kFilterSizeIdex = 2;
+constexpr int64_t kDoutIndex = 0;
+constexpr int64_t kInputIndex = 1;
+constexpr int64_t kFilterSizeIdex = 2;
 constexpr size_t kStride2dSize = 2;
 constexpr size_t kStride4dSize = 4;
 
@@ -56,7 +56,6 @@ abstract::ShapePtr Conv2DBackpropFilterInferShape(const PrimitivePtr &primitive,
   std::vector<int64_t> out_shape;
-  abstract::ShapePtr ret_shape;
   TransStrideTo4D(primitive, input_args);
 
   auto filter_size = input_args[kFilterSizeIdex];
   auto filter_size_v = filter_size->BuildValue();
   MS_EXCEPTION_IF_NULL(filter_size_v);
@@ -27,7 +27,7 @@ namespace ops {
 namespace {
 constexpr size_t kDoutIndex = 0;
 constexpr size_t kInputIndex = 1;
-constexpr size_t kSizeIndex = 2;
+constexpr int64_t kSizeIndex = 2;
 
 void SetPadList(const PrimitivePtr &primitive, const std::vector<int64_t> &dout_shape_norm,
                 const std::vector<int64_t> &x_size_v) {
@@ -24,50 +24,55 @@ namespace {
 AbstractBasePtr LstmGradInfer(const PrimitivePtr &primitive, const std::vector<AbstractBasePtr> &input_args) {
   // infer shape
   MS_EXCEPTION_IF_NULL(primitive);
   for (const auto &input : input_args) {
     MS_EXCEPTION_IF_NULL(input);
   }
   return nullptr;
 }
 } // namespace
 
 void LSTMGrad::set_input_size(const int64_t input_size) {
-  CheckAndConvertUtils::CheckInteger(kInput_size, input_size, kGreaterThan, 0, this->name());
-  AddAttr(kInput_size, MakeValue(input_size));
+  (void)CheckAndConvertUtils::CheckInteger(kInput_size, input_size, kGreaterThan, 0, this->name());
+  (void)AddAttr(kInput_size, MakeValue(input_size));
 }
 int64_t LSTMGrad::get_input_size() const { return GetValue<int64_t>(GetAttr(kInput_size)); }
 void LSTMGrad::set_hidden_size(const int64_t hidden_size) {
-  CheckAndConvertUtils::CheckInteger(kHidden_size, hidden_size, kGreaterThan, 0, this->name());
-  AddAttr(kHidden_size, MakeValue(hidden_size));
+  (void)CheckAndConvertUtils::CheckInteger(kHidden_size, hidden_size, kGreaterThan, 0, this->name());
+  (void)AddAttr(kHidden_size, MakeValue(hidden_size));
 }
 int64_t LSTMGrad::get_hidden_size() const { return GetValue<int64_t>(GetAttr(kHidden_size)); }
 void LSTMGrad::set_num_layers(const int64_t num_layers) {
-  CheckAndConvertUtils::CheckInteger(kNumLayers, num_layers, kGreaterThan, 0, this->name());
-  AddAttr(kNumLayers, MakeValue(num_layers));
+  (void)CheckAndConvertUtils::CheckInteger(kNumLayers, num_layers, kGreaterThan, 0, this->name());
+  (void)AddAttr(kNumLayers, MakeValue(num_layers));
 }
 int64_t LSTMGrad::get_num_layers() const { return GetValue<int64_t>(GetAttr(kNumLayers)); }
-void LSTMGrad::set_has_bias(const bool has_bias) { AddAttr(kHasBias, MakeValue(has_bias)); }
+void LSTMGrad::set_has_bias(const bool has_bias) { (void)AddAttr(kHasBias, MakeValue(has_bias)); }
 bool LSTMGrad::get_has_bias() const {
   auto value_ptr = this->GetAttr(kHasBias);
   return GetValue<bool>(value_ptr);
 }
 void LSTMGrad::set_dropout(const float dropout) {
-  CheckAndConvertUtils::CheckInRange<float>(kDropout, dropout, kIncludeBoth, {0.0, 1.0}, this->name());
-  AddAttr(kDropout, MakeValue(dropout));
+  (void)CheckAndConvertUtils::CheckInRange<float>(kDropout, dropout, kIncludeBoth, {0.0, 1.0}, this->name());
+  (void)AddAttr(kDropout, MakeValue(dropout));
 }
 float LSTMGrad::get_dropout() const {
   auto value_ptr = this->GetAttr(kDropout);
   return GetValue<float>(value_ptr);
 }
-void LSTMGrad::set_bidirectional(const bool bidirectional) { AddAttr(kBidirectional, MakeValue(bidirectional)); }
+void LSTMGrad::set_bidirectional(const bool bidirectional) { (void)AddAttr(kBidirectional, MakeValue(bidirectional)); }
 bool LSTMGrad::get_bidirectional() const {
   auto value_ptr = this->GetAttr(kBidirectional);
   return GetValue<bool>(value_ptr);
 }
-void LSTMGrad::set_num_directions(const int64_t num_directions) { AddAttr(kNumDirections, MakeValue(num_directions)); }
+void LSTMGrad::set_num_directions(const int64_t num_directions) {
+  (void)AddAttr(kNumDirections, MakeValue(num_directions));
+}
 int64_t LSTMGrad::get_num_directions() const { return GetValue<int64_t>(GetAttr(kNumDirections)); }
-void LSTMGrad::set_zoneout_cell(float zoneout_cell) { AddAttr(kZoneoutCell, MakeValue(zoneout_cell)); }
+void LSTMGrad::set_zoneout_cell(float zoneout_cell) { (void)AddAttr(kZoneoutCell, MakeValue(zoneout_cell)); }
+
 float LSTMGrad::get_zoneout_cell() const { return GetValue<float>(this->GetAttr(kZoneoutCell)); }
+
-void LSTMGrad::set_zoneout_hidden(float zoneout_hidden) { AddAttr(kZoneoutHidden, MakeValue(zoneout_hidden)); }
+void LSTMGrad::set_zoneout_hidden(float zoneout_hidden) { (void)AddAttr(kZoneoutHidden, MakeValue(zoneout_hidden)); }
+
 float LSTMGrad::get_zoneout_hidden() const { return GetValue<float>(this->GetAttr(kZoneoutHidden)); }
 
@@ -23,7 +23,7 @@
 namespace mindspore {
 namespace ops {
 namespace {
-constexpr size_t kInputSize = 3;
+constexpr int64_t kInputSize = 3;
 abstract::ShapePtr SoftMarginLossGradInferShape(const PrimitivePtr &primitive,
                                                 const std::vector<AbstractBasePtr> &input_args) {
   MS_EXCEPTION_IF_NULL(primitive);
@@ -26,7 +26,8 @@ abstract::ShapePtr IndexAddInferShape(const PrimitivePtr &primitive, const std::
   MS_EXCEPTION_IF_NULL(primitive);
   auto prim_name = primitive->name();
   const int64_t input_num = 3;
-  (void)CheckAndConvertUtils::CheckInteger("input numbers", input_args.size(), kEqual, input_num, prim_name);
+  (void)CheckAndConvertUtils::CheckInteger("input numbers", SizeToLong(input_args.size()), kEqual, input_num,
+                                           prim_name);
   for (const auto &item : input_args) {
     MS_EXCEPTION_IF_NULL(item);
   }
@@ -33,7 +33,7 @@ AbstractBasePtr MergeInfer(const abstract::AnalysisEnginePtr &, const PrimitiveP
   auto inputs_shape = input_args[0]->BuildShape()->cast<abstract::TupleShapePtr>()->shape();
   std::map<std::string, TypePtr> args;
   for (size_t i = 0; i != inputs_type.size(); i++) {
-    args.insert(std::make_pair("input[" + std::to_string(i) + "]", inputs_type[i]));
+    (void)args.insert(std::make_pair("input[" + std::to_string(i) + "]", inputs_type[i]));
   }
   std::set<TypePtr> template_type = common_valid_types;
   (void)template_type.emplace(kBool);
@@ -101,12 +101,15 @@ void Check(const PrimitivePtr &primitive, const std::vector<AbstractBasePtr> &in
   auto send_rank_ids = GetValue<std::vector<int64_t>>(primitive->GetAttr(kSendRankIds));
   const int64_t input_num = 0;
   if (send_rank_ids.empty()) {
-    (void)CheckAndConvertUtils::CheckInteger("input_numbers", input_args.size(), kEqual, input_num, prim_name);
+    (void)CheckAndConvertUtils::CheckInteger("input_numbers", SizeToLong(input_args.size()), kEqual, input_num,
+                                             prim_name);
     return;
   }
   // check input shape & attr send shape
-  (void)CheckAndConvertUtils::CheckInteger("input_numbers", input_args.size(), kEqual, 1, prim_name);
-  (void)CheckAndConvertUtils::CheckArgs<abstract::AbstractTuple>(prim_name, input_args, 0);
+  const int64_t input_num_ = 1;
+  (void)CheckAndConvertUtils::CheckInteger("input_numbers", SizeToLong(input_args.size()), kEqual, input_num_,
+                                           prim_name);
+  (void)CheckAndConvertUtils::CheckArgs<abstract::AbstractTuple>(prim_name, input_args, input_num);
   auto abstract_tuple = input_args[0]->cast<abstract::AbstractTuplePtr>();
   MS_EXCEPTION_IF_NULL(abstract_tuple);
   auto abstract_element = abstract_tuple->elements();
@@ -41,12 +41,12 @@ std::vector<int64_t> CalBroadCastShape(std::vector<int64_t> x_shape, std::vector
     (void)std::copy(x_shape.begin(), x_shape.end() - length, std::back_inserter(broadcast_shape));
   }
   for (int64_t i = -length; i < 0; i++) {
-    if (x_shape[x_length + i] == 1) {
-      broadcast_shape.push_back(y_shape[y_length + i]);
-    } else if (y_shape[y_length + i] == 1) {
-      broadcast_shape.push_back(x_shape[x_length + i]);
-    } else if (x_shape[x_length + i] == y_shape[y_length + i]) {
-      broadcast_shape.push_back(x_shape[x_length + i]);
+    if (x_shape[LongToSize(x_length + i)] == 1) {
+      (void)broadcast_shape.push_back(y_shape[LongToSize(y_length + i)]);
+    } else if (y_shape[LongToSize(y_length + i)] == 1) {
+      (void)broadcast_shape.push_back(x_shape[LongToSize(x_length + i)]);
+    } else if (x_shape[x_length + i] == y_shape[LongToSize(y_length + i)]) {
+      (void)broadcast_shape.push_back(x_shape[LongToSize(x_length + i)]);
     } else {
       MS_EXCEPTION(ValueError) << "For op " << op_name << ", the two input '" << op_x_name << "' and '" << op_y_name
                                << "' can not broadcast";
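Note: the CalBroadCastShape hunk only wraps indices with LongToSize, but the loop it touches implements NumPy-style broadcasting: walk the trailing dimensions, and two sizes are compatible when they match or one of them is 1. A condensed sketch of that rule with simplified error handling:

#include <algorithm>
#include <cstdint>
#include <stdexcept>
#include <vector>

// NumPy-style broadcast over trailing dimensions; missing leading dims of the
// shorter shape are treated as 1, so they are copied from the longer shape.
std::vector<int64_t> BroadcastShape(const std::vector<int64_t> &x, const std::vector<int64_t> &y) {
  std::vector<int64_t> out(std::max(x.size(), y.size()), 1);
  for (size_t i = 1; i <= out.size(); ++i) {
    int64_t xv = i <= x.size() ? x[x.size() - i] : 1;
    int64_t yv = i <= y.size() ? y[y.size() - i] : 1;
    if (xv != yv && xv != 1 && yv != 1) {
      throw std::invalid_argument("shapes can not broadcast");
    }
    out[out.size() - i] = std::max(xv, yv);
  }
  return out;
}

int main() {
  auto s = BroadcastShape({8, 1, 6}, {7, 1});  // -> {8, 7, 6}
  return s[1] == 7 ? 0 : 1;
}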
@@ -49,7 +49,7 @@ void InferImplReduceFuncCalShape(ShapeVector *shape, const ShapeVector &x_shape,
   if (keep_dims_value) {
     for (it = axis_items.begin(); it != axis_items.end(); ++it) {
       auto axis_value = GetValue<int64_t>(*it);
-      shape->at(axis_value) = 1;
+      shape->at(LongToSize(axis_value)) = 1;
     }
   } else {
     std::vector<int64_t> axis_value_list;
@@ -185,7 +185,9 @@ TypePtr InferType(const PrimitivePtr &prim, const std::vector<AbstractBasePtr> &
 
 AbstractBasePtr ReduceSumInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive,
                                const std::vector<AbstractBasePtr> &input_args) {
-  CheckAndConvertUtils::CheckInteger("input size", input_args.size(), kGreaterEqual, 1, primitive->name());
+  const int64_t input_num = 1;
+  CheckAndConvertUtils::CheckInteger("input size", SizeToLong(input_args.size()), kGreaterEqual, input_num,
+                                     primitive->name());
   return abstract::MakeAbstract(InferShape(primitive, input_args), InferType(primitive, input_args));
 }
 } // namespace ops
@@ -29,7 +29,8 @@ abstract::ShapePtr InferShape(const PrimitivePtr &primitive, const std::vector<A
   MS_EXCEPTION_IF_NULL(primitive);
   auto prim_name = primitive->name();
   const int64_t input_num = 1;
-  (void)CheckAndConvertUtils::CheckInteger("input numbers", input_args.size(), kEqual, input_num, prim_name);
+  (void)CheckAndConvertUtils::CheckInteger("input numbers", SizeToLong(input_args.size()), kEqual, input_num,
+                                           prim_name);
   auto x_shape = CheckAndConvertUtils::ConvertShapePtrToShapeMap(input_args[0]->BuildShape())[kShape];
   auto axis = GetValue<int64_t>(primitive->GetAttr(kAxis));
   auto x_rank = SizeToLong(x_shape.size());
@@ -30,7 +30,7 @@ void SmoothL1Loss::set_beta(const float beta) { (void)this->AddAttr(kBeta, MakeV
 
 float SmoothL1Loss::get_beta() const {
   auto value_ptr = this->GetAttr(kBeta);
-  return GetValue<int64_t>(value_ptr);
+  return GetValue<int32_t>(value_ptr);
 }
 
 AbstractBasePtr SmoothL1LossInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive,
@@ -23,7 +23,7 @@
 namespace mindspore {
 namespace ops {
 namespace {
-constexpr size_t kInputSize = 2;
+constexpr int64_t kInputSize = 2;
 abstract::ShapePtr SoftMarginLossInferShape(const PrimitivePtr &primitive,
                                             const std::vector<AbstractBasePtr> &input_args) {
   MS_EXCEPTION_IF_NULL(primitive);
@@ -30,7 +30,9 @@ namespace ops {
 namespace {
 abstract::ShapePtr InferShape(const PrimitivePtr &primitive, const std::vector<AbstractBasePtr> &input_args) {
   MS_EXCEPTION_IF_NULL(primitive);
-  (void)CheckAndConvertUtils::CheckInteger("input number", input_args.size(), kEqual, 1, primitive->name());
+  const int64_t input_num = 1;
+  (void)CheckAndConvertUtils::CheckInteger("input number", SizeToLong(input_args.size()), kEqual, input_num,
+                                           primitive->name());
   for (const auto &item : input_args) {
     MS_EXCEPTION_IF_NULL(item);
   }
@@ -39,7 +41,9 @@ abstract::ShapePtr InferShape(const PrimitivePtr &primitive, const std::vector<A
 }
 TypePtr InferType(const PrimitivePtr &prim, const std::vector<AbstractBasePtr> &input_args) {
   MS_EXCEPTION_IF_NULL(prim);
-  (void)CheckAndConvertUtils::CheckInteger("input number", input_args.size(), kEqual, 1, prim->name());
+  const int64_t input_num = 1;
+  (void)CheckAndConvertUtils::CheckInteger("input number", SizeToLong(input_args.size()), kEqual, input_num,
+                                           prim->name());
   if (std::any_of(input_args.begin(), input_args.end(), [](const AbstractBasePtr &a) { return a == nullptr; })) {
     MS_LOG(EXCEPTION) << "nullptr";
   }
@@ -38,9 +38,10 @@ abstract::ShapePtr InferShape(const PrimitivePtr &primitive, const std::vector<A
   const size_t kDimsOffset = 2;
   for (size_t i = 0; i < kDimsOffset; i++) {
     auto padded = output_shape[i + kDimsOffset] + paddings[i][0] + paddings[i][1];
-    (void)CheckAndConvertUtils::CheckInteger("padded shape", SizeToLong(padded % block_shape_vector.size()), kEqual, 0,
-                                             prim_name);
-    output_shape[i + kDimsOffset] = SizeToLong(padded / block_shape_vector.size());
+    const int64_t input_num = 0;
+    (void)CheckAndConvertUtils::CheckInteger("padded shape", SizeToLong(padded % block_shape_vector.size()), kEqual,
+                                             input_num, prim_name);
+    output_shape[i + kDimsOffset] = padded / SizeToLong(block_shape_vector.size());
   }
   output_shape[0] *= SizeToLong(block_shape_vector.size() * block_shape_vector.size());
   return std::make_shared<abstract::Shape>(output_shape);
@@ -55,9 +55,11 @@ abstract::TupleShapePtr InferShape(const PrimitivePtr &primitive, const std::vec
   }
   // Indices must be rank 1
   const int64_t input_num1 = 1;
-  (void)CheckAndConvertUtils::CheckInteger("indices dim", indices_shape.size(), kEqual, input_num1, prim_name);
+  (void)CheckAndConvertUtils::CheckInteger("indices dim", SizeToLong(indices_shape.size()), kEqual, input_num1,
+                                           prim_name);
   // Dimension of var must be equal or greater than 1
-  (void)CheckAndConvertUtils::CheckInteger("dimension of var", var_shape.size(), kGreaterEqual, input_num1, prim_name);
+  (void)CheckAndConvertUtils::CheckInteger("dimension of var", SizeToLong(var_shape.size()), kGreaterEqual, input_num1,
+                                           prim_name);
   // Indices shape must be equal to the first dimension of var
   CheckAndConvertUtils::Check("indices shape", indices_shape[0], kEqual, "the first dimension of var", var_shape[0],
                               prim_name);
@@ -21,7 +21,7 @@
 
 namespace mindspore {
 namespace ops {
-void Split::Init(const std::vector<int64_t> &size_splits, const int64_t axis, const int64_t output_num) {
+void Split::Init(const int64_t axis, const int64_t output_num) {
   this->set_axis(axis);
   this->set_output_num(output_num);
 }
@@ -31,7 +31,7 @@ class MS_CORE_API Split : public PrimitiveC {
   Split() : PrimitiveC(kNameSplit) {}
   ~Split() = default;
   MS_DECLARE_PARENT(Split, PrimitiveC);
-  void Init(const std::vector<int64_t> &size_splits, const int64_t axis, const int64_t output_num);
+  void Init(const int64_t axis, const int64_t output_num);
   void set_size_splits(const std::vector<int64_t> &size_splits);
   void set_axis(const int64_t axis);
   void set_output_num(const int64_t output_num);
@@ -48,7 +48,7 @@ abstract::TupleShapePtr InferShape(const PrimitivePtr &primitive, const std::vec
   for (int64_t i = 0; i < num_split; i++) {
     (void)CheckAndConvertUtils::CheckInRange("elements of size_splits", size_splits[i], kIncludeBoth,
                                              {0, shape_of_split_dim}, prim_name);
-    sum_of_size_splits += size_splits[i];
+    sum_of_size_splits += size_splits[LongToSize(i)];
   }
   CheckAndConvertUtils::Check("sum of size_splits", sum_of_size_splits, kEqual, "dimension of value along split_dim",
                               shape_of_split_dim, prim_name);
@@ -62,7 +62,7 @@ abstract::TupleShapePtr InferShape(const PrimitivePtr &primitive, const std::vec
   for (int64_t i = 0; i < num_split - 1; i++) {
     (void)CheckAndConvertUtils::CheckInRange("elements of size_splits", size_splits[i], kIncludeBoth,
                                              {0, shape_of_split_dim}, prim_name);
-    sum_of_size_splits += size_splits[i];
+    sum_of_size_splits += size_splits[LongToSize(i)];
   }
   auto default_value = shape_of_split_dim - sum_of_size_splits;
   (void)size_splits.insert(default_idx, default_value);
@@ -32,7 +32,8 @@ void ImpleSquare(void *origin, void *target, size_t size) {
   }
 }
 
-abstract::ShapePtr SquareInferShape(const std::vector<AbstractBasePtr> &input_args) {
+abstract::ShapePtr SquareInferShape(const PrimitivePtr &primitive, const std::vector<AbstractBasePtr> &input_args) {
+  MS_EXCEPTION_IF_NULL(primitive);
   auto shape_map = CheckAndConvertUtils::ConvertShapePtrToShapeMap(input_args[kInputIndex0]->BuildShape());
   auto in_shape = shape_map[kShape];
   auto min_shape = shape_map[kMinShape];
@@ -52,7 +53,7 @@ AbstractBasePtr SquareInfer(const abstract::AnalysisEnginePtr &, const Primitive
   const int64_t input_num = 1;
   CheckAndConvertUtils::CheckInputArgs(input_args, kEqual, input_num, primitive->name());
 
-  return abstract::MakeAbstract(SquareInferShape(input_args), SquareInferType(primitive, input_args));
+  return abstract::MakeAbstract(SquareInferShape(primitive, input_args), SquareInferType(primitive, input_args));
 }
 
 ValuePtr SquareInferValue(const PrimitivePtr &prim, const std::vector<AbstractBasePtr> &input_args) {
@@ -72,7 +73,7 @@ ValuePtr SquareInferValue(const PrimitivePtr &prim, const std::vector<AbstractBa
 
   auto data_size = x_tensor->DataSize();
   auto dtype = x_tensor->data_type();
-  auto shape = SquareInferShape(input_args)->shape();
+  auto shape = SquareInferShape(prim, input_args)->shape();
   auto result_tensor = std::make_shared<tensor::Tensor>(dtype, shape); // same shape and dtype
   auto x_datac = x_tensor->data_c();
   auto result_datac = result_tensor->data_c();
@@ -146,7 +146,7 @@ void EllipsisInferShape(const PrimitivePtr &primitive, const std::vector<int64_t
   }
 
   size_t ellipsis_occupied_dims = x_rank - i - (slice_len - (j + 1)) + num;
-  (void)infer_shape->insert(infer_shape->end(), x_shape.begin() + i,
+  (void)infer_shape->insert(infer_shape->end(), x_shape.begin() + LongToSize(i),
                             x_shape.begin() + SizeToLong(i + ellipsis_occupied_dims));
   j += 1;
   i += ellipsis_occupied_dims;
@@ -55,7 +55,7 @@ AbstractBasePtr UnsqueezeInfer(const abstract::AnalysisEnginePtr &, const Primit
     if (ax_itr < dim_rank && dims[ax_itr] == (int64_t)i) {
       (void)out_shape.emplace_back(1);
       ax_itr++;
-    } else if (ax_itr < dim_rank && dims[ax_itr] + sz == i) {
+    } else if (ax_itr < dim_rank && dims[ax_itr] + sz == LongToSize(i)) {
       (void)out_shape.emplace_back(1);
       ax_itr++;
     } else {