code clean

xuanyue 2022-09-08 21:23:30 +08:00
parent 3324fea63f
commit cf08c4fe72
30 changed files with 107 additions and 95 deletions

View File

@ -136,7 +136,7 @@ class MS_API IKernel {
std::vector<mindspore::MSTensor> outputs_;
const Primitive *primitive_ = nullptr;
std::map<std::string, std::string> attrs_;
- const std::map<std::string, std::map<std::string, std::string>> *config_;
+ const std::map<std::string, std::map<std::string, std::string>> *config_ = nullptr;
};
} // namespace kernel
} // namespace mindspore
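
This hunk illustrates a pattern repeated throughout the commit: raw-pointer members get an in-class default so a freshly constructed object never carries an indeterminate pointer. A minimal standalone sketch of the idea (ExampleKernel and its member are hypothetical, not the MindSpore declaration):

#include <map>
#include <string>

class ExampleKernel {
 private:
  // With the in-class initializer, code can safely test `config_ == nullptr`
  // before a configuration map has been attached.
  const std::map<std::string, std::string> *config_ = nullptr;
};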

View File

@ -334,14 +334,17 @@ int CastTensorData(Tensor *dst, Tensor *src, bool support_fp16) {
}
int CastCommonTensorData(Tensor *dst, Tensor *src, bool support_fp16) {
- dst->ReallocData();
+ auto dst_data = dst->ReallocData(); /* using MutableData to sync GPU data */
+ if (dst_data == nullptr) {
+ MS_LOG(ERROR) << "Remalloc memory failed.";
+ return RET_NULL_PTR;
+ }
dst->ResetRefCount();
if (dst->shape() != src->shape()) {
MS_LOG(ERROR) << "dst tensor: " << dst->tensor_name() << " shape: " << dst->shape() << " vs "
<< "src tensor: " << src->tensor_name() << " shape: " << src->shape();
return RET_PARAM_INVALID;
}
- auto dst_data = dst->MutableData(); /* using MutableData to sync GPU data */
auto src_data = src->MutableData();
auto src_nums_size = src->ElementsNum();
auto dst_data_type = static_cast<int>(dst->data_type());
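
The rewrite above takes the buffer returned by ReallocData() and returns an error before the pointer is ever used, instead of invoking the method and discarding its result. A small sketch of the same guard using plain malloc (the return code and message are stand-ins for RET_NULL_PTR and MS_LOG, which are not defined here):

#include <cstdlib>
#include <iostream>

int PrepareBuffer(void **out, size_t size) {
  void *data = std::malloc(size);  // plays the role of dst->ReallocData()
  if (data == nullptr) {           // report the failure before any use of the pointer
    std::cerr << "Remalloc memory failed." << std::endl;
    return -1;                     // stand-in for RET_NULL_PTR
  }
  *out = data;
  return 0;
}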

View File

@ -48,8 +48,8 @@ int ConcatBaseCPUKernel::DoConcat(int task_id) {
int64_t start_row = start / inner_sizes_.back();
int64_t end_row = end / inner_sizes_.back();
std::vector<const uint8_t *> src;
- for (size_t i = 0; i < inputs_.size(); ++i) {
- src.push_back(inputs_[i] + start_row * inner_sizes_[i]);
+ for (size_t i = 0; i < inputs_ptr_.size(); ++i) {
+ src.push_back(inputs_ptr_[i] + start_row * inner_sizes_[i]);
}
uint8_t *out = output_ + start;
int input_index = block_boundary_infos_[task_id].begin_input;
@ -79,14 +79,14 @@ int ConcatBaseCPUKernel::DoConcat(int task_id) {
src[input_index] += inner_sizes_[input_index];
out += size;
++input_index;
- for (; input_index < static_cast<int>(inputs_.size()); ++input_index) {
+ for (; input_index < static_cast<int>(inputs_ptr_.size()); ++input_index) {
memcpy(out, src[input_index], inner_sizes_[input_index]);
src[input_index] += inner_sizes_[input_index];
out += inner_sizes_[input_index];
}
++start_row;
for (; start_row < end_row; ++start_row) {
- for (input_index = 0; input_index < static_cast<int>(inputs_.size()); ++input_index) {
+ for (input_index = 0; input_index < static_cast<int>(inputs_ptr_.size()); ++input_index) {
memcpy(out, src[input_index], inner_sizes_[input_index]);
src[input_index] += inner_sizes_[input_index];
out += inner_sizes_[input_index];
@ -226,12 +226,12 @@ int ConcatBaseCPUKernel::Run() {
if (outer_size_ == 0 || inner_sizes_.back() == 0) {
return RET_OK;
}
- inputs_.clear();
+ inputs_ptr_.clear();
for (size_t i = 0; i < in_tensors_.size(); ++i) {
if (!is_with_data_[i]) {
continue;
}
- inputs_.push_back(static_cast<const uint8_t *>(in_tensors_[i]->data()));
+ inputs_ptr_.push_back(static_cast<const uint8_t *>(in_tensors_[i]->data()));
}
output_ = static_cast<uint8_t *>(out_tensors_.front()->data());
MS_CHECK_TRUE_MSG(output_ != nullptr, RET_ERROR, "output data is a nullptr.");

View File

@ -42,7 +42,7 @@ class ConcatBaseCPUKernel : public LiteKernel {
int64_t outer_size_{0};
uint8_t *output_{nullptr};
std::vector<bool> is_with_data_;
- std::vector<const uint8_t *> inputs_;
+ std::vector<const uint8_t *> inputs_ptr_;
std::vector<int64_t> block_splits_;
std::vector<int64_t> inner_sizes_; // byte-inner-size (including axis) of each input and the last one is output's.

View File

@ -219,7 +219,7 @@ int TransposeBaseCPUKernel::CopyInputToOutput() {
CHECK_NULL_RETURN(in_tensor->data());
MS_CHECK_FALSE(in_tensor->Size() == 0, RET_ERROR);
if (in_tensor->data() != out_tensor->data()) {
- memcpy(out_tensor->data(), in_tensor->data(), in_tensor->Size());
+ (void)memcpy(out_tensor->data(), in_tensor->data(), in_tensor->Size());
}
return RET_OK;
}
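
This is the first of many hunks that cast a discarded return value to void — here memcpy, later memset, insert, erase, transform, and AddAttr. The cast changes nothing at runtime; it only records that ignoring the result is intentional, which satisfies lint rules about unused return values. A minimal sketch:

#include <cstring>

void CopyRow(char *dst, const char *src, size_t n) {
  // memcpy returns dst; the (void) documents that the returned pointer is deliberately unused.
  (void)memcpy(dst, src, n);
}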

View File

@ -36,7 +36,6 @@ class TransposeBaseCPUKernel : public LiteKernel {
virtual int DoTransposeMultiThread(int task_id) = 0;
protected:
- virtual int DoTransposeSingleThread() = 0;
// only true when perm is [1, 0] or [0, 2, 1]
bool opt_run_{true};
@ -56,6 +55,7 @@ class TransposeBaseCPUKernel : public LiteKernel {
TransposeParameter *param_{nullptr};
private:
+ virtual int DoTransposeSingleThread() = 0;
int CopyInputToOutput();
int ResetStatus();
// to simplify transpose, we consider two steps. Firstly, delete the dimension where the value is 1. Secondly, fuse

View File

@ -35,7 +35,7 @@ int ConcatFp16Run(void *cdata, int task_id, float lhs_scale, float rhs_scale) {
}
int ConcatFp16CPUKernel::EnsureFp16InputsAndOutput() {
- inputs_.clear();
+ inputs_ptr_.clear();
for (size_t i = 0; i < in_tensors_.size(); ++i) {
if (!is_with_data_[i]) {
continue;
@ -43,7 +43,7 @@ int ConcatFp16CPUKernel::EnsureFp16InputsAndOutput() {
auto input = in_tensors_[i]->data();
MS_CHECK_TRUE_MSG(input != nullptr, RET_ERROR, "input-data is a nullptr.");
if (in_tensors_[i]->data_type() == kNumberTypeFloat16) {
- inputs_.push_back(static_cast<const uint8_t *>(input));
+ inputs_ptr_.push_back(static_cast<const uint8_t *>(input));
continue;
}
if (in_tensors_[i]->data_type() == kNumberTypeFloat32 || in_tensors_[i]->data_type() == kNumberTypeFloat) {
@ -53,7 +53,7 @@ int ConcatFp16CPUKernel::EnsureFp16InputsAndOutput() {
MS_LOG(ERROR) << "malloc failed";
return RET_ERROR;
}
- inputs_.push_back(tmp);
+ inputs_ptr_.push_back(tmp);
tmp_buffers_.push_back(tmp);
Float32ToFloat16(static_cast<float *>(input), reinterpret_cast<float16_t *>(tmp), in_tensors_[i]->ElementsNum());
} else {

View File

@ -30,10 +30,10 @@ class TransposeFp16CPUKernel : public TransposeBaseCPUKernel {
~TransposeFp16CPUKernel() = default;
int ReSize() override;
+ int DoTransposeMultiThread(int task_id) override;
private:
int DoTransposeSingleThread() override;
- int DoTransposeMultiThread(int task_id) override;
};
} // namespace mindspore::kernel

View File

@ -100,7 +100,8 @@ int AdderCPUKernel::InitWeightBias() {
CHECK_NULL_RETURN(in_tensors_.at(kBiasIndex));
auto ori_bias = reinterpret_cast<float *>(in_tensors_.at(kBiasIndex)->MutableData());
CHECK_NULL_RETURN(ori_bias);
- MS_CHECK_TRUE_MSG(in_tensors_.at(kBiasIndex)->Size() == out_channel * sizeof(float), RET_ERROR, "bias is invalid.");
+ MS_CHECK_TRUE_MSG(in_tensors_.at(kBiasIndex)->Size() == static_cast<size_t>(out_channel) * sizeof(float), RET_ERROR,
+ "bias is invalid.");
memcpy(bias_data_, ori_bias, out_channel * sizeof(float));
} else {
MS_ASSERT(in_tensors_.size() == kInputSize1);

View File

@ -237,7 +237,8 @@ kernel::LiteKernel *AffineFp32CPUKernel::FullMatmulKernelCreate() {
int context_max = affine_parameter_->context_[affine_parameter_->context_size_ - 1];
std::vector<int> splice_output_shape = {1, input_shape.at(1) - (context_max - context_min), out_dim};
- full_input_ = new lite::Tensor(kNumberTypeFloat32, splice_output_shape);
+ full_input_ = new (std::nothrow) lite::Tensor(kNumberTypeFloat32, splice_output_shape);
+ MS_CHECK_TRUE_MSG(full_input_ != nullptr, nullptr, "Create a new-tensor failed.");
if (in_tensors_.size() < kAffineMinInputNum) {
MS_LOG(ERROR) << "wrong affine input size";
@ -286,11 +287,13 @@ kernel::LiteKernel *AffineFp32CPUKernel::IncrementMatmulKernelCreate() {
return nullptr;
}
- increment_input_ = new lite::Tensor(kNumberTypeFloat32, {1, 1, affine_splice_output_col});
+ increment_input_ = new (std::nothrow) lite::Tensor(kNumberTypeFloat32, {1, 1, affine_splice_output_col});
+ MS_CHECK_TRUE_MSG(increment_input_ != nullptr, nullptr, "Create a new-tensor failed.");
// matmul_output == 1 * matmul_col
int matmul_col = out_tensors_.front()->shape().back();
- increment_output_ = new lite::Tensor(kNumberTypeFloat32, {1, 1, matmul_col});
+ increment_output_ = new (std::nothrow) lite::Tensor(kNumberTypeFloat32, {1, 1, matmul_col});
+ MS_CHECK_TRUE_MSG(increment_output_ != nullptr, nullptr, "Create a new-tensor failed.");
increment_output_->MallocData();
if (in_tensors_.size() < kAffineMinInputNum) {
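
The tensor allocations above move from plain new, which throws std::bad_alloc on failure, to new (std::nothrow) followed by an explicit null check, matching the error-code style of the surrounding code. A standalone sketch with a hypothetical Tensor type (not the lite::Tensor API):

#include <new>
#include <utility>
#include <vector>

struct Tensor {
  explicit Tensor(std::vector<int> shape) : shape_(std::move(shape)) {}
  std::vector<int> shape_;
};

Tensor *CreateTensor(const std::vector<int> &shape) {
  // nothrow new yields nullptr instead of throwing, so the failure can be
  // reported like any other RET_*-style error.
  Tensor *t = new (std::nothrow) Tensor(shape);
  if (t == nullptr) {
    return nullptr;  // caller logs "Create a new-tensor failed."
  }
  return t;
}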

View File

@ -32,6 +32,8 @@ class ArithmeticCPUKernel : public ArithmeticBaseCPUKernel {
protected:
void DoBroadcast(void *out_data, int input_index) override;
+ int DoExecute(const void *input0, const void *input1, void *output, int64_t size) override;
+ void InitRunFunction(int primitive_type) override;
private:
typedef struct {
@ -45,8 +47,6 @@ class ArithmeticCPUKernel : public ArithmeticBaseCPUKernel {
ArithmeticOptFunc<bool> opt_bool_func_;
} ARITHMETIC_FUNC_INFO_FP32;
- int DoExecute(const void *input0, const void *input1, void *output, int64_t size) override;
- void InitRunFunction(int primitive_type) override;
ArithmeticFunc<float> arithmetic_run_fp32_{nullptr};
ArithmeticOptFunc<float> arithmetic_opt_run_fp32_{nullptr};
ArithmeticFunc<int> arithmetic_run_int_{nullptr};

View File

@ -31,7 +31,7 @@ class GatherCPUKernel : public GatherBaseCPUKernel {
int Run() override;
- private:
+ protected:
int AssignIndicesData(bool isIndicesInt32) override;
};
} // namespace mindspore::kernel

View File

@ -146,7 +146,7 @@ int MatmulFp32BaseCPUKernel::BackupConstMatrix(MatrixInfo *matrix_info, int inde
MS_CHECK_TRUE_MSG(matrix_info->origin_ptr != nullptr, RET_ERROR, "matrix is invalid.");
auto src_ptr = in_tensors_[index]->data();
MS_CHECK_TRUE_MSG(src_ptr != nullptr, RET_ERROR, "matrix is invalid.");
- memcpy(matrix_info->origin_ptr, src_ptr, element_num * sizeof(float));
+ (void)memcpy(matrix_info->origin_ptr, src_ptr, element_num * sizeof(float));
matrix_info->has_origin = true;
return RET_OK;
}
@ -294,7 +294,7 @@ int MatmulFp32BaseCPUKernel::PackBiasMatrix() {
}
} else {
(void)memcpy(matrix_c_.pack_ptr, bias_src, bias_num * static_cast<int>(sizeof(float)));
- memset(matrix_c_.pack_ptr + bias_num, 0, (matrix_c_.pack_size - bias_num) * sizeof(float));
+ (void)memset(matrix_c_.pack_ptr + bias_num, 0, (matrix_c_.pack_size - bias_num) * sizeof(float));
}
if (matrix_c_.has_origin) {
ms_context_->allocator->Free(matrix_c_.origin_ptr);

View File

@ -29,10 +29,10 @@ class TransposeCPUKernel : public TransposeBaseCPUKernel {
~TransposeCPUKernel() override = default;
int ReSize() override;
+ int DoTransposeMultiThread(int task_id) override;
private:
int DoTransposeSingleThread() override;
- int DoTransposeMultiThread(int task_id) override;
};
} // namespace mindspore::kernel

View File

@ -30,12 +30,12 @@ class TransposeServerCPUKernel : public TransposeBaseCPUKernel {
~TransposeServerCPUKernel() override = default;
int ReSize() override;
+ int DoTransposeMultiThread(int task_id) override;
private:
void ComputeIndividualOfflineInfo();
int ChooseThreadCuttingStrategy();
int DoTransposeSingleThread() override;
- int DoTransposeMultiThread(int task_id) override;
std::vector<int64_t> overflow_points_;
std::vector<int64_t> strides_;

View File

@ -82,6 +82,7 @@ class ArithmeticSelfInt8CPUKernel : public LiteKernel {
break;
case PrimitiveType_Reciprocal:
arithmeticSelf_run_ = Int8ElementReciprocal;
+ break;
default:
break;
}
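
The single added break closes a case that previously fell through into default. Since the default branch here is empty, behavior is unchanged; the break mainly states intent and silences implicit-fallthrough warnings. A compact illustration with a hypothetical enum, not the Int8 kernel's real dispatch table:

enum class Op { kAbs, kReciprocal };

int SelectHandler(Op op) {
  int handler = 0;
  switch (op) {
    case Op::kAbs:
      handler = 1;
      break;
    case Op::kReciprocal:
      handler = 2;
      break;  // corresponds to the terminator added by the commit
    default:
      break;
  }
  return handler;
}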

View File

@ -102,7 +102,7 @@ int ScaleInt8CPUKernel::InitParameter() {
auto input0_shape = in_tensors_[FIRST_INPUT]->shape();
auto input1_shape = in_tensors_[SECOND_INPUT]->shape();
if (scale_param_->axis_ < 0) {
- scale_param_->axis_ += input0_shape.size();
+ scale_param_->axis_ += static_cast<int>(input0_shape.size());
}
if (input1_shape.empty()) {
MS_LOG(ERROR) << "Scale tensor shape is incorrect.";

View File

@ -28,10 +28,10 @@ class TransposeInt8CPUKernel : public TransposeBaseCPUKernel {
~TransposeInt8CPUKernel() = default;
int ReSize() override;
+ int DoTransposeMultiThread(int task_id) override;
private:
int DoTransposeSingleThread() override;
- int DoTransposeMultiThread(int task_id) override;
};
} // namespace mindspore::kernel

View File

@ -219,10 +219,11 @@ class KernelExec {
std::static_pointer_cast<Abstractkernel>(kernel_)->set_in_tensors(in_tensors);
} else {
std::vector<MSTensor> tensors_in;
- std::transform(in_tensors.begin(), in_tensors.end(), std::back_inserter(tensors_in), [](lite::Tensor *tensor) {
- auto impl = std::make_shared<mindspore::LiteTensorImpl>(tensor);
- return mindspore::MSTensor(impl);
- });
+ (void)std::transform(in_tensors.begin(), in_tensors.end(), std::back_inserter(tensors_in),
+ [](lite::Tensor *tensor) {
+ auto impl = std::make_shared<mindspore::LiteTensorImpl>(tensor);
+ return mindspore::MSTensor(impl);
+ });
kernel_->set_inputs(tensors_in);
}
}
@ -245,10 +246,11 @@ class KernelExec {
std::static_pointer_cast<Abstractkernel>(kernel_)->set_out_tensors(out_tensors);
} else {
std::vector<MSTensor> tensors_out;
- std::transform(out_tensors.begin(), out_tensors.end(), std::back_inserter(tensors_out), [](lite::Tensor *tensor) {
- auto impl = std::make_shared<mindspore::LiteTensorImpl>(tensor);
- return mindspore::MSTensor(impl);
- });
+ (void)std::transform(out_tensors.begin(), out_tensors.end(), std::back_inserter(tensors_out),
+ [](lite::Tensor *tensor) {
+ auto impl = std::make_shared<mindspore::LiteTensorImpl>(tensor);
+ return mindspore::MSTensor(impl);
+ });
kernel_->set_outputs(tensors_out);
}
}
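
Both setter hunks wrap lite::Tensor pointers into MSTensor handles with std::transform and a back_inserter, now with the returned iterator explicitly discarded. A self-contained sketch of that conversion idiom (SimpleTensor, Impl, and Handle are placeholders, not the real MindSpore types):

#include <algorithm>
#include <iterator>
#include <memory>
#include <utility>
#include <vector>

struct SimpleTensor { int id = 0; };
struct Impl {
  explicit Impl(SimpleTensor *t) : tensor(t) {}
  SimpleTensor *tensor;
};
struct Handle {
  explicit Handle(std::shared_ptr<Impl> i) : impl(std::move(i)) {}
  std::shared_ptr<Impl> impl;
};

std::vector<Handle> Wrap(const std::vector<SimpleTensor *> &raw) {
  std::vector<Handle> wrapped;
  // back_inserter grows `wrapped` on the fly; the (void) marks the returned
  // output iterator as intentionally unused, mirroring the change above.
  (void)std::transform(raw.begin(), raw.end(), std::back_inserter(wrapped), [](SimpleTensor *t) {
    auto impl = std::make_shared<Impl>(t);  // analogous to make_shared<LiteTensorImpl>(tensor)
    return Handle(impl);
  });
  return wrapped;
}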

View File

@ -397,7 +397,7 @@ int Tensor::MallocData(const AllocatorPtr allocator) {
this->data_ = malloc(data_size);
} else {
this->data_ = allocator_->Malloc(data_size);
- allocator_->SetRefCount(this->data_, 1);
+ (void)allocator_->SetRefCount(this->data_, 1);
}
if (this->data_ == nullptr) {
MS_LOG(ERROR) << "Malloc tensor data failed, size=" << data_size;

View File

@ -130,8 +130,8 @@ class Tensor {
// course, you can call FreeData before calling set_data to ensure the data can be freed by current tensor.
void set_data(void *data, bool own_data = true) {
if (allocator_ != nullptr && this->data_ != data) {
- allocator_->IncRefCount(data, 1);
- allocator_->DecRefCount(this->data_, 1);
+ (void)allocator_->IncRefCount(data, 1);
+ (void)allocator_->DecRefCount(this->data_, 1);
}
this->data_ = data;
this->own_data_ = own_data;

View File

@ -55,7 +55,7 @@ bool RemovePublicPrimitiveInterference::Run(const FuncGraphPtr &func_graph) {
return succ;
}
} else {
- has_visited.insert(first_input);
+ (void)has_visited.insert(first_input);
}
}
}
@ -88,7 +88,7 @@ bool RemovePublicPrimitiveInterference::CreateIndividualPrim(const CNodePtr &cno
prim->set_instance_name(node_type);
}
}
- prim->SetAttrs(public_prim->attrs());
+ (void)prim->SetAttrs(public_prim->attrs());
auto value_node = std::make_shared<ValueNode>(prim);
MS_CHECK_TRUE_MSG(value_node != nullptr, false, "create valueNode failed.");
cnode->set_input(0, value_node);

View File

@ -122,9 +122,9 @@ STATUS GetDataTypeAndShape(const ParameterPtr &param_node, TypeId *data_type, Sh
return RET_OK;
}
- int FetchFromTensorValue(const ValueNodePtr &value_node, const PrimitivePtr &primitive, converter::FmkType fmk_type,
- bool train_flag, DataInfo *data_info, bool copy_data) {
- MS_ASSERT(value_node != nullptr && primitive != nullptr && data_info != nullptr);
+ int FetchFromTensorValue(const ValueNodePtr &value_node, converter::FmkType fmk_type, bool train_flag,
+ DataInfo *data_info, bool copy_data) {
+ MS_ASSERT(value_node != nullptr && data_info != nullptr);
auto valueAbstract = value_node->abstract();
MS_CHECK_TRUE_MSG(valueAbstract != nullptr, RET_ERROR, "valueAbstract is nullptr");
auto abstract_tensor = valueAbstract->cast<abstract::AbstractTensorPtr>();
@ -163,8 +163,8 @@ int FetchFromTensorValue(const ValueNodePtr &value_node, const PrimitivePtr &pri
return RET_OK;
}
- int FetchFromInt32OrInt64ImmValue(const ValueNodePtr &value_node, const PrimitivePtr &primitive, DataInfo *data_info) {
- MS_ASSERT(value_node != nullptr && primitive != nullptr && data_info != nullptr);
+ int FetchFromInt32OrInt64ImmValue(const ValueNodePtr &value_node, DataInfo *data_info) {
+ MS_ASSERT(value_node != nullptr && data_info != nullptr);
// data of int64 is converted to int32 here.
data_info->data_type_ = kNumberTypeInt32;
data_info->shape_ = {1};
@ -179,8 +179,8 @@ int FetchFromInt32OrInt64ImmValue(const ValueNodePtr &value_node, const Primitiv
return RET_OK;
}
- int FetchFromBoolImmValue(const ValueNodePtr &value_node, const PrimitivePtr &primitive, DataInfo *data_info) {
- MS_ASSERT(value_node != nullptr && primitive != nullptr && data_info != nullptr);
+ int FetchFromBoolImmValue(const ValueNodePtr &value_node, DataInfo *data_info) {
+ MS_ASSERT(value_node != nullptr && data_info != nullptr);
data_info->data_type_ = kNumberTypeBool;
data_info->shape_ = {1};
data_info->data_.resize(sizeof(bool));
@ -196,8 +196,8 @@ int FetchFromBoolImmValue(const ValueNodePtr &value_node, const PrimitivePtr &pr
return RET_OK;
}
- int FetchFromNumberValue(const ValueNodePtr &value_node, const PrimitivePtr &primitive, DataInfo *data_info) {
- MS_ASSERT(value_node != nullptr && primitive != nullptr && data_info != nullptr);
+ int FetchFromNumberValue(const ValueNodePtr &value_node, DataInfo *data_info) {
+ MS_ASSERT(value_node != nullptr && data_info != nullptr);
data_info->data_type_ = kNumberTypeInt32;
data_info->shape_ = {1};
data_info->data_.resize(sizeof(int));
@ -214,8 +214,8 @@ int FetchFromNumberValue(const ValueNodePtr &value_node, const PrimitivePtr &pri
return RET_OK;
}
- int FetchFromSequenceValue(const ValueNodePtr &value_node, const PrimitivePtr &primitive, DataInfo *data_info) {
- MS_ASSERT(value_node != nullptr && primitive != nullptr && data_info != nullptr);
+ int FetchFromSequenceValue(const ValueNodePtr &value_node, DataInfo *data_info) {
+ MS_ASSERT(value_node != nullptr && data_info != nullptr);
auto value = value_node->value();
MS_CHECK_TRUE_MSG(value != nullptr, RET_ERROR, "value is nullptr");
std::vector<int32_t> shape;
@ -343,18 +343,18 @@ int FetchDataFromValueNode(const CNodePtr &cnode, size_t index, converter::FmkTy
auto prim = GetValueNode<PrimitivePtr>(cnode->input(0));
MS_CHECK_TRUE_MSG(prim != nullptr, RET_ERROR, "prim is nullptr");
if (value->isa<tensor::Tensor>()) {
- ret = FetchFromTensorValue(value_node, prim, fmk_type, train_flag, data_info, copy_data);
+ ret = FetchFromTensorValue(value_node, fmk_type, train_flag, data_info, copy_data);
if (index == kNumWeightIndex && prim->GetAttr(mindspore::ops::kFormat) != nullptr) {
data_info->format_ = GetValue<int64_t>(prim->GetAttr(mindspore::ops::kFormat));
}
} else if (value->isa<mindspore::Int32Imm>() || value->isa<mindspore::Int64Imm>()) {
- ret = FetchFromInt32OrInt64ImmValue(value_node, prim, data_info);
+ ret = FetchFromInt32OrInt64ImmValue(value_node, data_info);
} else if (value->isa<mindspore::BoolImm>()) {
- ret = FetchFromBoolImmValue(value_node, prim, data_info);
+ ret = FetchFromBoolImmValue(value_node, data_info);
} else if (value->isa<mindspore::ValueSequence>()) {
- ret = FetchFromSequenceValue(value_node, prim, data_info);
+ ret = FetchFromSequenceValue(value_node, data_info);
} else if (value->isa<Number>()) {
- ret = FetchFromNumberValue(value_node, prim, data_info);
+ ret = FetchFromNumberValue(value_node, data_info);
} else if (value->isa<FuncGraph>()) {
MS_LOG(INFO) << "op name:" << value_node->fullname_with_scope() << " input is func_graph";
return RET_NO_CHANGE;

View File

@ -131,8 +131,8 @@ bool AnfEqualValueNode(const AnfNodePtr &a_node, const AnfNodePtr &b_node) {
}
if (utils::isa<ops::PrimitiveC>(a_value_ptr) && utils::isa<ops::PrimitiveC>(b_value_ptr)) {
- auto a_obj = (ops::PrimitiveC *)(a_value_ptr.get());
- auto b_obj = (ops::PrimitiveC *)(b_value_ptr.get());
+ auto a_obj = static_cast<ops::PrimitiveC *>(a_value_ptr.get());
+ auto b_obj = static_cast<ops::PrimitiveC *>(b_value_ptr.get());
return (*a_obj) == (*b_obj);
} else {
return (*a_value_ptr) == (*b_value_ptr);
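
Replacing the C-style casts with static_cast keeps the downcast intent visible and lets the compiler reject conversions between unrelated types, which a C-style cast would silently accept. A reduced sketch with hypothetical types:

struct Base { virtual ~Base() = default; };
struct Derived : Base { int payload = 0; };

int ReadPayload(Base *obj) {
  // static_cast compiles only because Derived derives from Base; the caller
  // must still guarantee that obj really points at a Derived.
  auto *d = static_cast<Derived *>(obj);
  return d->payload;
}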

View File

@ -61,14 +61,14 @@ int64_t GetOutChannels(const CNodePtr &conv_node) {
}
void GenerateNewWeightConv2D(float *dst_weight, const float *conv_weight, const float *scale_weight,
- int weight_shape_size, int kernel_num) {
+ size_t weight_shape_size, int kernel_num) {
MS_ASSERT(dst_weight != nullptr && conv_weight != nullptr && scale_weight != nullptr);
if (kernel_num <= 0) {
return;
}
- auto kernel_size = weight_shape_size / kernel_num;
- for (int i = 0; i < kernel_num; i++) {
- for (int j = 0; j < kernel_size; j++) {
+ auto kernel_size = weight_shape_size / static_cast<size_t>(kernel_num);
+ for (size_t i = 0; i < static_cast<size_t>(kernel_num); ++i) {
+ for (size_t j = 0; j < kernel_size; j++) {
dst_weight[i * kernel_size + j] = conv_weight[i * kernel_size + j] * scale_weight[i];
}
}

View File

@ -38,7 +38,7 @@ constexpr int kReciprocalSecondIndex = -2;
int CommonInferShape(const CNodePtr &cnode, const std::vector<ShapeVector> &in_shapes,
std::vector<ShapeVector> *out_shapes) {
out_shapes->clear();
- out_shapes->insert(out_shapes->begin(), in_shapes.begin(), in_shapes.end());
+ (void)out_shapes->insert(out_shapes->begin(), in_shapes.begin(), in_shapes.end());
return lite::RET_OK;
}
@ -71,7 +71,7 @@ int ExpandDimsInferShape(const CNodePtr &cnode, const std::vector<ShapeVector> &
}
MS_CHECK_TRUE_MSG(axis >= 0 && axis <= first_shape_size, lite::RET_ERROR, "Expanddims's second-input is invalid.");
out_shapes->clear();
- first_shape.insert(first_shape.begin() + axis, 1);
+ (void)first_shape.insert(first_shape.begin() + axis, 1);
out_shapes->push_back(first_shape);
return lite::RET_OK;
}
@ -117,7 +117,7 @@ int GatherInferShape(const CNodePtr &cnode, const std::vector<ShapeVector> &in_s
for (int i = 0; i < axis; ++i) {
out_shape.push_back(first_shape[i]);
}
- out_shape.insert(out_shape.end(), second_shape.begin(), second_shape.end());
+ (void)out_shape.insert(out_shape.end(), second_shape.begin(), second_shape.end());
for (int i = axis + 1; i < first_shape_size; ++i) {
out_shape.push_back(first_shape[i]);
}
@ -140,12 +140,12 @@ int MulInferShape(const CNodePtr &cnode, const std::vector<ShapeVector> &in_shap
for (size_t i = 0; i < (out_shape_size - first_shape.size()); ++i) {
first_shape_expand.push_back(1);
}
- first_shape_expand.insert(first_shape_expand.end(), first_shape.begin(), first_shape.end());
+ (void)first_shape_expand.insert(first_shape_expand.end(), first_shape.begin(), first_shape.end());
ShapeVector second_shape_expand;
for (size_t i = 0; i < (out_shape_size - second_shape.size()); ++i) {
second_shape_expand.push_back(1);
}
- second_shape_expand.insert(second_shape_expand.end(), second_shape.begin(), second_shape.end());
+ (void)second_shape_expand.insert(second_shape_expand.end(), second_shape.begin(), second_shape.end());
ShapeVector out_shape;
for (size_t i = 0; i < out_shape_size; ++i) {
if (first_shape_expand[i] == second_shape_expand[i]) {
@ -173,7 +173,7 @@ int ReshapeInferShape(const CNodePtr &cnode, const std::vector<ShapeVector> &in_
MS_ASSERT(cnode != nullptr);
out_shapes->clear();
if (cnode->size() < kInputSizeTwo) {
- out_shapes->emplace_back();
+ (void)out_shapes->emplace_back();
return lite::RET_OK;
}
if (in_shapes.size() < kInputSizeTwo) {
@ -190,7 +190,7 @@ int ReshapeInferShape(const CNodePtr &cnode, const std::vector<ShapeVector> &in_
MS_CHECK_TRUE_MSG(ret == lite::RET_OK, lite::RET_ERROR, "Reshape fetch second-input's data failed.");
MS_CHECK_TRUE_MSG(data_info.shape_.size() <= 1, lite::RET_ERROR, "Reshape second-input should be <= 1D.");
if (data_info.data_ptr_ == nullptr || (data_info.shape_.size() == 1 && data_info.shape_.front() == 0)) {
- out_shapes->emplace_back();
+ (void)out_shapes->emplace_back();
}
auto element_num = std::accumulate(data_info.shape_.begin(), data_info.shape_.end(), 1L, std::multiplies<int64_t>());
ShapeVector out_shape;
@ -223,7 +223,7 @@ int SplitInferShape(const CNodePtr &cnode, const std::vector<ShapeVector> &in_sh
? std::vector<int64_t>{}
: GetValue<std::vector<int64_t>>(prim->GetAttr(ops::kSizeSplits));
out_num = (out_num == 0 ? static_cast<int64_t>(size_splits.size()) : out_num);
- if (out_num == 0) {
+ if (out_num <= 0) {
return lite::RET_NOT_SUPPORT;
}
auto axis = prim->GetAttr(ops::kAxis) == nullptr ? 0 : GetValue<int64_t>(prim->GetAttr(ops::kAxis));
@ -236,7 +236,7 @@ int SplitInferShape(const CNodePtr &cnode, const std::vector<ShapeVector> &in_sh
MS_CHECK_TRUE_MSG(in_shape[axis] > 0 && in_shape[axis] % out_num == 0, lite::RET_ERROR,
"Split's dim doesn't match split-axis.");
out_shape[axis] = in_shape[axis] / out_num;
- out_shapes->insert(out_shapes->end(), out_num, out_shape);
+ (void)out_shapes->insert(out_shapes->end(), out_num, out_shape);
} else {
for (auto v : size_splits) {
out_shape[axis] = v;
@ -427,7 +427,7 @@ int MulReduceFusion::ProcessOp(const FuncGraphPtr &func_graph, const CNodePtr &c
return lite::RET_OK;
}
if (reduce_mode_ == ReduceMode::Reduce_Mean) {
- auto ret = ProcessGather(func_graph);
+ auto ret = ProcessGather();
if (ret == lite::RET_NOT_SUPPORT) {
return lite::RET_OK;
}
@ -449,7 +449,7 @@ int MulReduceFusion::ProcessOp(const FuncGraphPtr &func_graph, const CNodePtr &c
return lite::RET_OK;
}
- int MulReduceFusion::ProcessGather(const FuncGraphPtr &func_graph) {
+ int MulReduceFusion::ProcessGather() {
MS_ASSERT(gather_.size() > C1NUM);
auto gather_table = gather_->input(1);
if (gather_table == nullptr || utils::isa<CNode>(gather_table)) {
@ -490,7 +490,7 @@ int MulReduceFusion::PostProcess(const FuncGraphPtr &func_graph) {
}
auto cnode = node->cast<CNodePtr>();
if (CheckPrimitiveType(cnode, prim::kPrimConcat)) {
- concat_ops.insert(cnode);
+ (void)concat_ops.insert(cnode);
}
}
}
@ -512,12 +512,12 @@ int MulReduceFusion::PostProcessSqueezeWithConcat(const FuncGraphPtr &func_graph
}
auto manager = func_graph->manager();
MS_ASSERT(manager != nullptr);
- for (size_t i = 1; i < cnode->size(); ++i) {
+ for (int i = 1; i < static_cast<int>(cnode->size()); ++i) {
manager->SetEdge(cnode, i, cnode->input(i)->cast<CNodePtr>()->input(1));
}
auto concat_prim = GetCNodePrimitive(cnode);
MS_ASSERT(concat_prim != nullptr);
- concat_prim->AddAttr(ops::kAxis, MakeValue<int64_t>(concat_axis_));
+ (void)concat_prim->AddAttr(ops::kAxis, MakeValue<int64_t>(concat_axis_));
auto &node_users = manager->node_users();
auto &concat_users = node_users[cnode];
CNodePtr post_squeeze{nullptr};
@ -610,7 +610,7 @@ bool MulReduceFusion::CheckBasicCond(const FuncGraphPtr &func_graph, const CNode
if (mode_attr == nullptr) {
return false;
}
- reduce_mode_ = GetValue<int64_t>(mode_attr);
+ reduce_mode_ = static_cast<int>(GetValue<int64_t>(mode_attr));
if (reduce_mode_ != ReduceMode::Reduce_Sum && reduce_mode_ != ReduceMode::Reduce_Mean) {
return false;
}
@ -671,7 +671,7 @@ bool MulReduceFusion::CheckAxisCond(const CNodePtr &cnode) {
if (data_info.data_type_ == kNumberTypeInt || data_info.data_type_ == kNumberTypeInt32) {
axis_ = *(static_cast<int *>(data_info.data_ptr_));
} else if (data_info.data_type_ == kNumberTypeInt64) {
- axis_ = *(static_cast<int64_t *>(data_info.data_ptr_));
+ axis_ = static_cast<int>(*(static_cast<int64_t *>(data_info.data_ptr_)));
} else {
return false;
}
@ -708,7 +708,7 @@ bool MulReduceFusion::CheckShapeCond(const CNodePtr &cnode) {
(mul_in0_shape[mul_in0_shape.size() - C2NUM] != 1 && mul_in1_shape[mul_in1_shape.size() - C2NUM] != 1)) {
return false;
}
- exchange_ = mul_in1_shape[mul_in1_shape.size() - C2NUM] == 1 ? false : true;
+ exchange_ = mul_in1_shape[mul_in1_shape.size() - C2NUM] != 1;
transpose_a_ = false;
transpose_b_ = true;
MS_ASSERT(mul_in0_shape.back() != 0);
@ -720,7 +720,7 @@ bool MulReduceFusion::CheckShapeCond(const CNodePtr &cnode) {
(mul_in0_shape.back() != 1 && mul_in1_shape.back() != 1)) {
return false;
}
- exchange_ = mul_in0_shape.back() == 1 ? false : true;
+ exchange_ = mul_in0_shape.back() != 1;
transpose_a_ = true;
transpose_b_ = false;
MS_ASSERT(mul_in0_shape[mul_in0_shape.size() - C2NUM] != 0);
@ -759,7 +759,7 @@ bool MulReduceFusion::CheckGatherOp(const FuncGraphPtr &func_graph, const CNodeP
return false;
}
if (IsMultiOutputTensors(func_graph, gather_)) {
- return lite::RET_OK;
+ return false;
}
return true;
}
@ -791,7 +791,9 @@ bool MulReduceFusion::CheckConcatOp(const FuncGraphPtr &func_graph, const CNodeP
}
auto concat_prim = GetCNodePrimitive(cnode);
MS_CHECK_TRUE_RET(concat_prim != nullptr, false);
- concat_axis_ = concat_prim->GetAttr(ops::kAxis) == nullptr ? 0 : GetValue<int64_t>(concat_prim->GetAttr(ops::kAxis));
+ concat_axis_ = concat_prim->GetAttr(ops::kAxis) == nullptr
+ ? 0
+ : static_cast<int>(GetValue<int64_t>(concat_prim->GetAttr(ops::kAxis)));
axis = axis < 0 ? axis + out_dims + 1 : axis;
MS_CHECK_TRUE_RET(axis >= 0 && axis <= out_dims, false);
concat_axis_ = concat_axis_ < 0 ? concat_axis_ + out_dims : concat_axis_;

View File

@ -56,7 +56,7 @@ class MulReduceFusion : public Pass {
int PostProcessSqueezeWithConcat(const FuncGraphPtr &func_graph, const CNodePtr &cnode);
int GenerateMatmul(const FuncGraphPtr &func_graph, const CNodePtr &cnode);
int GenerateSqueeze(const FuncGraphPtr &func_graph, const CNodePtr &cnode);
- int ProcessGather(const FuncGraphPtr &func_graph);
+ int ProcessGather();
bool CheckBasicCond(const FuncGraphPtr &func_graph, const CNodePtr &cnode);
bool CheckAxisCond(const CNodePtr &cnode);
bool CheckShapeCond(const CNodePtr &cnode);

View File

@ -87,7 +87,7 @@ bool ReshapeReduceFusion::CheckCanFusion(const FuncGraphPtr &func_graph, const C
return false;
}
if (!keep_dim_) {
- shape_.erase(shape_.begin() + axis_);
+ (void)shape_.erase(shape_.begin() + axis_);
}
return true;
}

View File

@ -111,11 +111,11 @@ STATUS NodeInferShape::InferShape(const CNodePtr &cnode) {
return lite::RET_ERROR;
}
std::vector<lite::Tensor *> inputs;
- std::transform(inputs_ptr.begin(), inputs_ptr.end(), std::back_inserter(inputs),
- [](const TensorPtr &input) { return input.get(); });
+ (void)std::transform(inputs_ptr.begin(), inputs_ptr.end(), std::back_inserter(inputs),
+ [](const TensorPtr &input) { return input.get(); });
std::vector<lite::Tensor *> outputs;
- std::transform(outputs_ptr.begin(), outputs_ptr.end(), std::back_inserter(outputs),
- [](const TensorPtr &output) { return output.get(); });
+ (void)std::transform(outputs_ptr.begin(), outputs_ptr.end(), std::back_inserter(outputs),
+ [](const TensorPtr &output) { return output.get(); });
auto ret = KernelInferShape(inputs, outputs, prim, {}, lite::SCHEMA_CUR);
if (ret == lite::RET_NOT_SUPPORT) {
auto parameter_gen = lite::PopulateRegistry::GetInstance()->GetParameterCreator(
@ -154,8 +154,8 @@ STATUS NodeInferShape::InferShape(const CNodePtr &cnode) {
MS_LOG(WARNING) << "infer shape failed.";
}
std::vector<int64_t> outputs_format;
- std::transform(outputs.begin(), outputs.end(), std::back_inserter(outputs_format),
- [](const lite::Tensor *output) { return output->format(); });
+ (void)std::transform(outputs.begin(), outputs.end(), std::back_inserter(outputs_format),
+ [](const lite::Tensor *output) { return output->format(); });
(void)anf_prim->AddAttr(kOutputsFormat, MakeValue(outputs_format));
return ret;
}

View File

@ -142,7 +142,7 @@ int SpecialNodePostProcess::HandleInstanceNorm(const FuncGraphPtr &func_graph, c
MS_CHECK_TRUE_RET(pre_transpose != nullptr, lite::RET_ERROR);
auto pre_trans_prim = GetValueNode<PrimitivePtr>(pre_transpose->input(0));
MS_CHECK_TRUE_RET(pre_trans_prim != nullptr, lite::RET_ERROR);
- pre_trans_prim->AddAttr(ops::kFormat, MakeValue<int64_t>(mindspore::NHWC));
+ (void)pre_trans_prim->AddAttr(ops::kFormat, MakeValue<int64_t>(mindspore::NHWC));
auto abstract = GetCNodeInputAbstract(cnode, 1);
if (abstract != nullptr) {
auto shape = GenerateNewShape(abstract);
@ -155,8 +155,8 @@ int SpecialNodePostProcess::HandleInstanceNorm(const FuncGraphPtr &func_graph, c
MS_CHECK_TRUE_RET(post_transpose != nullptr, lite::RET_ERROR);
auto post_trans_prim = GetValueNode<PrimitivePtr>(post_transpose->input(0));
MS_CHECK_TRUE_RET(post_trans_prim != nullptr, lite::RET_ERROR);
- post_trans_prim->AddAttr(ops::kFormat, MakeValue<int64_t>(mindspore::NCHW));
- prim->AddAttr(ops::kFormat, MakeValue<int64_t>(mindspore::NCHW));
+ (void)post_trans_prim->AddAttr(ops::kFormat, MakeValue<int64_t>(mindspore::NCHW));
+ (void)prim->AddAttr(ops::kFormat, MakeValue<int64_t>(mindspore::NCHW));
abstract = cnode->abstract();
if (abstract != nullptr) {
post_transpose->set_abstract(abstract->Clone());