!22913 codex: initialize member variables at declaration and add bounds-checked memory operations (memcpy_s) with error handling

Merge pull request !22913 from hangq/master
This commit is contained in:
i-robot 2021-09-08 07:28:27 +00:00 committed by Gitee
commit 639aee53d6
11 changed files with 36 additions and 30 deletions

View File

@ -131,7 +131,7 @@ class FusionPattern {
// output PatternOp id of pattern
std::string outputOpId{};
bool hasError = false;
bool hasError{false};
};
} // namespace lite
} // namespace mindspore

View File

@ -324,7 +324,15 @@ int InferShapePass::CopyPartialShapeToSubGraph(const CNodeT *partial_node, MetaG
subgraph_input->dims = partial_input->dims;
subgraph_input->format = partial_input->format;
subgraph_input->data.resize(partial_input->data.size(), 0);
memcpy(subgraph_input->data.data(), partial_input->data.data(), partial_input->data.size());
if (partial_input->data.empty()) {
continue;
}
auto ret = memcpy_s(subgraph_input->data.data(), subgraph_input->data.size(), partial_input->data.data(),
partial_input->data.size());
if (ret != EOK) {
MS_LOG(ERROR) << "memcpy failed, ret: " << ret;
return RET_ERROR;
}
}
return RET_OK;
}

View File

@ -35,7 +35,7 @@ class SelectPass : public GraphPass {
STATUS RemoveSelectNodes();
private:
std::vector<uint32_t> select_indices_;
std::vector<uint32_t> select_indices_{};
schema::MetaGraphT *graph_ = nullptr;
};

View File

@ -71,7 +71,7 @@ STATUS ComputeDataToInt8(const std::unique_ptr<TensorT> &tensor, int32_t index)
tensor->data.clear();
MS_CHECK_FALSE_MSG(INT_MUL_OVERFLOW_THRESHOLD(wShapeSize, sizeof(int8_t), SIZE_MAX), RET_ERROR, "int mul overflow");
tensor->data.resize(wShapeSize * sizeof(int8_t));
if (memcpy_s(tensor->data.data(), wShapeSize * sizeof(int8_t), qDatas.data(), wShapeSize * sizeof(int8_t)) != EOK) {
if (memcpy_s(tensor->data.data(), tensor->data.size(), qDatas.data(), wShapeSize * sizeof(int8_t)) != EOK) {
MS_LOG(ERROR) << "memcpy_s failed";
return RET_ERROR;
}

View File

@ -86,9 +86,7 @@ ops::PrimitiveC *OnnxPReluParser::Parse(const onnx::GraphProto &onnx_graph, cons
slope.push_back(*slope_raw_data);
channel_shared = true;
} else {
slope.resize(slope_size);
if (memcpy_s(slope.data(), slope_data->raw_data().size(), slope_raw_data, slope_data->raw_data().size()) !=
EOK) {
if (memcpy_s(slope.data(), slope_size * sizeof(float), slope_raw_data, slope_data->raw_data().size()) != EOK) {
MS_LOG(ERROR) << "memcpy_s failed";
return nullptr;
}

View File

@ -56,7 +56,7 @@ STATUS AddAttrToInput(const FuncGraphPtr &func_graph, const CNodePtr &cnode, int
opt::BuildIntVecParameterNode(func_graph, value_data, cnode->fullname_with_scope() + "_" + attr_name);
inputs.push_back(param_node);
auto manager = func_graph->manager();
MS_ASSERT(manager != nullptr);
MS_CHECK_TRUE_MSG(manager != nullptr, RET_ERROR, "funcgraph has no manager");
auto tr = manager->Transact();
tr.AddEdge(cnode, param_node);
tr.Commit();
@ -90,7 +90,7 @@ STATUS ReplaceInt64ParameterNode(const FuncGraphPtr &func_graph, const Parameter
return lite::RET_OK;
}
auto manager = func_graph->manager();
MS_ASSERT(manager != nullptr);
MS_CHECK_TRUE_MSG(manager != nullptr, RET_ERROR, "funcgraph has no manager");
if (param_node->has_default()) {
auto default_value = param_node->default_param();
MS_ASSERT(default_value != nullptr);
@ -221,7 +221,7 @@ STATUS ReplaceTransposeWithGraphInput(const FuncGraphPtr &func_graph, const CNod
shape_vector.insert(shape_vector.begin() + 1, channel);
param_node->abstract()->set_shape(std::make_shared<abstract::Shape>(shape_vector));
auto manager = func_graph->manager();
MS_ASSERT(manager != nullptr);
MS_CHECK_TRUE_MSG(manager != nullptr, RET_ERROR, "funcgraph has no manager");
manager->Replace(cnode, param_node);
}
return lite::RET_OK;

View File

@ -59,7 +59,7 @@ class OnnxNodeParser {
static STATUS GetTensorDataFromOnnx(const onnx::TensorProto &onnx_tensor, std::vector<float> *value, int *type);
const std::string name_;
const std::string name_{};
private:
static int64_t opset_version_;

View File

@ -37,7 +37,7 @@ class OnnxNodeParserRegistry {
OnnxNodeParserRegistry();
private:
std::unordered_map<std::string, OnnxNodeParser *> parsers;
std::unordered_map<std::string, OnnxNodeParser *> parsers{};
};
class OnnxNodeRegistrar {

View File

@ -394,6 +394,10 @@ bool UpdateRatioWithPadStride(int64_t *ratio, size_t ratio_len, size_t split_siz
for (size_t i = 0; i < split_size; i++) {
total_block_count += ratio[i];
}
if (ratio_len < split_size) {
MS_LOG(ERROR) << "out of ratio range";
return false;
}
std::vector<int64_t> new_ratio(split_size);
int visited_block = 0;
@ -403,10 +407,6 @@ bool UpdateRatioWithPadStride(int64_t *ratio, size_t ratio_len, size_t split_siz
new_ratio[i + 1] = cur_border;
}
if (ratio_len < split_size) {
MS_LOG(ERROR) << "out of ratio range";
return false;
}
for (size_t i = 0; i < split_size; i++) {
ratio[i] = new_ratio[i];
}

View File

@ -34,16 +34,16 @@ using mindspore::schema::PrimitiveType;
namespace opt {
struct SplitInfo {
int64_t axis;
int64_t out_num;
std::vector<int64_t> size_splits;
std::vector<int64_t> extend_top;
std::vector<int64_t> extend_bottom;
std::vector<mindspore::lite::DeviceType> dev_types;
int64_t ori_split_axis_value;
int64_t in_num_conv;
int64_t fmk_type;
PrimitiveType primitive_type;
int64_t axis{0};
int64_t out_num{0};
std::vector<int64_t> size_splits{};
std::vector<int64_t> extend_top{};
std::vector<int64_t> extend_bottom{};
std::vector<mindspore::lite::DeviceType> dev_types{};
int64_t ori_split_axis_value{0};
int64_t in_num_conv{0};
int64_t fmk_type{0};
PrimitiveType primitive_type{schema::PrimitiveType_NONE};
};
typedef enum { CUT_N, CUT_H, CUT_W, CUT_C_IN, CUT_C_OUT, CUT_NONE } CuttingStragedy;

View File

@ -71,10 +71,10 @@ enum SplitMode {
};
struct SplitStrategy {
Strategys strategys;
std::vector<std::string> dev_types;
size_t dev_num;
SplitMode split_mode_;
Strategys strategys{};
std::vector<std::string> dev_types{};
size_t dev_num{0};
SplitMode split_mode_{NoSplit};
};
// this is a map for key: <primitive,is_depth_wise> value: parallel_op_name