forked from mindspore-Ecosystem/mindspore
add nullptr check
parent 81833943ba
commit e6e3956c84
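The diff below threads nullptr checks through the converter's ANF fusion passes: every new(std::nothrow) allocation and every created primitive or kernel is tested before use, and pattern passes now signal "no rewrite" by returning nullptr instead of echoing an input node. A minimal sketch of the recurring check-and-bail pattern, assuming a hypothetical helper name (make_buffer is illustrative, not from the diff):

    #include <iostream>
    #include <new>

    // Allocate with the non-throwing form, test for nullptr, log and bail:
    // the shape of every check this commit adds.
    float *make_buffer(int kernel_num) {
      auto data = new (std::nothrow) float[kernel_num];
      if (data == nullptr) {
        std::cerr << "buffer is nullptr" << std::endl;  // MS_LOG(ERROR) in the real code
        return nullptr;  // caller must handle the failure
      }
      return data;
    }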
@@ -36,7 +36,7 @@ void AnfTransform::SetGraphDef(schema::MetaGraphT *_dstDef) { graphDefT = _dstDe
 FuncGraphPtr AnfTransform::Transform(const FuncGraphPtr &old_graph) {
   // return old_graph;
   auto optimizer = std::make_shared<opt::GraphOptimizer>();
-  auto pm = std::make_shared<opt::PassManager>();
+  auto pm = std::make_shared<opt::PassManager>("anf fusion pass manager", false);
   pm->AddPass(std::make_shared<opt::ConvBiasaddFusion>());
   pm->AddPass(std::make_shared<opt::ConvBatchNormFusion>());
   pm->AddPass(std::make_shared<opt::ConvScaleFusion>());

@@ -327,7 +327,15 @@ schema::PrimitiveType GetCNodeType(const BaseRef &n) {
 }
 
 bool IsParamNode(const BaseRef &n) {
-  return utils::isa<ParameterPtr>(n);
+  if (!utils::isa<ParameterPtr>(n)) {
+    return false;
+  }
+  auto param = utils::cast<ParameterPtr>(n)->default_param();
+  auto tensor = std::dynamic_pointer_cast<ParamValueLite>(param);
+  if (tensor == nullptr) {
+    return false;
+  }
+  return tensor->tensor_addr() != nullptr;
 }
 
 bool IsConvNode(const BaseRef &n) {

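The rewritten IsParamNode is stricter than a type test: the parameter must also carry a default_param convertible to ParamValueLite whose tensor_addr is non-null, i.e. constant data that is actually materialized. A self-contained sketch of that three-stage test, with hypothetical stand-in types (not MindSpore's real ParameterPtr/ParamValueLite):

    #include <memory>

    // Hypothetical stand-ins, just to show the shape of the test.
    struct ParamValueLite {
      void *addr = nullptr;
      void *tensor_addr() const { return addr; }
    };
    struct Parameter {
      std::shared_ptr<ParamValueLite> value;
      std::shared_ptr<ParamValueLite> default_param() const { return value; }
    };

    bool HasConstData(const std::shared_ptr<Parameter> &param) {
      if (param == nullptr) {                   // stage 1: is it a Parameter?
        return false;
      }
      auto tensor = param->default_param();     // stage 2: does it hold a value?
      if (tensor == nullptr) {
        return false;
      }
      return tensor->tensor_addr() != nullptr;  // stage 3: is the data present?
    }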
@@ -28,6 +28,8 @@
 
 namespace mindspore {
 namespace opt {
+static size_t count = 0;
+constexpr size_t kMaxRepassTimes = 9;
 const std::vector<PassPtr> &PassManager::Passes() const { return passes_; }
 
 void PassManager::AddPass(const PassPtr &pass) {

@@ -79,9 +81,11 @@ bool PassManager::Run(const FuncGraphPtr &func_graph) const {
   while (change) {
     change = Run(func_graph, passes_);
     changed = change || changed;
-    if (run_only_once_) {
+    if (run_only_once_ || count > kMaxRepassTimes) {
       break;
     }
+    count++;
+    MS_LOG(INFO) << "Run pass counts:" << count;
   }
   return changed;
 }

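PassManager::Run re-runs the pass list until a sweep makes no change, and the new counter caps that at kMaxRepassTimes so two passes that keep undoing each other cannot loop forever. A standalone sketch of the capped fixed-point loop, with run_once standing in for one sweep over the registered passes:

    #include <cstddef>
    #include <functional>

    constexpr size_t kMaxRepassTimes = 9;

    // run_once performs one sweep and returns true if it changed the graph.
    bool RunToFixedPoint(bool run_only_once, const std::function<bool()> &run_once) {
      bool changed = false;
      bool change = true;
      size_t count = 0;
      while (change) {
        change = run_once();
        changed = change || changed;
        if (run_only_once || count > kMaxRepassTimes) {
          break;  // the cap bounds total sweeps at kMaxRepassTimes + 2
        }
        count++;
      }
      return changed;
    }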
@@ -45,12 +45,20 @@ const std::vector<Tensor *> GetCNodeInputTensors(const CNodePtr &CNode) {
     auto tensor_shape = tensorT->dims;
     auto lite_tensor =
       new(std::nothrow)Tensor(TypeId(tensorT->dataType), tensor_shape, tensorT->format, tensorT->nodeType);
+    if (lite_tensor == nullptr) {
+      MS_LOG(ERROR) << "lite tensor is nullptr";
+      return input_tensors;
+    }
     auto lite_tensor_size = tensorT->data.size() * sizeof(uint8_t);
     // when tensorT as graph input
     if (lite_tensor_size == 0) {
       return input_tensors;
     }
     auto tensor_data = new(std::nothrow)char[lite_tensor_size / sizeof(char)];
+    if (tensor_data == nullptr) {
+      MS_LOG(ERROR) << "tensor_data is nullptr";
+      return input_tensors;
+    }
     auto ret = memcpy_s(tensor_data, lite_tensor_size, tensorT->data.data(), lite_tensor_size);
     if (ret != EOK) {
       MS_LOG(EXCEPTION) << "memcpy error: " << ret;

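The pattern in this hunk is allocate, verify, then copy with the securec memcpy_s, whose return value is checked against EOK. A compile-anywhere sketch of the same guarded copy, using std::memcpy in place of memcpy_s (CopyTensorData is a hypothetical helper, not from the diff):

    #include <cstdint>
    #include <cstring>
    #include <new>
    #include <vector>

    // Copy a serialized tensor blob: size 0 means a graph input with no
    // constant data; allocation may fail; the caller owns the result.
    char *CopyTensorData(const std::vector<uint8_t> &src) {
      const size_t size = src.size();
      if (size == 0) {
        return nullptr;  // graph input: nothing to copy
      }
      auto data = new (std::nothrow) char[size];
      if (data == nullptr) {
        return nullptr;  // allocation failed; caller logs and bails
      }
      std::memcpy(data, src.data(), size);  // memcpy_s(data, size, src.data(), size) in the real code
      return data;
    }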
@@ -97,6 +105,10 @@ const ParameterPtr CreateNewParamter(const FuncGraphPtr &func_graph, Tensor *ten
   if (tensor->Data() != nullptr) {
     auto size = tensor->ElementsNum();
     auto tensor_data = new (std::nothrow) float[size];
+    if (tensor_data == nullptr) {
+      MS_LOG(ERROR) << "tensor_data is nullptr";
+      return nullptr;
+    }
     auto ret = memcpy_s(tensor_data, size * sizeof(float), tensor->Data(), size * sizeof(float));
     if (ret != EOK) {
       MS_LOG(EXCEPTION) << "memcpy error: " << ret;

@@ -150,11 +162,15 @@ const AnfNodePtr ConstFoldPass::Process(const FuncGraphPtr &func_graph, const An
   std::vector<Tensor *> output_tensors{output_nums, new Tensor()};
   auto scheam_primitive = PackPrimitiveT(input_cnode);
   auto lite_primitive = lite::Primitive::CreatePrimitive(scheam_primitive);
+  if (lite_primitive == nullptr) {
+    MS_LOG(DEBUG) << "constant_folding schedule node lite primitive nullptr";
+    return nullptr;
+  }
   lite_primitive->InferShape(input_tensors, output_tensors);
   auto lite_kernel = GetLiteKernel(input_tensors, output_tensors, lite_primitive);
   if (lite_kernel == nullptr) {
-    MS_LOG(ERROR) << "constant_folding schedule node lite kernel nullptr";
-    return any_node;
+    MS_LOG(DEBUG) << "constant_folding schedule node lite kernel nullptr";
+    return nullptr;
   }
   auto ret = lite_kernel->Run();
   if (0 != ret) {

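The return-value changes in this hunk (and in the two fusion hunks below) hinge on the Process() contract: returning a node asks the optimizer to substitute it, while returning nullptr means "leave the match alone". Bailing out with any_node, add_node, or transform_node therefore looked like a successful rewrite; nullptr is the honest no-op. A schematic of that contract, with hypothetical types rather than MindSpore's real AnfNodePtr:

    // NodePtr stands in for AnfNodePtr; the body is deliberately schematic.
    struct Node;
    using NodePtr = Node *;

    NodePtr Process(NodePtr matched, bool preconditions_ok) {
      if (!preconditions_ok) {
        return nullptr;          // "no rewrite": the optimizer keeps the original node
      }
      NodePtr folded = matched;  // placeholder for the real replacement node
      return folded;             // non-null: the optimizer substitutes this node
    }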
@@ -83,7 +83,11 @@ void GenConvNewBias(const FuncGraphPtr &func_graph, const CNodePtr &conv_node, c
   if (kernel_nums <= 0) {
     MS_LOG(EXCEPTION) << "kernel num less than 0";
   }
-  auto add_bias_data = new (std::nothrow) float[kernel_nums];
+  auto add_bias_data = new(std::nothrow) float[kernel_nums];
+  if (add_bias_data == nullptr) {
+    MS_LOG(ERROR) << "tensor_data is nullptr";
+    return;
+  }
   auto bias_add_weight = bias_node->input(kAddWEIGHTINDEX);
   CheckIfNodeIsParam(bias_add_weight);
   auto add_weight_param = bias_add_weight->cast<ParameterPtr>()->default_param();

@@ -140,7 +144,7 @@ const AnfNodePtr ConvBiasaddFusion::Process(const FuncGraphPtr &func_graph, cons
   AnfNodePtr conv_node_anf = add_node->input(1);
   CheckIfAnfNodeIsNull(conv_node_anf);
   if (IsMultiOutputTensors(func_graph, conv_node_anf)) {
-    return add_node;
+    return nullptr;
   }
   auto conv_node = conv_node_anf->cast<CNodePtr>();
   CheckIfCNodeIsNull(conv_node);

@@ -67,7 +67,7 @@ const AnfNodePtr ConvTransformFusion::Process(const FuncGraphPtr &func_graph, co
   auto pre_node = transform_node->input(1);
   auto conv_node = pre_node->cast<CNodePtr>();
   if (IsMultiOutputTensors(func_graph, conv_node)) {
-    return transform_node;
+    return nullptr;
   }
 
   auto abstr = transform_node->abstract();

@@ -76,8 +76,16 @@ const AnfNodePtr ConvTransformFusion::Process(const FuncGraphPtr &func_graph, co
   MS_LOG(ERROR) << "Unsupported conv node, " << conv_node->DebugString();
     return node;
   }
-  auto trans_scale = new (std::nothrow) float[kernel_nums];
-  auto trans_bias = new (std::nothrow) float[kernel_nums];
+  auto trans_scale = new(std::nothrow) float[kernel_nums];
+  if (trans_scale == nullptr) {
+    MS_LOG(ERROR) << "tensor_data is nullptr";
+    return nullptr;
+  }
+  auto trans_bias = new(std::nothrow) float[kernel_nums];
+  if (trans_bias == nullptr) {
+    MS_LOG(ERROR) << "tensor_data is nullptr";
+    return nullptr;
+  }
   GenTransParam(transform_node, kernel_nums, trans_scale, trans_bias);
   GenNewConvTensor(func_graph, conv_node, kernel_nums, trans_scale, trans_bias);
   delete[] trans_bias;

@@ -155,7 +163,11 @@ const void ConvTransformFusion::GenNewConvTensor(const FuncGraphPtr &func_graph,
     bias_data = reinterpret_cast<float *>(bias_tensor->tensor_addr());
     bias_flag = true;
   } else {
-    bias_data = new (std::nothrow) float[kernel_num];
+    bias_data = new(std::nothrow) float[kernel_num];
+    if (trans_scale == nullptr) {
+      MS_LOG(ERROR) << "tensor_data is nullptr";
+      return;
+    }
   }
   CalNewBiasTensor(bias_data, kernel_num, bias_flag, trans_scale, trans_bias);
   if (!bias_flag) {

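One caveat in the hunk above: the new guard tests trans_scale immediately after allocating bias_data, which reads like a copy-paste slip; the freshly allocated pointer is presumably what was meant. A corrected sketch of the intended else branch, under that assumption (AllocConvBias is a hypothetical helper):

    #include <new>

    // Presumed intent: verify the buffer that was just allocated.
    float *AllocConvBias(int kernel_num) {
      auto bias_data = new (std::nothrow) float[kernel_num];
      if (bias_data == nullptr) {  // not trans_scale
        return nullptr;            // stands in for MS_LOG(ERROR) + early return
      }
      return bias_data;
    }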
@@ -193,7 +205,11 @@ const void ConvTransformFusion::CalNewBiasTensor(float *bias_data, int kernel_nu
                                                  const float *trans_scale, const float *trans_bias) const {
   MS_ASSERT(bias_data != nullptr);
   if (bias_flag) {
-    auto tmp_bias_data = new (std::nothrow) float[kernel_num];
+    auto tmp_bias_data = new(std::nothrow) float[kernel_num];
+    if (tmp_bias_data == nullptr) {
+      MS_LOG(ERROR) << "tensor_data is nullptr";
+      return;
+    }
     if (EOK != memset_s(tmp_bias_data, kernel_num * sizeof(float), 0, kernel_num * sizeof(float))) {
       MS_LOG(EXCEPTION) << "memset bias data failed";
     }