forked from mindspore-Ecosystem/mindspore
!8726 modify static check to master
From: @lyvette Reviewed-by: @hangangqiang,@zhanghaibo5 Signed-off-by: @hangangqiang
commit 88fa121734
@@ -36,13 +36,11 @@ class KernelRegistry {
   static KernelRegistry *GetInstance();
   int Init();
   void FreeCreatorArray();
   virtual kernel::KernelCreator GetCreator(const kernel::KernelKey &desc);
   const kernel::KernelCreator *GetCreatorArrays();
-  int GetCreatorFuncIndex(const kernel::KernelKey desc);
-  void RegKernel(const kernel::KernelKey desc, kernel::KernelCreator creator);
-  void RegKernel(const kernel::KERNEL_ARCH arch, const TypeId data_type, const schema::PrimitiveType type,
-                 kernel::KernelCreator creator);
+  int GetCreatorFuncIndex(kernel::KernelKey desc);
+  void RegKernel(kernel::KernelKey desc, kernel::KernelCreator creator);
+  void RegKernel(kernel::KERNEL_ARCH arch, TypeId data_type, schema::PrimitiveType type, kernel::KernelCreator creator);
   bool Merge(const std::unordered_map<kernel::KernelKey, kernel::KernelCreator> &newCreators);
   kernel::LiteKernel *GetKernel(const std::vector<Tensor *> &in_tensors, const std::vector<Tensor *> &out_tensors,
                                 const PrimitiveC *primitive, const InnerContext *ctx, const kernel::KernelKey &key);
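Note on the signature changes above: a top-level const on a by-value parameter does not change a function's signature, so declaring it in a header only adds noise, which is what the static check flags. A minimal, generic C++ illustration (not MindSpore code):

#include <iostream>

// These two declarations name the same function: top-level const on a
// by-value parameter is ignored in the function type.
void PrintIndex(const int index);

void PrintIndex(int index) {  // the definition may add or drop that const freely
  std::cout << index << std::endl;
}

int main() {
  PrintIndex(3);
  return 0;
}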
@@ -61,6 +59,7 @@ class KernelRegistrar {
   KernelRegistrar(const kernel::KernelKey &desc, kernel::KernelCreator creator) {
     KernelRegistry::GetInstance()->RegKernel(desc, creator);
   }
+  ~KernelRegistrar() = default;

   KernelRegistrar(const kernel::KERNEL_ARCH arch, const TypeId data_type, const schema::PrimitiveType op_type,
                   kernel::KernelCreator creator) {
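The KernelRegistrar above registers a creator with the KernelRegistry singleton from its constructor; the usual way to use such a class is to define a file-scope registrar object per kernel so registration runs during static initialization, and the explicitly defaulted destructor added here is a static-check tidy-up rather than a behaviour change. The sketch below shows that registration pattern with simplified, hypothetical stand-ins for KernelKey and KernelCreator; it is not the real MindSpore Lite API.

#include <functional>
#include <iostream>
#include <map>
#include <string>
#include <utility>

// Simplified stand-ins for kernel::KernelKey and kernel::KernelCreator.
using KernelKey = std::string;
using KernelCreator = std::function<void()>;

class KernelRegistry {
 public:
  static KernelRegistry *GetInstance() {
    static KernelRegistry instance;
    return &instance;
  }
  void RegKernel(KernelKey desc, KernelCreator creator) { creators_[desc] = std::move(creator); }
  void Run(const KernelKey &desc) { creators_.at(desc)(); }

 private:
  std::map<KernelKey, KernelCreator> creators_;
};

class KernelRegistrar {
 public:
  KernelRegistrar(const KernelKey &desc, KernelCreator creator) {
    KernelRegistry::GetInstance()->RegKernel(desc, std::move(creator));
  }
  ~KernelRegistrar() = default;
};

// File-scope registrar: the kernel is registered before main() starts.
static KernelRegistrar g_addKernelReg("Add", [] { std::cout << "Add kernel created" << std::endl; });

int main() {
  KernelRegistry::GetInstance()->Run("Add");
  return 0;
}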
@@ -23,6 +23,7 @@ int ConvertSubGraph(const schema::SubGraph &sub_graph, Model *model) {
     MS_LOG(ERROR) << "new subGraph fail!";
     return RET_ERROR;
   }
+  MS_ASSERT(sub_graph.name() != nullptr);
   subgraph->name_ = sub_graph.name()->c_str();
   MS_ASSERT(sub_graph.inputIndices() != nullptr);
   auto in_count = sub_graph.inputIndices()->size();
@@ -68,6 +69,7 @@ const void *GetMetaGraphByVerison(const char *buf, const int &schema_version) {
 }

 int GenerateModelByVersion(const void *meta_graph, Model *model, const int &schema_version) {
+  MS_ASSERT(meta_graph != nullptr);
   MS_ASSERT(model != nullptr);
   int status = RET_ERROR;
   if (schema_version == SCHEMA_VERSION::SCHEMA_CUR) {
@@ -27,7 +27,7 @@ namespace mindspore {
 class ParamValueLite : public Value {
  public:
   ParamValueLite() : tensor_addr_(nullptr), tensor_size_(0) {}
-  virtual ~ParamValueLite() {
+  ~ParamValueLite() override {
     if (tensor_addr_ != nullptr) {
       auto tensor_mem = reinterpret_cast<char *>(tensor_addr_);
       delete[](tensor_mem);
@@ -58,7 +58,7 @@ class DequantUtil {
         }
       }
     } else if (input_tensor->GetQuantParams().size() != kPerTensor) {
-      size_t channels = static_cast<size_t>(input_tensor->Batch());
+      auto channels = static_cast<size_t>(input_tensor->Batch());
       if (input_tensor->GetQuantParams().size() != channels) {
         MS_LOG(ERROR) << "Quant param not equal channel num " << input_tensor->GetQuantParams().size() << channels;
         free(dequant_datas);
@@ -136,6 +136,10 @@ class DequantUtil {

   template <typename T1, typename T2>
   static void UnPackUtil(const schema::Tensor *input_tensor, int origin_bit, void *unpack_int_data) {
+    if (input_tensor == nullptr || input_tensor->data() == nullptr) {
+      MS_LOG(ERROR) << "tensor data is null";
+      return;
+    }
     auto weight_data = input_tensor->data()->data();
     int pack_size =
       input_tensor->dataType() == kNumberTypeInt8 ? input_tensor->data()->size() : input_tensor->data()->size() / 2;
@@ -848,6 +848,7 @@ ThreadPool *CreateThreadPool(int thread_num, int mode) {
   if (thread_pool->thread_list == NULL) {
     LOG_ERROR("create thread list failed");
     DestroyThreadPool(thread_pool);
+    thread_pool = NULL;
     return NULL;
   }
   thread_pool->thread_list->head = NULL;
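The added thread_pool = NULL; resets the local pointer right after DestroyThreadPool frees it, so the error path cannot leave a dangling value around; nulling a pointer immediately after releasing it is the defensive idiom static analyzers usually ask for. A small C++ sketch of the same destroy-and-null idea, using a hypothetical ThreadPoolStub rather than the real thread pool type:

#include <cstdlib>

struct ThreadPoolStub {
  int placeholder;  // stand-in member; the real pool holds thread and queue state
};

// Free the pool and clear the caller's pointer so later code cannot
// accidentally dereference or double-free it.
void DestroyPool(ThreadPoolStub **pool) {
  if (pool == nullptr || *pool == nullptr) {
    return;
  }
  std::free(*pool);
  *pool = nullptr;  // mirrors the "thread_pool = NULL;" added in the hunk above
}

int main() {
  auto *pool = static_cast<ThreadPoolStub *>(std::malloc(sizeof(ThreadPoolStub)));
  DestroyPool(&pool);
  return pool == nullptr ? 0 : 1;
}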
@@ -15,11 +15,7 @@
 */

 #include "tools/anf_importer/import_from_protobuf.h"
-
-#include <fcntl.h>
-#include <unistd.h>
-
 #include <fstream>
 #include <map>
 #include <memory>
 #include <stack>
@@ -243,7 +239,7 @@ int AnfImporterFromProtobuf::BuildParameterForFuncGraph(const ParameterPtr &node
   node->set_abstract(abstract_tensor);

   if (default_para_map_.find(value_proto.name()) != default_para_map_.end()) {
-    Tensor *tensor_info = new Tensor(kDefaultValueSwitchMap[tensor_typeproto.elem_type()], shape);
+    auto *tensor_info = new Tensor(kDefaultValueSwitchMap[tensor_typeproto.elem_type()], shape);
     if (tensor_info == nullptr) {
       return RET_MEMORY_FAILED;
     }
@@ -435,7 +431,7 @@ bool AnfImporterFromProtobuf::GetAttrValueForCNode(const PrimitivePtr &prim, con
   }
   if (kParseTypeSwitchMap[type] == FORM_PARSE_SCALAR) {
     if (kv.size() == 1) {
-      std::unordered_map<std::string, ValuePtr>::iterator iter = kv.begin();
+      auto iter = kv.begin();
       prim->AddAttr(attr_name, iter->second);
     } else {
       auto res = ParserScalarAttrValue(ref_attr_name, kv);
@@ -459,7 +455,7 @@ bool AnfImporterFromProtobuf::ObtainValueNodeInTensorForm(const std::string &val
   param_value->set_tensor_shape(shape_vector);
   param_value->set_tensor_type(kDefaultValueSwitchMap[attr_tensor_type]);
   const std::string &tensor_buf = attr_tensor.raw_data();
-  auto tensor_data = new (std::nothrow) char[tensor_buf.size()];
+  auto tensor_data = new (std::nothrow) char[tensor_buf.size() + 1];
   if (tensor_data == nullptr) {
     MS_LOG(ERROR) << "Tensor_data is nullptr";
     return false;
@@ -648,14 +644,14 @@ CNodePtr AnfImporterFromProtobuf::BuildCNodeForFuncGraph(const FuncGraphPtr &out
     MS_LOG(ERROR) << "funcgraph new cnode failed";
     return nullptr;
   }
-  if (0 == kv.size()) {
+  if (kv.empty()) {
     AbstractBasePtrList elem;
     for (size_t index = 1; index < cnode_ptr->inputs().size(); ++index) {
       elem.push_back(cnode_ptr->input(index)->abstract());
     }
     cnode_ptr->set_abstract(std::make_shared<abstract::AbstractTuple>(elem));
   } else if (1 == kv.size()) {
-    std::unordered_map<std::string, abstract::AbstractTensorPtr>::iterator iter = kv.begin();
+    auto iter = kv.begin();
     cnode_ptr->set_abstract(iter->second);
   } else {
     auto abstract = ParserAttrShape(shape_ref_attr_name, kv);
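Several hunks in this importer make the same two readability fixes: test a container with empty() instead of comparing size() with 0, and let auto replace a spelled-out iterator type. A small self-contained example of both (generic C++, not the importer's types):

#include <iostream>
#include <string>
#include <unordered_map>

int main() {
  std::unordered_map<std::string, int> kv{{"alpha", 1}};

  // Prefer kv.empty() over kv.size() == 0, and auto over
  // std::unordered_map<std::string, int>::iterator.
  if (!kv.empty()) {
    auto iter = kv.begin();
    std::cout << iter->first << " = " << iter->second << std::endl;
  }
  return 0;
}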
@@ -57,17 +57,17 @@ class AnfImporterFromProtobuf : public AnfImporter {
                       const schema::QuantType &quantType);
   bool BuildReturnForFuncGraph(const FuncGraphPtr &outputFuncGraph, const onnx::GraphProto &importProto,
                                const CNodePtr &cnode_ptr);
-  bool GetAttrValueForCNode(const PrimitivePtr &prim, const onnx::AttributeProto &attr_proto);
-  bool ObtainCNodeAttrInTypeForm(const PrimitivePtr &prim, const std::string &attr_name,
-                                 const onnx::TensorProto &attr_tensor);
-  ValuePtr ObtainCNodeAttrInScalarForm(const onnx::TensorProto &attr_tensor);
-  bool ObtainCNodeAttrInTensorForm(const PrimitivePtr &prim, const std::string &attr_name,
-                                   const onnx::TensorProto &attr_tensor);
+  static bool GetAttrValueForCNode(const PrimitivePtr &prim, const onnx::AttributeProto &attr_proto);
+  static bool ObtainCNodeAttrInTypeForm(const PrimitivePtr &prim, const std::string &attr_name,
+                                        const onnx::TensorProto &attr_tensor);
+  static ValuePtr ObtainCNodeAttrInScalarForm(const onnx::TensorProto &attr_tensor);
+  static bool ObtainCNodeAttrInTensorForm(const PrimitivePtr &prim, const std::string &attr_name,
+                                          const onnx::TensorProto &attr_tensor);
   bool BuildValueNodeForFuncGraph(const onnx::NodeProto &node_proto);
   bool ObtainValueNodeInTensorForm(const string &value_node_name, const onnx::TensorProto &attr_tensor);
   bool GetAttrValueForValueNode(const std::string &value_node_name, const onnx::AttributeProto &attr_proto);
   bool ObtainValueNodeInTypeForm(const string &value_node_name, const onnx::TensorProto &attr_tensor);
-  std::unordered_map<std::string, abstract::AbstractTensorPtr> GetAbstractForCNode(
+  static std::unordered_map<std::string, abstract::AbstractTensorPtr> GetAbstractForCNode(
     const onnx::AttributeProto &attr_proto);

  private:
@@ -147,7 +147,6 @@ STATUS MulAddFusionPass::AddNewScaleNode(MetaGraphT *graph, const std::unique_pt
   scaleParam->axis = 0 - shape_size;
   mulNode->inputIndex.push_back(addBiasIndex);
   MS_ASSERT(addNode->primitive != nullptr);
-  MS_ASSERT(addNode->primitive->value != nullptr);
   MS_ASSERT(addNode->primitive->value.AsAdd() != nullptr);
   auto activationType = addNode->primitive->value.AsAdd()->activationType;
   if (activationType == ActivationType_RELU || activationType == ActivationType_RELU6 ||
@@ -163,7 +162,6 @@ STATUS MulAddFusionPass::AddNewScaleNode(MetaGraphT *graph, const std::unique_pt
     // repace addnode as activation
     std::unique_ptr<ActivationT> activationParam(new ActivationT());
     MS_ASSERT(addNode->primitive != nullptr);
-    MS_ASSERT(addNode->primitive->value != nullptr);
     MS_ASSERT(addNode->primitive->value.AsAdd() != nullptr);
     activationParam->type = addNode->primitive->value.AsAdd()->activationType;
     addNode->primitive->value.type = schema::PrimitiveType_Activation;
@@ -19,8 +19,7 @@
 #include <memory>
 #include <vector>

-namespace mindspore {
-namespace lite {
+namespace mindspore::lite {
 constexpr int32_t kSingleGroup = 1;
 bool OnnxConvParser::ParseGroupConvolution(const std::unique_ptr<schema::Conv2DT> &attr, schema::CNodeT *op) {
   MS_LOG(DEBUG) << "onnx DepthwiseConvParser";
@@ -140,6 +139,7 @@ STATUS OnnxConvParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::Nod
   }
   std::vector<int> weight_shape;
   auto size = (*nodeIter).dims_size();
+  weight_shape.reserve(size);
   for (int i = 0; i < size; ++i) {
     weight_shape.emplace_back((*nodeIter).dims(i));
   }
@@ -157,7 +157,6 @@ STATUS OnnxConvParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::Nod
     auto iter = std::find_if((*nodeIter).attribute().begin(), (*nodeIter).attribute().end(),
                              [](const onnx::AttributeProto &attr) { return attr.name() == "shape"; });
     if (iter != (*nodeIter).attribute().end()) {
-      MS_ASSERT(iter->ints() != nullptr);
       MS_ASSERT(iter->ints().begin() != nullptr);
       MS_ASSERT(iter->ints().end() != nullptr);
       dims.insert(dims.begin(), iter->ints().begin(), iter->ints().end());
@@ -188,5 +187,4 @@ OnnxNodeRegistrar g_onnxConvParser("Conv", new OnnxConvParser());
 OnnxNodeRegistrar g_onnxInt8ConvParser("Int8Conv", new OnnxConvParser());
 OnnxNodeRegistrar g_onnxConvReluParser("ConvRelu", new OnnxConvParser());
 OnnxNodeRegistrar g_onnxInt8ConvReluParser("Int8ConvRelu", new OnnxConvParser());
-} // namespace lite
-} // namespace mindspore
+} // namespace mindspore::lite
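This parser, and the LpNorm and LRN parsers further down, collapse the nested namespace mindspore { namespace lite { ... } } blocks into the single C++17 nested-namespace form. The two spellings are equivalent, as this stand-alone snippet (compiled with -std=c++17) shows:

// C++17 nested namespace definition: both forms put names into mindspore::lite.
namespace mindspore {
namespace lite {
constexpr int kOldStyle = 1;
}  // namespace lite
}  // namespace mindspore

namespace mindspore::lite {
constexpr int kNewStyle = 2;
}  // namespace mindspore::lite

int main() { return (mindspore::lite::kOldStyle + mindspore::lite::kNewStyle == 3) ? 0 : 1; }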
@@ -31,7 +31,7 @@ class OnnxConvParser : public OnnxNodeParser {
   STATUS Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node, schema::CNodeT *op) override;

  private:
-  bool ParseGroupConvolution(const std::unique_ptr<schema::Conv2DT> &attr, schema::CNodeT *op);
+  static bool ParseGroupConvolution(const std::unique_ptr<schema::Conv2DT> &attr, schema::CNodeT *op);
 };
 } // namespace lite
 } // namespace mindspore
@@ -17,8 +17,7 @@
 #include "tools/converter/parser/onnx/onnx_lp_norm_parser.h"
 #include <memory>

-namespace mindspore {
-namespace lite {
+namespace mindspore::lite {
 STATUS OnnxLpNormParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node,
                                schema::CNodeT *op) {
   MS_LOG(DEBUG) << "onnx LpNormParser";
@@ -38,13 +37,12 @@ STATUS OnnxLpNormParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::N
     return RET_NULL_PTR;
   }

-  auto onnx_node_attr = onnx_node.attribute();
-  for (int i = 0; i < onnx_node_attr.size(); ++i) {
-    MS_ASSERT(onnx_node_attr.at(i) != nullptr);
-    if (onnx_node_attr.at(i).name() == "axis") {
-      attr->axis = onnx_node_attr.at(i).i();
-    } else if (onnx_node_attr.at(i).name() == "p") {
-      attr->p = onnx_node_attr.at(i).i();
+  for (const auto &onnx_node_attr : onnx_node.attribute()) {
+    const auto &attribute_name = onnx_node_attr.name();
+    if (attribute_name == "axis") {
+      attr->axis = onnx_node_attr.i();
+    } else if (attribute_name == "p") {
+      attr->p = onnx_node_attr.i();
     }
   }

@@ -54,5 +52,4 @@ STATUS OnnxLpNormParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::N
 }

 OnnxNodeRegistrar g_onnxLpNormParser("LpNormalization", new OnnxLpNormParser());
-} // namespace lite
-} // namespace mindspore
+} // namespace mindspore::lite
@@ -17,8 +17,7 @@
 #include "tools/converter/parser/onnx/onnx_lrn_parser.h"
 #include <memory>

-namespace mindspore {
-namespace lite {
+namespace mindspore::lite {
 STATUS OnnxLrnParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node, schema::CNodeT *op) {
   MS_LOG(DEBUG) << "onnx LrnParser";
   if (op == nullptr) {
@@ -37,18 +36,17 @@ STATUS OnnxLrnParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::Node
     return RET_NULL_PTR;
   }

-  auto onnx_node_attr = onnx_node.attribute();
   int32_t size = 0;
-  for (int i = 0; i < onnx_node_attr.size(); ++i) {
-    MS_ASSERT(onnx_node_attr.at(i) != nullptr);
-    if (onnx_node_attr.at(i).name() == "alpha") {
-      attr->alpha = onnx_node_attr.at(i).f();
-    } else if (onnx_node_attr.at(i).name() == "beta") {
-      attr->beta = onnx_node_attr.at(i).f();
-    } else if (onnx_node_attr.at(i).name() == "bias") {
-      attr->bias = onnx_node_attr.at(i).f();
-    } else if (onnx_node_attr.at(i).name() == "size") {
-      size = static_cast<int32_t>(onnx_node_attr.at(i).i());
+  for (const auto &onnx_node_attr : onnx_node.attribute()) {
+    const auto &attribute_name = onnx_node_attr.name();
+    if (attribute_name == "alpha") {
+      attr->alpha = onnx_node_attr.f();
+    } else if (attribute_name == "beta") {
+      attr->beta = onnx_node_attr.f();
+    } else if (attribute_name == "bias") {
+      attr->bias = onnx_node_attr.f();
+    } else if (attribute_name == "size") {
+      size = static_cast<int32_t>(onnx_node_attr.i());
       attr->depth_radius = size / 2;
     }
   }
@@ -66,5 +64,4 @@ STATUS OnnxLrnParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::Node

 OnnxNodeRegistrar g_onnxLrnxParser("Lrn", new OnnxLrnParser());
 OnnxNodeRegistrar g_onnxLRNxParser("LRN", new OnnxLrnParser());
-} // namespace lite
-} // namespace mindspore
+} // namespace mindspore::lite
@@ -192,22 +192,24 @@ class Calibrator {

   STATUS AddQuantizedOp(const CNodePtr &node);

-  STATUS RecordMaxValue(const std::vector<float> &data, const std::unique_ptr<DivergInfo> &diverg_info);
+  static STATUS RecordMaxValue(const std::vector<float> &data, const std::unique_ptr<DivergInfo> &diverg_info);

-  STATUS UpdateDivergInverval(std::unordered_map<std::string, std::vector<std::unique_ptr<DivergInfo>>> *diverg_info);
+  static STATUS UpdateDivergInverval(
+    std::unordered_map<std::string, std::vector<std::unique_ptr<DivergInfo>>> *diverg_info);

-  STATUS UpdateDataFrequency(const std::vector<float> &data, const std::unique_ptr<DivergInfo> &diverg_info);
+  static STATUS UpdateDataFrequency(const std::vector<float> &data, const std::unique_ptr<DivergInfo> &diverg_info);
   void Dump();

   STATUS ComputeThreshold();

-  std::unordered_map<CNodePtr, float> GetScale(
+  static std::unordered_map<CNodePtr, float> GetScale(
     std::unordered_map<std::string, std::unique_ptr<DivergInfo>> *diverg_info);

-  std::unordered_map<CNodePtr, int32_t> GetZeropoint(
+  static std::unordered_map<CNodePtr, int32_t> GetZeropoint(
     std::unordered_map<std::string, std::unique_ptr<DivergInfo>> *diverg_info);

-  std::map<CNodePtr, MaxMin> GetMinMax(std::unordered_map<std::string, std::unique_ptr<DivergInfo>> *diverg_info);
+  static std::map<CNodePtr, MaxMin> GetMinMax(
+    std::unordered_map<std::string, std::unique_ptr<DivergInfo>> *diverg_info);

   std::unordered_map<std::string, std::vector<std::unique_ptr<DivergInfo>>> *GetInputDivergInfo();

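Most changes in this header mark Calibrator member functions static, which is only possible for functions that never touch instance state; it lets them be called without an object and silences the checker's "this method can be made static" warning. A tiny stand-alone illustration with a simplified stand-in class (not the real post-training quantizer):

#include <iostream>
#include <vector>

class CalibratorStub {
 public:
  // Reads only its arguments, so it can be a static member and be called as
  // CalibratorStub::RecordMaxValue(...) without constructing an object.
  static float RecordMaxValue(const std::vector<float> &data) {
    float max_value = 0.0f;
    for (float v : data) {
      if (v > max_value) {
        max_value = v;
      }
    }
    return max_value;
  }
};

int main() {
  std::cout << CalibratorStub::RecordMaxValue({0.5f, 2.0f, 1.25f}) << std::endl;
  return 0;
}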
@@ -110,8 +110,9 @@ ParameterPtr CreateNewParamter(const FuncGraphPtr &func_graph, Tensor *tensor) {
   parameter->set_default_param(param_value);
   return parameter;
 }
-kernel::LiteKernel *GetLiteKernel(std::vector<Tensor *> inputs, std::vector<Tensor *> outputs, OpParameter *parameter,
-                                  lite::InnerContext *context, mindspore::lite::PrimitiveC *primitive) {
+kernel::LiteKernel *GetLiteKernel(std::vector<Tensor *> inputs, const std::vector<Tensor *> &outputs,
+                                  OpParameter *parameter, lite::InnerContext *context,
+                                  mindspore::lite::PrimitiveC *primitive) {
   MS_ASSERT(nullptr != lite_primitive);
   auto data_type = inputs.front()->data_type();
   kernel::KernelKey desc{kernel::KERNEL_ARCH::kCPU, data_type, (schema::PrimitiveType)primitive->Type()};
@@ -163,15 +164,15 @@ lite::STATUS ReplaceCNode(const FuncGraphPtr &func_graph, const CNodePtr &any_no
 }  // namespace
 void FreeTensors(std::vector<Tensor *> *input_tensor, std::vector<Tensor *> *output_tensor) {
   if (input_tensor != nullptr) {
-    for (size_t i = 0; i < input_tensor->size(); i++) {
-      delete (*input_tensor)[i];
-      (*input_tensor)[i] = nullptr;
+    for (auto &i : *input_tensor) {
+      delete i;
+      i = nullptr;
     }
   }
   if (output_tensor != nullptr) {
-    for (size_t i = 0; i < output_tensor->size(); i++) {
-      delete (*output_tensor)[i];
-      (*output_tensor)[i] = nullptr;
+    for (auto &i : *output_tensor) {
+      delete i;
+      i = nullptr;
     }
   }
 }
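The rewritten FreeTensors uses range-based for loops; binding the element as auto &i keeps it a reference into the vector, which is what still lets the loop both delete the pointer and null out the slot. A self-contained sketch of the pattern with a stand-in Tensor type:

#include <vector>

struct Tensor {
  int id;  // minimal stand-in for the lite tensor
};

// Delete every element and reset each slot to nullptr, as FreeTensors does.
void FreeAll(std::vector<Tensor *> *tensors) {
  if (tensors == nullptr) {
    return;
  }
  for (auto &t : *tensors) {
    delete t;
    t = nullptr;
  }
}

int main() {
  std::vector<Tensor *> tensors{new Tensor{1}, new Tensor{2}};
  FreeAll(&tensors);
  return 0;
}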
@@ -231,9 +232,9 @@ const AnfNodePtr ConstFoldPass::Process(const FuncGraphPtr &func_graph, const An
   // here, input_tensor's format need to be transposed nhwc according to fmkType,
   // but for the time being, we only transpose the tensor with 0/1/2/3D.
   // Others should be added in future.
-  for (size_t j = 0; j < input_tensors.size(); ++j) {
-    input_tensors[j]->SetFormat(schema::Format::Format_NHWC);
-    if (input_tensors[j]->shape().size() == 4) {
+  for (auto &input_tensor : input_tensors) {
+    input_tensor->SetFormat(schema::Format::Format_NHWC);
+    if (input_tensor->shape().size() == 4) {
       MS_LOG(INFO) << "init input_tensor format to nhwc";
     }
   }
@@ -233,14 +233,11 @@ const void ConvTransformFusion::CalNewWeightTensor(float *weight_data, int kerne
     delete[] tmp_weight_data;
     return;
   }
-
-  if (tmp_weight_data != nullptr) {
-    delete[] tmp_weight_data;
-  }
+  delete[] tmp_weight_data;
 }

 const void ConvTransformFusion::CalNewBiasTensor(float *bias_data, int kernel_num, bool bias_flag,
-                                                 const float *trans_scale, const float *trans_bias) const {
+                                                 const float *trans_scale, const float *trans_bias) {
   MS_ASSERT(bias_data != nullptr);
   if (bias_flag) {
     auto tmp_bias_data = new (std::nothrow) float[kernel_num];
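Two things happen in the hunk above: the null check around delete[] tmp_weight_data goes away because applying delete[] to a null pointer is defined to do nothing, and CalNewBiasTensor drops its trailing const in step with the header change below, where it becomes a static member. A one-file demonstration of the first point:

int main() {
  float *tmp_weight_data = nullptr;
  // delete / delete[] on a null pointer is a no-op, so guarding it with
  // "if (tmp_weight_data != nullptr)" is redundant.
  delete[] tmp_weight_data;
  return 0;
}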
@@ -31,7 +31,7 @@ class ConvTransformFusion : public PatternProcessPass {
   virtual const void InitTransParam(const CNodePtr &, int, float *, float *) const = 0;
   const void GenNewConvTensor(const FuncGraphPtr &, const CNodePtr &, int, const float *, const float *) const;
   const void CalNewWeightTensor(float *, int, int, const float *) const;
-  const void CalNewBiasTensor(float *, int, bool, const float *, const float *) const;
+  static const void CalNewBiasTensor(float *, int, bool, const float *, const float *);
 };
 }  // namespace mindspore::opt
 #endif  // MINDSPORE_LITE_SRC_PASS_FUSION_CONV_TRANSFORM_FUSION_H_
@@ -79,7 +79,6 @@ bool RemoveUnusedTransposeOpPass::Run(const FuncGraphPtr &func_graph) {
       MS_LOG(ERROR) << "Transpose node of onnx need to removed which has not primitiveT";
       return RET_ERROR;
     }
-    MS_ASSERT(primT->value != nullptr);
     MS_ASSERT(primT->value.AsTranspose() != nullptr);
     std::vector<int32_t> perm = primT->value.AsTranspose()->perm;
     if (perm == kPermNHWC) {