forked from mindspore-Ecosystem/mindspore
modify for static check
parent 2cd48d8cc9
commit 8bc9170255
@@ -217,8 +217,6 @@ void ColMajor2Row8MajorFp16(void *src_ptr, float16_t *dst_ptr, size_t row, size_
 
 void MatMul16x8(const float16_t *a, const float16_t *b, float16_t *dst, const float16_t *bias, ActType act_type,
                 int deep, int row, int col, int stride, bool write_nhwc) {
-  // int row_16 = UP_ROUND(row, C16NUM);
-  // int col_8 = UP_ROUND(col, C8NUM);
   if (write_nhwc) {
     /* col16-major * row8-major => col-major */
     for (int r = 0; r < row; r++) {
@@ -257,24 +255,6 @@ void MatMul16x8(const float16_t *a, const float16_t *b, float16_t *dst, const fl
         dst[ci] = value;
       }
     }
-    // /* col16-major * row8-major => row16x8-major */
-    // for (int r = 0; r < row_16; r++) {
-    //   for (int c = 0; c < col_8; c++) {
-    //     int r16div = r / C16NUM, r16mod = r % C16NUM;
-    //     int c8div = c / C8NUM, c8mod = c % C8NUM;
-    //     size_t ci = c8div * row_16 * C8NUM + r * C8NUM + c8mod;
-    //     float16_t value = 0;
-    //     for (int d = 0; d < deep; d++) {
-    //       size_t ai = r16div * deep * C16NUM + d * C16NUM + r16mod;
-    //       size_t bi = c8div * deep * C8NUM + d * C8NUM + c8mod;
-    //       value = value + a[ai] * b[bi];
-    //     }
-    //     if (bias != NULL) value += bias[col];
-    //     if (act_type == ActType_Relu6) value = MSMIN(6.0f, value);
-    //     if (act_type != ActType_No) value = MSMAX(0.0f, value);
-    //     dst[ci] = value;
-    //   }
-    // }
   }
   return;
 }
@@ -108,7 +108,7 @@ int CompareOutputData(const float *output_data, size_t output_size, const float
 }
 
 int CompareOutput(const float *output_data, size_t output_num, std::string file_path) {
-  size_t ground_truth_size;
+  size_t ground_truth_size = 0;
   auto ground_truth = reinterpret_cast<float *>(mindspore::lite::ReadFile(file_path.c_str(), &ground_truth_size));
   size_t ground_truth_num = ground_truth_size / sizeof(float);
   printf("ground truth num : %zu\n", ground_truth_num);
@@ -54,7 +54,7 @@ int CompareRelativeOutput(const float *output_data, std::string file_path) {
 }
 
 float RelativeOutputError(const float *output_data, std::string file_path) {
-  size_t output_size;
+  size_t output_size = 0;
   auto ground_truth = reinterpret_cast<float *>(mindspore::lite::ReadFile(file_path.c_str(), &output_size));
   size_t output_num = output_size / sizeof(float);
   float error = CompareOutputRelativeData(output_data, ground_truth, output_num);
@@ -21,7 +21,6 @@
 #include "nnacl/fp32_grad/pooling_grad.h"
 #include "include/errorcode.h"
 #include "src/runtime/runtime_api.h"
-// #include "src/train/ops/train_ops.h"
 
 using mindspore::kernel::KERNEL_ARCH::kCPU;
 using mindspore::lite::KernelRegistrar;
@@ -35,7 +35,7 @@ class ParallelExecutor : public Executor {
   int Run(std::vector<Tensor *> &in_tensors, std::vector<Tensor *> &out_tensors,
           std::vector<kernel::LiteKernel *> &kernels, Allocator *allocator = nullptr,
           const KernelCallBack &before = nullptr, const KernelCallBack &after = nullptr) override;
-  inline kernel::LiteKernel *GetReadyKernel(const int index) { return readyKernels.at(index); }
+  inline kernel::LiteKernel *GetReadyKernel(const int index) const { return readyKernels.at(index); }
   inline void SetResult(const int index, const int result) { results.at(index) = result; }
 
  private:
@@ -39,7 +39,7 @@ void *LiteBackendAllocWorkspace(int deviceType, int deviceId, uint64_t size, int
   return p->AllocWorkSpaceMem(size);
 }
 
-int LiteBackendFreeWorkspace(int deviceType, int deviceId, void *ptr) {
+int LiteBackendFreeWorkspace(int deviceType, int deviceId, const void *ptr) {
   std::lock_guard<std::mutex> lock(gWorkspaceMutex);
   auto p = mindspore::predict::WorkspacePool::GetInstance();
   if (p == nullptr) {
@@ -38,7 +38,7 @@ INTERNAL_API_DLL ThreadPool *CreateLiteThreadPool(int thread_num, int mode);
 INTERNAL_API_DLL void LiteAPISetLastError(const char *msg);
 INTERNAL_API_DLL void *LiteBackendAllocWorkspace(int deviceType, int deviceId, uint64_t size, int dtypeCode,
                                                  int dtypeBits);
-INTERNAL_API_DLL int LiteBackendFreeWorkspace(int deviceType, int deviceId, void *ptr);
+INTERNAL_API_DLL int LiteBackendFreeWorkspace(int deviceType, int deviceId, const void *ptr);
 INTERNAL_API_DLL int LiteBackendRegisterSystemLibSymbol(const char *name, void *ptr);
 #ifdef __cplusplus
 }
@@ -371,9 +371,9 @@ bool AnfImporterFromProtobuf::ObtainCNodeAttrInTensorForm(const PrimitivePtr &pr
     prim->set_attr(attr_name, MakeValue<double>(attr_value));
   } else if (attr_tensor_type == onnx::TensorProto_DataType_INT64) {
     size_t data_size = sizeof(int64_t);
-    int32_t attr_value = 0;
+    int64_t attr_value = 0;
     ret = memcpy_s(&attr_value, data_size, tensor_buf.data(), tensor_buf.size());
-    prim->set_attr(attr_name, MakeValue<int32_t>(attr_value));
+    prim->set_attr(attr_name, MakeValue<int64_t>(attr_value));
   } else if (attr_tensor_type == onnx::TensorProto_DataType_BOOL) {
     size_t data_size = sizeof(bool);
     bool attr_value = false;
@@ -427,8 +427,10 @@ int Benchmark::RunBenchmark() {
       return ret;
     }
   }
-  if (model != nullptr) {
-    model->Free();
-  }
+  if (model == nullptr) {
+    MS_LOG(ERROR) << "model is null.";
+    return RET_NULL_PTR;
+  }
+  model->Free();
   ms_inputs_ = session_->GetInputs();
   auto end_prepare_time = GetTimeUs();
@@ -143,11 +143,18 @@ STATUS TransOpInsertPass::ChangeOpAxis(schema::MetaGraphT *graph, const std::uni
   if (type == PrimitiveType_Concat) {
     auto origin_axis = node->primitive->value.AsConcat()->axis;
     auto axis_map = GetNc2NhAxisMap();
-    MS_ASSERT(node->primitive->value.AsConcat() != nullptr);
+    if (node->primitive->value.AsConcat() == nullptr) {
+      MS_LOG(ERROR) << "node->primitive->value.AsConcat() is nullptr";
+      return RET_NULL_PTR;
+    }
     node->primitive->value.AsConcat()->axis = axis_map[origin_axis];
   }
   if (type == PrimitiveType_StridedSlice) {
     auto attr = node->primitive->value.AsStridedSlice();
+    if (attr == nullptr) {
+      MS_LOG(ERROR) << "node->primitive->value.AsStridedSlice() is nullptr";
+      return RET_NULL_PTR;
+    }
     auto origin_begin = attr->begin;
     attr->begin = {origin_begin[NCHW_N], origin_begin[NCHW_H], origin_begin[NCHW_W], origin_begin[NCHW_C]};
     auto origin_end = attr->end;
@@ -158,14 +165,20 @@ STATUS TransOpInsertPass::ChangeOpAxis(schema::MetaGraphT *graph, const std::uni
   if (type == PrimitiveType_Split) {
     auto origin_axis = node->primitive->value.AsSplit()->splitDim;
     auto axis_map = GetNc2NhAxisMap();
-    MS_ASSERT(node->primitive->value.AsSplit != nullptr);
+    if (node->primitive->value.AsSplit() == nullptr) {
+      MS_LOG(ERROR) << "node->primitive->value.AsSplit() is nullptr";
+      return RET_NULL_PTR;
+    }
     node->primitive->value.AsSplit()->splitDim = axis_map[origin_axis];
   }
   if (type == PrimitiveType_Crop) {
     auto origin_axis = node->primitive->value.AsCrop()->axis;
     auto offsets = node->primitive->value.AsCrop()->offsets;
     auto axis_map = GetNc2NhAxisMap();
-    MS_ASSERT(node->primitive->value.AsCrop() != nullptr);
+    if (node->primitive->value.AsCrop() == nullptr) {
+      MS_LOG(ERROR) << "node->primitive->value.AsCrop() is nullptr";
+      return RET_NULL_PTR;
+    }
     node->primitive->value.AsCrop()->axis = axis_map[origin_axis];
     // nchw->nhwc,offsets need pad 0;
     if (axis_map[origin_axis] == 0) {
@@ -181,13 +194,12 @@ STATUS TransOpInsertPass::ChangeOpAxis(schema::MetaGraphT *graph, const std::uni
       MS_LOG(ERROR) << "Crop error";
       return RET_ERROR;
     }
-    MS_ASSERT(node->primitive->value.AsCrop() != nullptr);
     node->primitive->value.AsCrop()->offsets = offsets;
   }
   if (type == PrimitiveType_Slice) {
     auto attr = node->primitive->value.AsSlice();
     if (attr == nullptr) {
-      MS_LOG(ERROR) << "attr is nullptr";
+      MS_LOG(ERROR) << "node->primitive->value.AsSlice() is nullptr";
       return RET_NULL_PTR;
     }
     auto origin_begin = attr->begin;
@@ -26,6 +26,7 @@ namespace lite {
 class CaffeReluParser : public CaffeNodeParser {
  public:
   CaffeReluParser() : CaffeNodeParser("relu") {}
+  ~CaffeReluParser() override = default;
 
   STATUS Parse(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight, schema::CNodeT *op,
                std::vector<schema::TensorT *> *weightVec) override;
@@ -41,6 +41,7 @@ class OnnxNodeRegistrar {
   OnnxNodeRegistrar(const std::string &name, OnnxNodeParser *parser) {
     OnnxNodeParserRegistry::GetInstance()->parsers[name] = parser;
   }
+  ~OnnxNodeRegistrar() = default;
 };
 }  // namespace lite
 }  // namespace mindspore
@@ -1324,7 +1324,6 @@ STATUS PostTrainingQuantizer::BiasCorrection(FuncGraphPtr func_graph) {
   MS_ASSERT(param_value != nullptr);
   param_value->set_tensor_shape(shape);
   param_value->set_tensor_type(kNumberTypeFloat32);
-  // param_value->set_format(tensor->format);
 
   auto size = sizeof(float) * bias_diff.size();
   char *tensor_data = new (std::nothrow) char[size];
@@ -85,6 +85,7 @@ STATUS GetRightMatmulInputParamter(const CNodePtr &stack_node, const ParameterPt
     }
     if (EOK != memcpy_s(new_tensor_data + (i - 1) * tensor_size, tensor_size, tensor_addr, tensor_size)) {
       MS_LOG(ERROR) << "memcpy_s data failed";
+      delete[] new_tensor_data;
       return RET_ERROR;
     }
   }
@@ -156,7 +157,10 @@ const AnfNodePtr BatchMatMulFusion::Process(const FuncGraphPtr &func_graph, cons
   rmatmul_quant_params.pop_back();
   // no bias quantParams
   rmatmul_quant_params.emplace_back(jointed_quant_params);
-  MS_ASSERT(matmul_cvalue != nullptr);
+  if (matmul_cvalue == nullptr) {
+    MS_LOG(ERROR) << "matmul_cvalue is nullptr.";
+    return nullptr;
+  }
   matmul_cvalue->SetInputQuantParams(rmatmul_quant_params);
   matmul_cvalue->SetOutputQuantParams(fc_prim->GetOutputQuantParams());
   auto matmul_value_node = NewValueNode(std::shared_ptr<lite::PrimitiveC>(matmul_cvalue));
@@ -164,14 +168,16 @@
 
   // batchmatmul right node may be const
   if (right_reshape_node->isa<Parameter>()) {
-    // return stack_cnode;
     auto rmatmul_paramter = func_graph->add_parameter();
     if (GetRightMatmulInputParamter(stack_cnode, rmatmul_paramter) != RET_OK) {
      MS_LOG(ERROR) << "GetRightMatmulInputParamter failed";
       return node;
     }
     auto prim = GetValueNode<std::shared_ptr<lite::PrimitiveC>>(matmul_value_node);
-    MS_ASSERT(prim->GetPrimitiveT()->value.AsMatMul() != nullptr);
+    if (prim->GetPrimitiveT()->value.AsMatMul() == nullptr) {
+      MS_LOG(ERROR) << "prim->GetPrimitiveT()->value.AsMatMul() is nullptr.";
+      return nullptr;
+    }
     prim->GetPrimitiveT()->value.AsMatMul()->transposeB = true;
     matmul_inputs.push_back(rmatmul_paramter);
   } else {
@@ -48,8 +48,10 @@ bool ClipConvertActivationPass::Run(const FuncGraphPtr &graph) {
     auto primitive_c = GetValueNode<std::shared_ptr<PrimitiveC>>(clip_cnode->input(0));
     MS_ASSERT(primitive_c != nullptr);
     auto primT = primitive_c->GetPrimitiveT();
-    MS_ASSERT(primT != nullptr);
-    MS_ASSERT(primT->value.AsClip() != nullptr);
+    if (primT == nullptr || primT->value.AsClip() == nullptr) {
+      MS_LOG(ERROR) << "primT is null";
+      return false;
+    }
     float max = primT->value.AsClip()->max;
     float min = primT->value.AsClip()->min;
     if ((min == -1) && (max == -1)) {
@@ -29,8 +29,6 @@ class ClipConvertActivationPass : public Pass {
  public:
   ClipConvertActivationPass() : Pass("clip_convert_activation_pass") {}
   ~ClipConvertActivationPass() override = default;
-  // void SetQuantType(QuantType type);
-  // void SetFmkType(FmkType fmkType);
   bool Run(const FuncGraphPtr &graph) override;
 };
 }  // namespace mindspore::opt