forked from mindspore-Ecosystem/mindspore

fix api

parent 911b7b9950
commit cf5a6e324a
@@ -364,6 +364,7 @@ constexpr char kDecModeAesGcm[] = "AES-GCM";
 struct MSCallBackParam {
   std::string node_name; /**< node name argument */
   std::string node_type; /**< node type argument */
+  double execute_time;   /**< gpu execute time */
 };
 
 /// \brief KernelCallBack defined the function pointer for callBack.
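This hunk folds the GPU-only timing field into MSCallBackParam, so one parameter type now serves every device. A minimal sketch of a profiling callback against the patched struct (MSKernelCallBack is assumed to be the std::function alias declared next to it in include/api/types.h; PrintNodeTime is a made-up name):

// Minimal sketch, assuming include/api/types.h declares MSCallBackParam and
// the MSKernelCallBack std::function alias; PrintNodeTime is illustrative.
#include <cstdio>
#include <vector>
#include "include/api/types.h"

bool PrintNodeTime(const std::vector<mindspore::MSTensor> &inputs,
                   const std::vector<mindspore::MSTensor> &outputs,
                   const mindspore::MSCallBackParam &param) {
  // execute_time now lives on the shared struct; no GPU-specific cast needed.
  std::printf("%s (%s): %.3f ms\n", param.node_name.c_str(),
              param.node_type.c_str(), param.execute_time);
  return true;  // returning false aborts graph execution
}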
@@ -157,7 +157,11 @@ std::vector<char> Status::GetErrDescriptionChar() const {
   if (data_ == nullptr) {
     return std::vector<char>();
   }
-  return StringToChar(data_->err_description);
+  if (data_->err_description.empty()) {
+    return ToCString();
+  } else {
+    return StringToChar(data_->err_description);
+  }
 }
 
 std::vector<char> Status::CodeAsCString(enum StatusCode c) {
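The rewritten body keeps the nullptr guard and adds a fallback: an empty err_description now yields the status's own string form instead of an empty vector. A hedged sketch of the caller-visible effect (the Status constructor and the GetErrDescription() wrapper are assumed from include/api/status.h):

// Hedged sketch, assuming the Status ctor and GetErrDescription() wrapper
// from include/api/status.h.
#include <iostream>
#include "include/api/status.h"

int main() {
  mindspore::Status st(mindspore::kLiteError);  // SetErrDescription() never called
  // Before the patch this could print an empty string; now it falls back to the
  // status text produced by ToCString()/ToString().
  std::cout << st.GetErrDescription() << std::endl;
  return 0;
}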
@@ -1,5 +1,5 @@
 /**
- * Copyright 2020 Huawei Technologies Co., Ltd
+ * Copyright 2020-2022 Huawei Technologies Co., Ltd
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -16,10 +16,7 @@
 #ifndef MINDSPORE_LITE_INCLUDE_LITE_TYPES_H_
 #define MINDSPORE_LITE_INCLUDE_LITE_TYPES_H_
 
-#include <functional>
 #include <memory>
-#include <string>
-#include <vector>
 
 namespace mindspore {
 class Allocator;
@@ -62,20 +59,6 @@ typedef enum {
   MT_TRAIN,    /**< Both Train and Inference part of the compiled model are serialized */
   MT_INFERENCE /**< Only the Inference part of the compiled model is serialized */
 } ModelType;
-
-/// \brief CallBackParam defined input arguments for callBack function.
-struct CallBackParam {
-  std::string node_name; /**< node name argument */
-  std::string node_type; /**< node type argument */
-};
-
-struct GPUCallBackParam : CallBackParam {
-  double execute_time{-1.f};
-};
-
-/// \brief KernelCallBack defined the function pointer for callBack.
-using KernelCallBack = std::function<bool(std::vector<lite::Tensor *> inputs, std::vector<lite::Tensor *> outputs,
-                                          const CallBackParam &opInfo)>;
 } // namespace lite
 } // namespace mindspore
 #endif // MINDSPORE_LITE_INCLUDE_LITE_TYPES_H_
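With lite::CallBackParam and lite::GPUCallBackParam gone, their one remaining payload lives on MSCallBackParam. A hedged migration sketch (values illustrative):

// Migration sketch, assuming include/api/types.h; values are illustrative.
#include "include/api/types.h"

void Example() {
  // Old API (removed above): lite::GPUCallBackParam gpu_param;
  //                          double t = gpu_param.execute_time;
  mindspore::MSCallBackParam param;
  param.node_name = "Conv2D_1";  // illustrative
  param.node_type = "Conv2D";
  param.execute_time = 0.42;     // on GPU the runtime fills this, in milliseconds
}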
@@ -193,7 +193,7 @@ Status ModelC::RunGraph(const MSKernelCallBackC &before, const MSKernelCallBackC
   if (before != nullptr) {
     before_call_back = [&](const std::vector<mindspore::lite::Tensor *> &before_inputs,
                            const std::vector<mindspore::lite::Tensor *> &before_outputs,
-                           const lite::CallBackParam &call_param) {
+                           const MSCallBackParam &call_param) {
       std::vector<LiteTensorImpl> inputs_impl;
       std::vector<LiteTensorImpl> outputs_impl;
       std::vector<MSTensorHandle> op_inputs;
@@ -218,7 +218,7 @@ Status ModelC::RunGraph(const MSKernelCallBackC &before, const MSKernelCallBackC
   if (after != nullptr) {
     after_call_back = [&](const std::vector<mindspore::lite::Tensor *> &after_inputs,
                           const std::vector<mindspore::lite::Tensor *> &after_outputs,
-                          const lite::CallBackParam &call_param) {
+                          const MSCallBackParam &call_param) {
       std::vector<LiteTensorImpl> inputs_impl;
       std::vector<LiteTensorImpl> outputs_impl;
       std::vector<MSTensorHandle> op_inputs;
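The same signature change lands in both wrapper lambdas of the C-API layer, and it cannot be papered over: a lambda only converts to a std::function whose parameter list matches, so once lite::KernelCallBack switched to MSCallBackParam every wrapper had to follow. A self-contained illustration (OldParam/NewParam are stand-ins, not MindSpore types):

// Self-contained illustration of the std::function conversion rule.
#include <functional>

struct OldParam {};
struct NewParam {};
using CallBack = std::function<bool(const NewParam &)>;

CallBack ok = [](const NewParam &) { return true; };    // signatures match: compiles
// CallBack bad = [](const OldParam &) { return true; }; // would not compile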
@@ -185,26 +185,20 @@ Status ModelImpl::RunGraph(const MSKernelCallBack &before, const MSKernelCallBac
   if (before != nullptr) {
     before_call_back = [&](const std::vector<mindspore::lite::Tensor *> &before_inputs,
                            const std::vector<mindspore::lite::Tensor *> &before_outputs,
-                           const lite::CallBackParam &call_param) {
+                           const MSCallBackParam &call_param) {
       std::vector<MSTensor> inputs = LiteTensorsToMSTensors(before_inputs);
       std::vector<MSTensor> outputs = LiteTensorsToMSTensors(before_outputs);
-      MSCallBackParam mscall_param;
-      mscall_param.node_name = call_param.node_name;
-      mscall_param.node_type = call_param.node_type;
-      return before(inputs, outputs, mscall_param);
+      return before(inputs, outputs, call_param);
     };
   }
 
   if (after != nullptr) {
     after_call_back = [&](const std::vector<mindspore::lite::Tensor *> &before_inputs,
                           const std::vector<mindspore::lite::Tensor *> &before_outputs,
-                          const lite::CallBackParam &call_param) {
+                          const MSCallBackParam &call_param) {
       std::vector<MSTensor> inputs = LiteTensorsToMSTensors(before_inputs);
       std::vector<MSTensor> outputs = LiteTensorsToMSTensors(before_outputs);
-      MSCallBackParam mscall_param;
-      mscall_param.node_name = call_param.node_name;
-      mscall_param.node_type = call_param.node_type;
-      return after(inputs, outputs, mscall_param);
+      return after(inputs, outputs, call_param);
     };
   }
   auto ret = session_->RunGraph(before_call_back, after_call_back);
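Here, as in the model_c.cc wrappers above, the parameter types now line up end to end, so the temporary MSCallBackParam and its field-by-field copy disappear. A sketch of a callback as it sees the data now (MSKernelCallBack signature assumed from the public C++ API):

// Sketch, assuming the public MSKernelCallBack alias:
//   std::function<bool(const std::vector<MSTensor> &, const std::vector<MSTensor> &,
//                      const MSCallBackParam &)>
#include <vector>
#include "include/api/types.h"

mindspore::MSKernelCallBack after_cb = [](const std::vector<mindspore::MSTensor> &in,
                                          const std::vector<mindspore::MSTensor> &out,
                                          const mindspore::MSCallBackParam &p) {
  // p is the executor's own struct -- node_name, node_type and, on GPU,
  // execute_time -- forwarded without an intermediate copy.
  return true;
};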
@@ -40,7 +40,7 @@ int OpenCLExecutor::RunOrTune(const std::vector<Tensor *> &inputs, const std::ve
   }
   for (auto *kernel : kernels) {
     MS_ASSERT(kernel);
-    GPUCallBackParam callbackParam;
+    MSCallBackParam callbackParam;
     callbackParam.node_name = kernel->name();
     callbackParam.node_type = kernel->type_str();
     if ((before != nullptr) && !before(kernel->in_tensors(), kernel->out_tensors(), callbackParam)) {
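The OpenCL executor now fills the same MSCallBackParam as every other backend. A standalone sketch of the reporting pattern (a local stand-in struct; the real executor presumably derives its timings from OpenCL profiling events, which this hunk does not show):

// Standalone sketch with a local stand-in struct mirroring the patched type.
#include <chrono>
#include <iostream>
#include <string>

struct MSCallBackParam {  // stand-in, not the MindSpore header
  std::string node_name;
  std::string node_type;
  double execute_time;    // milliseconds
};

int main() {
  MSCallBackParam param{"MatMul_3", "MatMul", 0.0};  // illustrative node
  auto t0 = std::chrono::steady_clock::now();
  // ... kernel->Run() would execute here ...
  auto t1 = std::chrono::steady_clock::now();
  param.execute_time = std::chrono::duration<double, std::milli>(t1 - t0).count();
  std::cout << param.node_name << ": " << param.execute_time << " ms\n";
  return 0;
}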
@@ -1,5 +1,5 @@
 /**
- * Copyright 2020-2021 Huawei Technologies Co., Ltd
+ * Copyright 2020-2022 Huawei Technologies Co., Ltd
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -39,6 +39,11 @@
 #include "extendrt/mindir_loader/abstract_kernel.h"
 #include "include/lite_types.h"
 
+namespace mindspore::lite {
+using KernelCallBack = std::function<bool(std::vector<lite::Tensor *> inputs, std::vector<lite::Tensor *> outputs,
+                                          const MSCallBackParam &opInfo)>;
+}
+
 using mindspore::infer::Abstractkernel;
 using mindspore::lite::KernelCallBack;
 
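Since the commit deletes the alias from include/lite_types.h (hunk at -62,20 above), the extendrt mindir loader re-declares KernelCallBack locally, now against MSCallBackParam. A hedged usage sketch (assumes the loader header above is included so the alias and lite::Tensor are visible):

// Hedged usage sketch; relies on the loader header for the alias and Tensor.
mindspore::lite::KernelCallBack cb =
    [](std::vector<mindspore::lite::Tensor *> inputs,
       std::vector<mindspore::lite::Tensor *> outputs,
       const mindspore::MSCallBackParam &info) {
      return !info.node_name.empty();  // illustrative: reject unnamed nodes
    };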
@@ -1,2 +1,2 @@
 Note: This is the mindspore Lite inference framework size threshold. Offline review is required before modify this value!!!
-1022256
+1022266
@@ -1208,8 +1208,7 @@ int BenchmarkUnifiedApi::InitTimeProfilingCallbackParameter() {
   std::lock_guard<std::mutex> _l(op_times_mutex_);
   float cost = static_cast<float>(opEnd - op_start_times_by_name_[call_param.node_name]) / kFloatMSEC;
   if (flags_->device_ == "GPU") {
-    auto gpu_param = reinterpret_cast<const GPUCallBackParam &>(call_param);
-    cost = static_cast<float>(gpu_param.execute_time);
+    cost = static_cast<float>(call_param.execute_time);
   }
   op_cost_total_ += cost;
   op_times_by_type_[call_param.node_type].first++;
@@ -1257,8 +1256,7 @@ int BenchmarkUnifiedApi::InitTimeProfilingCallbackParameter() {
 
   float cost = static_cast<float>(opEnd - op_begin_) / kFloatMSEC;
   if (flags_->device_ == "GPU") {
-    auto gpu_param = reinterpret_cast<const GPUCallBackParam &>(call_param);
-    cost = static_cast<float>(gpu_param.execute_time);
+    cost = static_cast<float>(call_param.execute_time);
   }
   op_cost_total_ += cost;
   op_times_by_type_[call_param.node_type].first++;
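Both benchmark call sites drop the reinterpret_cast downcast, which was only defined behavior when the runtime really passed a GPUCallBackParam. A self-contained illustration of the hazard the unified struct removes (local stand-in types, not the MindSpore headers):

// Self-contained illustration; the structs are local stand-ins for the removed
// lite types.
#include <iostream>
#include <string>

struct CallBackParam { std::string node_name; };
struct GPUCallBackParam : CallBackParam { double execute_time{-1.0}; };

double ReadGpuTime(const CallBackParam &p) {
  // Old pattern: only defined behavior when p really is a GPUCallBackParam.
  return reinterpret_cast<const GPUCallBackParam &>(p).execute_time;
}

int main() {
  GPUCallBackParam gpu;
  gpu.execute_time = 0.42;
  std::cout << ReadGpuTime(gpu) << "\n";  // OK: dynamic type matches

  CallBackParam cpu;
  (void)cpu;
  // ReadGpuTime(cpu) would read past the object -- the hazard the unified
  // MSCallBackParam removes by making execute_time a plain field read.
  return 0;
}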