forked from mindspore-Ecosystem/mindspore
!33036 DVPP method adapt and lite whl package bugfix
Merge pull request !33036 from chenping/runtime
Commit 644cc19eb8
@@ -140,6 +140,7 @@ class MS_API MSTensor {

  MSTensor();
  explicit MSTensor(const std::shared_ptr<Impl> &impl);
  // if malloc data, user need to free after constructing MSTensor, else memory leak.
  inline MSTensor(const std::string &name, DataType type, const std::vector<int64_t> &shape, const void *data,
                  size_t data_len);
  explicit MSTensor(std::nullptr_t);
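For context, here is a minimal usage sketch for the five-argument constructor declared above; the header path and the DataType value are illustrative assumptions, not part of this change.

#include <vector>
#include "include/api/types.h"

void BuildInputTensor() {
  std::vector<float> buffer(1 * 3 * 224 * 224, 0.0f);
  // Name, data type, shape, data pointer, and byte length. Per the comment in the
  // header, a malloc'ed buffer must still be freed by the caller after construction.
  mindspore::MSTensor tensor("input_0", mindspore::DataType::kNumberTypeFloat32, {1, 3, 224, 224},
                             buffer.data(), buffer.size() * sizeof(float));
}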
@@ -480,14 +480,18 @@ if(MSLITE_ENABLE_RUNTIME_CONVERT)
                          quantizer_mid fusion_mid proto_mid graph_pass_mid preprocess_mid
                          cpu_kernel_mid ccsrc_src_mid converter_src_mid anf_exporter_mid
                          config_parser_mid mslite_converter_plugin mindspore_core coder_mid
                          ccsrc_debug_common_mid_ mindir_proto_mid _mindspore_transform_express_ir_obj
                          mindir_serializer_mid mindspore::protobuf ${SECUREC_LIBRARY})
    target_link_libraries(mindspore-lite_static
                          quantizer_mid fusion_mid proto_mid graph_pass_mid preprocess_mid
                          cpu_kernel_mid ccsrc_src_mid converter_src_mid anf_exporter_mid
                          config_parser_mid mslite_converter_plugin mindspore_core coder_mid
                          ccsrc_debug_common_mid_ mindir_proto_mid _mindspore_transform_express_ir_obj
                          mindir_serializer_mid mindspore::protobuf ${SECUREC_LIBRARY})
    if(NOT ENABLE_CLOUD_AND_LITE)
        target_link_libraries(mindspore-lite
            ccsrc_debug_common_mid_ mindir_proto_mid _mindspore_transform_express_ir_obj)
        target_link_libraries(mindspore-lite_static
            ccsrc_debug_common_mid_ mindir_proto_mid _mindspore_transform_express_ir_obj)
    endif()
    if(MSLITE_ENABLE_ACL)
        target_link_libraries(mindspore-lite lite_acl_mid
                              mindspore_shared_lib)
@@ -552,4 +556,4 @@ endif()

    if(ENABLE_CLOUD_AND_LITE)
        set_target_properties(mindspore-lite PROPERTIES INSTALL_RPATH $ORIGIN)
    endif()
endif()
@@ -315,6 +315,20 @@ int CheckTensorsInvalid(const std::vector<Tensor *> &tensors) {
  return RET_OK;
}

std::string ShapeToString(const std::vector<int> &shape) {
  std::string result = "[";
  int max_size = 40;
  result.reserve(max_size);
  for (size_t i = 0; i < shape.size(); ++i) {
    result += std::to_string(shape[i]);
    if (i + 1 < shape.size()) {
      result += ", ";
    }
  }
  result += "]";
  return result;
}

int CheckGraphInputShapes(const std::vector<Tensor *> &inputs,
                          const std::unordered_map<Tensor *, std::vector<int>> &input_shape_map) {
  for (const auto input : inputs) {
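For example, ShapeToString({1, 3, 224, 224}) yields the string "[1, 3, 224, 224]", and an empty shape yields "[]".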
@@ -324,9 +338,17 @@ int CheckGraphInputShapes(const std::vector<Tensor *> &inputs,
      return RET_ERROR;
    }
    if (!input_shape_map.at(input).empty() && input_shape_map.at(input) != input->shape()) {
#ifndef ENABLE_LITE_ACL
      MS_LOG(ERROR) << "graph input:" << input->tensor_name()
                    << " shape has been illegally modified, please modify the input shape with method Resize().";
      return RET_ERROR;
#else
      MS_LOG(WARNING) << "Please check graph input " << input->tensor_name()
                      << " shape:" << ShapeToString(input->shape())
                      << " has been modified by DVPP method to shape:" << ShapeToString(input_shape_map.at(input))
                      << "."
                      << "If not, the modification is illegal, please modify the input shape with method Resize().";
#endif
    }
  }
  return RET_OK;
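The messages above direct callers to Resize(); a minimal sketch of that call through the C++ API follows (the model pointer, input index, and dims are illustrative assumptions).

#include <vector>
#include "include/api/model.h"

mindspore::Status ResizeFirstInput(mindspore::Model *model) {
  auto inputs = model->GetInputs();
  std::vector<std::vector<int64_t>> new_dims = {{1, 112, 112, 3}};
  // Resize() is the supported way to change a graph input shape; writing the
  // tensor shape directly is what the check above flags.
  return model->Resize({inputs[0]}, new_dims);
}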
@@ -302,8 +302,15 @@ Status ModelImpl::Predict(const std::vector<MSTensor> &inputs, std::vector<MSTen
    if (user_input.MutableData() != input->data()) {
      if (input->Size() != user_input.DataSize()) {
        ResetTensorData(old_data, input_tensors);
#ifndef ENABLE_LITE_ACL
        MS_LOG(ERROR) << "Tensor " << user_input.Name() << " has wrong data size.";
        return kLiteInputTensorError;
#else
        MS_LOG(WARNING) << "Please check tensor " << user_input.Name()
                        << " has been modified data size by DVPP method.";
        std::vector<int> truncate_shape = {static_cast<int>(user_input.DataSize())};
        input->set_shape(truncate_shape);
#endif
      }
      input->set_data(user_input.MutableData());
    }
@@ -104,7 +104,24 @@ tensor::MSTensor *tensor::MSTensor::CreateTensorByDeepCopy(const std::string &na
    tensor->set_data(const_cast<void *>(new_data));
  }

  tensor->set_shape(shape);
  size_t shape_size = 1;
  if (shape.empty()) {
    shape_size = 0;
  } else {
    for (size_t i = 0; i < shape.size(); ++i) {
      if (shape[i] < 0) {
        delete tensor;
        return nullptr;
      }
      shape_size *= static_cast<size_t>(shape[i]);
    }
  }
  if (data_len != shape_size * data_type_size) {
    std::vector<int> truncate_shape = {static_cast<int>(data_len)};
    tensor->set_shape(truncate_shape);
  } else {
    tensor->set_shape(shape);
  }
  tensor->set_tensor_name(name);
  tensor->set_data_type(type);
  return tensor;
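In other words, assuming data_type_size above is the per-element byte size, a float32 tensor declared with shape {2, 3} keeps that shape only when data_len equals 6 * sizeof(float); any other data_len makes the deep copy fall back to the flat one-dimensional shape {data_len}.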
@@ -15,6 +15,7 @@
 */

#include "src/runtime/kernel/ascend/src/model_process.h"
#include <sys/time.h>
#include <utility>
#include <algorithm>
#include <map>
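Presumably <sys/time.h> is the include being added here; it declares gettimeofday(), which the timed aclmdlExecute path later in this file relies on.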
@@ -426,9 +427,9 @@ STATUS ModelProcess::CheckTensorByTensorInfo(const std::vector<mindspore::MSTens
  if (!IsDynamicShape()) {
    for (size_t i = 0; i < tensor_info.size(); ++i) {
      if (tensor[i].Shape() != tensor_info[i].dims) {
        MS_LOG(ERROR) << "Note: input " << i << " shape not match, required " << ShapeToString(tensor_info[i].dims)
                      << ", given " << ShapeToString(tensor[i].Shape());
        return lite::RET_ERROR;
        MS_LOG(WARNING) << "Note: input " << i << " shape not match, required " << ShapeToString(tensor_info[i].dims)
                        << ", given " << ShapeToString(tensor[i].Shape()) << "."
                        << "Please check input shape has been modified by DVPP method.";
      }
      if (tensor[i].DataType() != TransToDataType(tensor_info[i].data_type)) {
        MS_LOG(ERROR) << "Note: input " << i << " data type not match, required "
@@ -588,7 +589,24 @@ STATUS ModelProcess::PredictFromHost(const std::vector<mindspore::MSTensor> &inp
    DestroyInputsDataset();
    return ret;  // forward status error
  }
  aclError acl_ret = aclmdlExecute(model_id_, inputs_, outputs_);

  aclError acl_ret;
  auto env = std::getenv("GLOG_v");
  if (env != nullptr && env[0] == '1') {
    struct timeval start_time;
    struct timeval end_time;
    (void)gettimeofday(&start_time, nullptr);
    acl_ret = aclmdlExecute(model_id_, inputs_, outputs_);
    (void)gettimeofday(&end_time, nullptr);
    constexpr uint64_t kUSecondInSecond = 1000000;
    uint64_t cost =
      (kUSecondInSecond * static_cast<uint64_t>(end_time.tv_sec) + static_cast<uint64_t>(end_time.tv_usec)) -
      (kUSecondInSecond * static_cast<uint64_t>(start_time.tv_sec) + static_cast<uint64_t>(start_time.tv_usec));
    MS_LOG(INFO) << "Model execute in " << cost << " us";
  } else {
    acl_ret = aclmdlExecute(model_id_, inputs_, outputs_);
  }

  DestroyInputsDataset();
  if (acl_ret != ACL_ERROR_NONE) {
    MS_LOG(ERROR) << "Execute Model Failed, ret = " << acl_ret;
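With this branch in place, running with the environment variable GLOG_v set to a value starting with '1' routes inference through the timed path, so each aclmdlExecute call logs its wall-clock cost in microseconds at INFO level; otherwise the model executes without the timing overhead.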
@@ -119,6 +119,15 @@ schema::MetaGraphT *Converter::Convert(const std::unique_ptr<converter::Flags> &
    MS_LOG(ERROR) << "Parser/Import model return nullptr";
    return nullptr;
  }
  MS_CHECK_TRUE_MSG(funcgraph_transform_ != nullptr, nullptr, "funcgraph_transform init failed.");
  // funcgraph_transform
  graph = funcgraph_transform_->Transform(graph, flag.get());
  MS_CHECK_TRUE_MSG(graph != nullptr, nullptr, "Transform anf graph return nullptr.");
  // export protobuf
  auto status = MindIRSerialize(flag, graph);
  if (status != RET_OK) {
    MS_LOG(WARNING) << "Export to mindir proto return nullptr.";
  }
  return TransferFuncGraph(flag, graph);
}