!3735 fix bug in build.sh for output
Merge pull request !3735 from hangq/to_merge
commit a6692888c4

build.sh
@@ -444,11 +444,6 @@ build_protobuf() {
    fi
}

build_gtest() {
    cd ${BASEPATH}
    git submodule update --init --recursive third_party/googletest
}

gene_clhpp() {
    CL_SRC_DIR="${BASEPATH}/mindspore/lite/src/runtime/kernel/opencl/cl"
    for sub_dir in "${CL_SRC_DIR}"/*
@@ -530,9 +525,11 @@ build_lite()
        build_protobuf
    fi
    build_flatbuffer
    build_gtest

    cd "${BASEPATH}/mindspore/lite"
    if [[ "${INC_BUILD}" == "off" ]]; then
        rm -rf build
    fi
    mkdir -pv build
    cd build
    BUILD_TYPE="Release"
@ -590,7 +587,7 @@ build_lite()
|
|||
sha256sum MSLite-0.5.0-linux_x86_64.tar.gz > MSLite-0.5.0-linux_x86_64.tar.gz.256sha
|
||||
rm -rf MSLite-0.5.0-linux_x86_64/
|
||||
elif [[ "$LITE_PLATFORM" == "arm64" ]]; then
|
||||
OUTPUT_DIR=${BASEPATH}/mindspore/lite/output/MSLite-0.5.0-linux_arm64
|
||||
OUTPUT_DIR=${BASEPATH}/output/MSLite-0.5.0-linux_arm64
|
||||
rm -rf ${OUTPUT_DIR} && mkdir -p ${OUTPUT_DIR} && cd ${OUTPUT_DIR}
|
||||
mkdir -p ${OUTPUT_DIR}/time_profile && mkdir -p ${OUTPUT_DIR}/benchmark
|
||||
mkdir -p ${OUTPUT_DIR}/include && mkdir -p ${OUTPUT_DIR}/lib
|
||||
|
@@ -609,7 +606,7 @@ build_lite()
         sha256sum MSLite-0.5.0-linux_arm64.tar.gz > MSLite-0.5.0-linux_arm64.tar.gz.256sha
         rm -rf MSLite-0.5.0-linux_arm64/
     elif [[ "$LITE_PLATFORM" == "arm32" ]]; then
-        OUTPUT_DIR=${BASEPATH}/mindspore/lite/output/MSLite-0.5.0-linux_arm32
+        OUTPUT_DIR=${BASEPATH}/output/MSLite-0.5.0-linux_arm32
         rm -rf ${OUTPUT_DIR} && mkdir -p ${OUTPUT_DIR} && cd ${OUTPUT_DIR}
         mkdir -p ${OUTPUT_DIR}/time_profile && mkdir -p ${OUTPUT_DIR}/benchmark
         mkdir -p ${OUTPUT_DIR}/include && mkdir -p ${OUTPUT_DIR}/lib
@@ -39,6 +39,7 @@ class Model {
 protected:
  std::shared_ptr<ModelImpl> modelImpl = nullptr;
};

class ModelBuilder {
 public:
  struct OutEdge {

@@ -54,4 +55,3 @@ class ModelBuilder {
}  // namespace mindspore

#endif  // MINDSPORE_LITE_INCLUDE_MODEL_H
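The model.h hunk shows Model keeping its state behind a std::shared_ptr<ModelImpl>. A minimal, self-contained sketch of that pimpl-style layout follows; ModelImpl's contents and the AddNode/NodeCount helpers are hypothetical stand-ins, not MindSpore's actual API.

#include <memory>
#include <string>
#include <vector>

class ModelImpl {  // hypothetical implementation object hidden from users of Model
 public:
  void AddNode(const std::string &name) { nodes_.push_back(name); }
  size_t NodeCount() const { return nodes_.size(); }

 private:
  std::vector<std::string> nodes_;
};

class Model {
 public:
  Model() : modelImpl(std::make_shared<ModelImpl>()) {}
  size_t NodeCount() const { return modelImpl ? modelImpl->NodeCount() : 0; }

 protected:
  std::shared_ptr<ModelImpl> modelImpl = nullptr;  // same member as in the hunk
};

int main() {
  Model model;
  return static_cast<int>(model.NodeCount());  // 0: the sketch graph starts empty
}

One consequence of the shared_ptr member is that copies of Model share the same underlying graph instead of duplicating it.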
@@ -68,44 +68,6 @@ int LiteSession::ConvertTensors(const lite::Model *model) {
  return RET_OK;
}

int LiteSession::ConvertKernels(const lite::Model *model, Context *context) {
  // MS_EXCEPTION_IF_NULL(model);
  // auto meta_graph = model->GetMetaGraph();
  // MS_EXCEPTION_IF_NULL(meta_graph);
  // uint32_t kernelCount = meta_graph->nodes()->size();
  // for (uint32_t i = 0; i < kernelCount; i++) {
  //   auto cNode = meta_graph->nodes()->GetAs<schema::CNode>(i);
  //   std::vector<tensor::Tensor *> inputs;
  //   std::vector<tensor::Tensor *> outputs;
  //   auto inIndexes = cNode->inputIndex();
  //   for (size_t j = 0; j < inIndexes->size(); j++) {
  //     inputs.emplace_back(this->tensors.at(size_t(inIndexes->GetAs<uint32_t>(j))));
  //   }
  //   auto outIndexes = cNode->outputIndex();
  //   for (size_t j = 0; j < outIndexes->size(); j++) {
  //     outputs.emplace_back(this->tensors.at(size_t(outIndexes->GetAs<uint32_t>(j))));
  //   }
  //   const auto *primitive = model->GetOp(cNode->name()->str());
  //   if (primitive == nullptr) {
  //     MS_LOG(ERROR) << "Op " << cNode->name()->str() << " should exist in model";
  //     return RET_ERROR;
  //   }
  //   auto ret = primitive->InferShape(inputs, outputs);
  //   if (0 != ret) {
  //     MS_LOG(ERROR) << "InferShape failed, node : " << cNode->name()->str();
  //     return ret;
  //   }
  //   auto *kernel = lite::KernelFactory::GetInstance()->GetKernel(inputs, outputs, cNode, context);
  //   if (nullptr == kernel) {
  //     MS_LOG(ERROR) << "Create kernel return nullptr, name: " << cNode->name()->str()
  //                   << ", type: " << schema::EnumNamePrimitiveType(cNode->primitive()->value_type());
  //     return RET_ERROR;
  //   }
  //   kernels.emplace_back(kernel);
  // }
  return RET_OK;
}

void LiteSession::InitGraphInOutTensor(const lite::Model *model) {
  auto meta_graph = model->GetMetaGraph();
  MS_ASSERT(this->input_map.empty());
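The hunk header (-68,44 +68,6) suggests the commented-out ConvertKernels body is what gets removed here. For reference, the pattern it sketched — resolve each node's input/output tensors by index from the session's tensor pool, infer shapes, then ask a kernel factory for an implementation — looks roughly like the self-contained stand-in below; Tensor and Node are simplified illustration types, not MindSpore's real classes.

#include <cstdint>
#include <iostream>
#include <memory>
#include <string>
#include <vector>

// Stand-ins for lite::tensor::Tensor and a flatbuffer CNode.
struct Tensor { std::vector<int> shape; };
struct Node {
  std::string name;
  std::vector<uint32_t> input_index;   // indexes into the session's tensor pool
  std::vector<uint32_t> output_index;
};

constexpr int RET_OK = 0;

int ConvertKernelsSketch(const std::vector<Node> &nodes,
                         const std::vector<std::unique_ptr<Tensor>> &tensors) {
  for (const auto &node : nodes) {
    std::vector<Tensor *> inputs;
    std::vector<Tensor *> outputs;
    // Same pattern as the commented code: resolve indexes against the tensor pool.
    for (auto idx : node.input_index) {
      inputs.emplace_back(tensors.at(idx).get());
    }
    for (auto idx : node.output_index) {
      outputs.emplace_back(tensors.at(idx).get());
    }
    // The real body then calls primitive->InferShape(inputs, outputs) and
    // KernelFactory::GetInstance()->GetKernel(...), bailing out on failure;
    // this stand-in only reports the fan-in / fan-out of each node.
    std::cout << node.name << ": " << inputs.size() << " inputs, "
              << outputs.size() << " outputs\n";
  }
  return RET_OK;
}

int main() {
  std::vector<std::unique_ptr<Tensor>> pool;
  pool.push_back(std::make_unique<Tensor>());
  pool.push_back(std::make_unique<Tensor>());
  std::vector<Node> nodes = {{"conv0", {0}, {1}}};
  return ConvertKernelsSketch(nodes, pool);
}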
@@ -56,7 +56,7 @@ class LiteSession : public session::LiteSession {

 protected:
  int ConvertTensors(const lite::Model *model);
  int ConvertKernels(const lite::Model *model, Context *context);

  void InitGraphInOutTensor(const lite::Model *model);

 protected:
@@ -316,6 +316,8 @@ int Benchmark::MarkPerformance() {
int Benchmark::MarkAccuracy() {
  MS_LOG(INFO) << "MarkAccuracy";
  for (size_t i = 0; i < msInputs.size(); i++) {
    MS_ASSERT(msInputs.at(i) != nullptr);
    MS_ASSERT(msInputs.at(i)->data_type() == TypeId::kNumberTypeFloat32);
    auto inData = reinterpret_cast<float *>(msInputs.at(i)->MutableData());
    std::cout << "InData" << i << ": ";
    for (size_t j = 0; j < 20; j++) {
@@ -331,14 +333,6 @@ int Benchmark::MarkAccuracy() {

  ReadCalibData();
  CompareOutput();
  if (cleanData) {
    for (auto &msOutput : msOutputs) {
      for (auto &outputTensor : msOutput.second) {
        delete outputTensor;
      }
    }
    msOutputs.clear();
  }
  return 0;
}
@@ -407,10 +401,6 @@ int Benchmark::RunBenchmark(const std::string &deviceType) {
  }

  if (cleanData) {
    for (auto &msInput : msInputs) {
      delete msInput;
    }
    msInputs.clear();
    for (auto &data : calibData) {
      data.second->shape.clear();
      data.second->data.clear();
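The shrinking hunk headers (-331,14 +333,6 and -407,10 +401,6) suggest the cleanData blocks above are being dropped from MarkAccuracy and RunBenchmark. The cleanup they performed — deleting benchmark-owned output tensors and clearing the calibration data read by ReadCalibData() — corresponds roughly to this stand-in; Tensor and CheckTensor are hypothetical illustration types, not the benchmark's real ones.

#include <map>
#include <memory>
#include <string>
#include <vector>

// Stand-ins for the benchmark's tensor and calibration-data records.
struct Tensor { std::vector<float> data; };
struct CheckTensor {
  std::vector<int> shape;
  std::vector<float> data;
};

void CleanBenchmarkData(std::map<std::string, std::vector<Tensor *>> *msOutputs,
                        std::map<std::string, std::unique_ptr<CheckTensor>> *calibData) {
  // Mirror of the MarkAccuracy block: delete every output tensor the benchmark
  // allocated, then drop the map entries.
  for (auto &msOutput : *msOutputs) {
    for (auto &outputTensor : msOutput.second) {
      delete outputTensor;
    }
  }
  msOutputs->clear();
  // Mirror of the RunBenchmark block: empty the golden shapes and values.
  for (auto &data : *calibData) {
    data.second->shape.clear();
    data.second->data.clear();
  }
}

int main() {
  std::map<std::string, std::vector<Tensor *>> outputs;
  outputs["prob"].push_back(new Tensor());
  std::map<std::string, std::unique_ptr<CheckTensor>> calib;
  calib["prob"] = std::make_unique<CheckTensor>();
  CleanBenchmarkData(&outputs, &calib);
  return 0;
}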