!3735 fix bug in build.sh for output

Merge pull request !3735 from hangq/to_merge
mindspore-ci-bot 2020-07-30 20:11:07 +08:00 committed by Gitee
commit a6692888c4
5 changed files with 9 additions and 60 deletions
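For reference, the change can be inspected locally with standard git commands once the commit is available in a clone (the short hash comes from the header above; the path filter assumes the top-level build.sh named in the title):

    # summary of the five changed files
    git show --stat a6692888c4
    # just the build.sh portion of the change
    git show a6692888c4 -- build.sh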

File: build.sh

@@ -444,11 +444,6 @@ build_protobuf() {
     fi
 }
 
-build_gtest() {
-    cd ${BASEPATH}
-    git submodule update --init --recursive third_party/googletest
-}
-
 gene_clhpp() {
     CL_SRC_DIR="${BASEPATH}/mindspore/lite/src/runtime/kernel/opencl/cl"
     for sub_dir in "${CL_SRC_DIR}"/*
@@ -530,9 +525,11 @@ build_lite()
         build_protobuf
     fi
     build_flatbuffer
-    build_gtest
     cd "${BASEPATH}/mindspore/lite"
+    if [[ "${INC_BUILD}" == "off" ]]; then
+        rm -rf build
+    fi
     mkdir -pv build
     cd build
     BUILD_TYPE="Release"
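The lines added above introduce an incremental-build guard: the build directory is only wiped when INC_BUILD is "off". A minimal standalone sketch of the same pattern, assuming an INC_BUILD environment variable and a local build/ directory (only the INC_BUILD check and the rm/mkdir lines come from this patch; the rest is illustrative):

    #!/bin/bash
    set -e
    INC_BUILD="${INC_BUILD:-off}"    # "off" means a full, clean rebuild
    if [[ "${INC_BUILD}" == "off" ]]; then
        rm -rf build                 # full build: drop any previous cache and objects
    fi
    mkdir -pv build && cd build      # an incremental build reuses whatever is already in build/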
@@ -590,7 +587,7 @@ build_lite()
         sha256sum MSLite-0.5.0-linux_x86_64.tar.gz > MSLite-0.5.0-linux_x86_64.tar.gz.256sha
         rm -rf MSLite-0.5.0-linux_x86_64/
     elif [[ "$LITE_PLATFORM" == "arm64" ]]; then
-        OUTPUT_DIR=${BASEPATH}/mindspore/lite/output/MSLite-0.5.0-linux_arm64
+        OUTPUT_DIR=${BASEPATH}/output/MSLite-0.5.0-linux_arm64
         rm -rf ${OUTPUT_DIR} && mkdir -p ${OUTPUT_DIR} && cd ${OUTPUT_DIR}
         mkdir -p ${OUTPUT_DIR}/time_profile && mkdir -p ${OUTPUT_DIR}/benchmark
         mkdir -p ${OUTPUT_DIR}/include && mkdir -p ${OUTPUT_DIR}/lib
@@ -609,7 +606,7 @@ build_lite()
         sha256sum MSLite-0.5.0-linux_arm64.tar.gz > MSLite-0.5.0-linux_arm64.tar.gz.256sha
         rm -rf MSLite-0.5.0-linux_arm64/
     elif [[ "$LITE_PLATFORM" == "arm32" ]]; then
-        OUTPUT_DIR=${BASEPATH}/mindspore/lite/output/MSLite-0.5.0-linux_arm32
+        OUTPUT_DIR=${BASEPATH}/output/MSLite-0.5.0-linux_arm32
         rm -rf ${OUTPUT_DIR} && mkdir -p ${OUTPUT_DIR} && cd ${OUTPUT_DIR}
         mkdir -p ${OUTPUT_DIR}/time_profile && mkdir -p ${OUTPUT_DIR}/benchmark
         mkdir -p ${OUTPUT_DIR}/include && mkdir -p ${OUTPUT_DIR}/lib
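These two OUTPUT_DIR changes are the fix named in the title: the arm64 and arm32 packages should now be assembled under ${BASEPATH}/output/ instead of ${BASEPATH}/mindspore/lite/output/. A hedged way to check the result after an arm64 build (assuming the tarball and its .256sha file end up next to each other in that directory, as the surrounding script suggests):

    cd "${BASEPATH}/output"
    ls MSLite-0.5.0-linux_arm64.tar.gz
    # the .256sha file is plain sha256sum output, so it can be verified in place
    sha256sum -c MSLite-0.5.0-linux_arm64.tar.gz.256sha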

File: mindspore/lite/include/model.h

@@ -39,6 +39,7 @@ class Model {
  protected:
   std::shared_ptr<ModelImpl> modelImpl = nullptr;
 };
+
 class ModelBuilder {
  public:
   struct OutEdge {
@@ -54,4 +55,3 @@ class ModelBuilder {
 }  // namespace mindspore
 #endif  // MINDSPORE_LITE_INCLUDE_MODEL_H

File: lite_session.cc

@@ -68,44 +68,6 @@ int LiteSession::ConvertTensors(const lite::Model *model) {
   return RET_OK;
 }
 
-int LiteSession::ConvertKernels(const lite::Model *model, Context *context) {
-  // MS_EXCEPTION_IF_NULL(model);
-  // auto meta_graph = model->GetMetaGraph();
-  // MS_EXCEPTION_IF_NULL(meta_graph);
-  // uint32_t kernelCount = meta_graph->nodes()->size();
-  // for (uint32_t i = 0; i < kernelCount; i++) {
-  // auto cNode = meta_graph->nodes()->GetAs<schema::CNode>(i);
-  // std::vector<tensor::Tensor *> inputs;
-  // std::vector<tensor::Tensor *> outputs;
-  // auto inIndexes = cNode->inputIndex();
-  // for (size_t j = 0; j < inIndexes->size(); j++) {
-  // inputs.emplace_back(this->tensors.at(size_t(inIndexes->GetAs<uint32_t>(j))));
-  // }
-  // auto outIndexes = cNode->outputIndex();
-  // for (size_t j = 0; j < outIndexes->size(); j++) {
-  // outputs.emplace_back(this->tensors.at(size_t(outIndexes->GetAs<uint32_t>(j))));
-  // }
-  // const auto *primitive = model->GetOp(cNode->name()->str());
-  // if (primitive == nullptr) {
-  // MS_LOG(ERROR) << "Op " << cNode->name()->str() << " should exist in model";
-  // return RET_ERROR;
-  // }
-  // auto ret = primitive->InferShape(inputs, outputs);
-  // if (0 != ret) {
-  // MS_LOG(ERROR) << "InferShape failed, node : " << cNode->name()->str();
-  // return ret;
-  // }
-  // auto *kernel = lite::KernelFactory::GetInstance()->GetKernel(inputs, outputs, cNode, context);
-  // if (nullptr == kernel) {
-  // MS_LOG(ERROR) << "Create kernel return nullptr, name: " << cNode->name()->str()
-  // << ", type: " << schema::EnumNamePrimitiveType(cNode->primitive()->value_type());
-  // return RET_ERROR;
-  // }
-  // kernels.emplace_back(kernel);
-  // }
-  return RET_OK;
-}
-
 void LiteSession::InitGraphInOutTensor(const lite::Model *model) {
   auto meta_graph = model->GetMetaGraph();
   MS_ASSERT(this->input_map.empty());

File: lite_session.h

@@ -56,7 +56,7 @@ class LiteSession : public session::LiteSession {
  protected:
   int ConvertTensors(const lite::Model *model);
-  int ConvertKernels(const lite::Model *model, Context *context);
   void InitGraphInOutTensor(const lite::Model *model);
 
  protected:

File: benchmark.cc

@@ -316,6 +316,8 @@ int Benchmark::MarkPerformance() {
 int Benchmark::MarkAccuracy() {
   MS_LOG(INFO) << "MarkAccuracy";
   for (size_t i = 0; i < msInputs.size(); i++) {
+    MS_ASSERT(msInputs.at(i) != nullptr);
+    MS_ASSERT(msInputs.at(i)->data_type() == TypeId::kNumberTypeFloat32);
     auto inData = reinterpret_cast<float *>(msInputs.at(i)->MutableData());
     std::cout << "InData" << i << ": ";
     for (size_t j = 0; j < 20; j++) {
@@ -331,14 +333,6 @@ int Benchmark::MarkAccuracy() {
   ReadCalibData();
   CompareOutput();
-  if (cleanData) {
-    for (auto &msOutput : msOutputs) {
-      for (auto &outputTensor : msOutput.second) {
-        delete outputTensor;
-      }
-    }
-    msOutputs.clear();
-  }
   return 0;
 }
@@ -407,10 +401,6 @@ int Benchmark::RunBenchmark(const std::string &deviceType) {
   }
 
   if (cleanData) {
-    for (auto &msInput : msInputs) {
-      delete msInput;
-    }
-    msInputs.clear();
     for (auto &data : calibData) {
       data.second->shape.clear();
       data.second->data.clear();