Limit NPU compilation platform to arm64 only.

yeyunpeng 2020-12-25 15:54:54 +08:00
parent be56c201ef
commit 8aa6e2aaad
7 changed files with 19 additions and 13 deletions

View File

@@ -508,12 +508,18 @@ build_lite()
LITE_ENABLE_NPU="on"
fi
if [ "${LITE_ENABLE_GPU}" == "on" ] || [ $1 == "arm64" ]; then
if [[ $1 == "arm64" && "X$DEVICE" != "Xcpu" ]]; then
LITE_ENABLE_GPU="on"
echo "start get opencl"
fi
if [ "${LITE_ENABLE_NPU}" == "on" ]; then
checkddk
if [ "${LITE_PLATFORM}" == "arm64" ]; then
checkddk
else
echo "NPU only support platform arm64."
exit 1
fi
fi
cd "${BASEPATH}/mindspore/lite"
@@ -648,7 +654,6 @@ build_jni_arm32() {
build_java() {
JAVA_PATH=${BASEPATH}/mindspore/lite/java
LITE_ENABLE_GPU="on"
get_version
build_lite_java_arm64
build_lite_java_arm32

View File

@@ -39,7 +39,7 @@ extern "C" JNIEXPORT jlong JNICALL Java_com_mindspore_lite_config_MSConfig_creat
break;
}
case 2: // DT_NPU
MS_LOGE("We only support CPU now.");
MS_LOGE("We only support CPU and GPU now.");
return (jlong)context;
break;
default:

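For readers outside the repo, here is a self-contained sketch of the device-type dispatch this hunk edits. Only the case-2 (DT_NPU) branch and its updated message come from the diff; the enum values, stand-in types, and plain stderr logging below are assumptions in place of the real JNI signatures and MS_LOGE macro.

// Sketch of the MSConfig device-type switch; enum, struct, and logging
// are stand-ins, not the real MindSpore JNI code.
#include <cstdio>

enum DeviceType { DT_CPU = 0, DT_GPU = 1, DT_NPU = 2 };  // assumed mapping (case 2 is DT_NPU in the diff)
struct Context {};  // stand-in for the lite context object

long CreateMSConfig(int device_type, Context *context) {
  switch (device_type) {
    case DT_CPU:
    case DT_GPU:
      break;  // supported: fall through to normal configuration (elided)
    case DT_NPU:
      // Message updated by this commit: GPU is now supported, NPU is not.
      std::fprintf(stderr, "We only support CPU and GPU now.\n");
      return reinterpret_cast<long>(context);
    default:
      return reinterpret_cast<long>(context);
  }
  // ... configure the context for the chosen device (elided) ...
  return reinterpret_cast<long>(context);
}
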
View File

@@ -20,10 +20,7 @@
namespace mindspore::lite {
void NPUPassManager::AddPass(NPUBasePass *pass) { all_pass_.push_back(pass); }
int NPUPassManager::Run(const InnerContext *context) {
if (!context->IsNpuEnabled()) {
return RET_OK;
}
int NPUPassManager::Run() {
for (auto pass : all_pass_) {
auto ret = pass->Run();
if (ret != RET_OK) {

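This hunk removes both the context parameter and the IsNpuEnabled() check from Run(). A self-contained sketch of the method as it reads after the commit (NPUBasePass and the return code below are stand-ins for the real interfaces):

#include <vector>

constexpr int RET_OK = 0;  // stand-in for the lite return code

struct NPUBasePass {       // stand-in for the real pass interface
  virtual ~NPUBasePass() = default;
  virtual int Run() = 0;
};

class NPUPassManager {
 public:
  void AddPass(NPUBasePass *pass) { all_pass_.push_back(pass); }

  // After this commit Run() takes no InnerContext: the IsNpuEnabled()
  // check moved up into Scheduler::RunPass (last file of this commit).
  int Run() {
    for (auto pass : all_pass_) {
      auto ret = pass->Run();
      if (ret != RET_OK) {
        return ret;  // abort on the first failing pass
      }
    }
    return RET_OK;
  }

 private:
  std::vector<NPUBasePass *> all_pass_;
};

Hoisting the enable check out of the pass manager keeps it free of context plumbing; the scheduler decides once whether any NPU pass should run at all.
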
View File

@@ -31,7 +31,7 @@ class NPUPassManager {
void AddPass(NPUBasePass *pass);
int Run(const InnerContext *context);
int Run();
void Clear();

View File

@@ -54,7 +54,8 @@ kernel::LiteKernel *NPUKernelCreator(const std::vector<lite::Tensor *> &inputs,
const lite::InnerContext *ctx, const kernel::KernelKey &desc,
const mindspore::lite::PrimitiveC *primitive) {
if (!primitive->infer_flag()) {
MS_LOG(ERROR) << "NPU does not support runtime inference shape";
MS_LOG(ERROR) << "NPU does not support runtime inference shape. Type is:"
<< schema::EnumNamePrimitiveType(static_cast<schema::PrimitiveType>(primitive->Type()));
return nullptr;
}

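The added lines enrich the error with the primitive's type name via schema::EnumNamePrimitiveType. A compilable sketch of the guard follows; everything except that call and the log text is a stand-in for the real MindSpore types:

#include <iostream>

namespace schema {
enum class PrimitiveType { PrimitiveType_Conv2D = 0 };  // stand-in enum, one value for illustration
inline const char *EnumNamePrimitiveType(PrimitiveType) { return "Conv2D"; }  // stand-in helper
}  // namespace schema

struct PrimitiveC {  // stand-in for lite::PrimitiveC
  bool infer_flag() const { return inferred_; }
  int Type() const { return type_; }
  bool inferred_ = false;
  int type_ = 0;
};

// Refuse to build an NPU kernel when shape inference has not run,
// now naming the offending operator type in the log.
const void *NPUKernelCreator(const PrimitiveC *primitive) {
  if (!primitive->infer_flag()) {
    std::cerr << "NPU does not support runtime inference shape. Type is:"
              << schema::EnumNamePrimitiveType(
                     static_cast<schema::PrimitiveType>(primitive->Type()))
              << std::endl;
    return nullptr;
  }
  return primitive;  // placeholder for the real kernel construction
}
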
View File

@@ -567,6 +567,9 @@ void Scheduler::FindAllInoutKernels(const std::vector<kernel::LiteKernel *> &ker
int Scheduler::RunPass(std::vector<kernel::LiteKernel *> *dst_kernels) {
int ret = RET_OK;
#if SUPPORT_NPU
if (!context_->IsNpuEnabled()) {
return RET_OK;
}
auto transform_pass = new NPUTransformPass(context_, dst_kernels, src_tensors_);
mindspore::lite::NPUPassManager::GetInstance()->AddPass(transform_pass);
auto concat_format_pass = new NPUInsertTransformPass(context_, dst_kernels, src_tensors_);
@@ -574,7 +577,7 @@ int Scheduler::RunPass(std::vector<kernel::LiteKernel *> *dst_kernels) {
auto fusion_pass = new NPUFusionPass(dst_kernels);
mindspore::lite::NPUPassManager::GetInstance()->AddPass(fusion_pass);
ret = mindspore::lite::NPUPassManager::GetInstance()->Run(context_);
ret = mindspore::lite::NPUPassManager::GetInstance()->Run();
#endif
return ret;
}

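Combined with the pass-manager change above, the NPU-enabled check now gates all of RunPass. A sketch of the resulting control flow (the types and pass registration below are stand-ins; the early return and the argument-free Run() come from the diff):

constexpr int RET_OK = 0;  // stand-in return code

struct InnerContext {      // stand-in for lite::InnerContext
  bool IsNpuEnabled() const { return npu_enabled_; }
  bool npu_enabled_ = false;
};

struct PassManager {       // stand-in for NPUPassManager::GetInstance()
  int Run() { return RET_OK; }
};

int RunPass(const InnerContext *context, PassManager *manager) {
#if SUPPORT_NPU  // same compile-time gate as in the diff
  // New early return: when NPU is off, skip registering and running
  // the transform/insert/fusion passes entirely.
  if (!context->IsNpuEnabled()) {
    return RET_OK;
  }
  // ... AddPass(transform/insert/fusion) as in the diff (elided) ...
  return manager->Run();  // Run() no longer needs the context
#else
  (void)context;
  (void)manager;
  return RET_OK;
#endif
}
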
View File

@@ -1316,8 +1316,8 @@ function Run_arm64() {
# Run npu converted models:
while read line; do
model_name=`echo ${mindspore_line_info}|awk -F ' ' '{print $1}'`
accuracy_limit=`echo ${mindspore_line_info}|awk -F ' ' '{print $2}'`
model_name=`echo ${line}|awk -F ' ' '{print $1}'`
accuracy_limit=`echo ${line}|awk -F ' ' '{print $2}'`
echo "mindspore run npu: ${model_name}, accuracy limit:${accuracy_limit}" >> "${run_arm64_log_file}"
echo 'cd /data/local/tmp/benchmark_test' > adb_run_cmd.txt
echo 'export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/data/local/tmp/benchmark_test;./benchmark --device=NPU --modelFile='${model_name}'.ms --inDataFile=/data/local/tmp/input_output/input/'${model_name}'.ms.bin --benchmarkDataFile=/data/local/tmp/input_output/output/'${model_name}'.ms.out --accuracyThreshold='${accuracy_limit} >> "${run_arm64_log_file}"