diff --git a/include/api/context.h b/include/api/context.h
index 81a80728753..e26eb5d726e 100644
--- a/include/api/context.h
+++ b/include/api/context.h
@@ -308,7 +308,7 @@ void GPUDeviceInfo::SetPrecisionMode(const std::string &precision_mode) {
 }
 std::string GPUDeviceInfo::GetPrecisionMode() const { return CharToString(GetPrecisionModeChar()); }
 
-/// \brief Derived from DeviceInfoContext, The configuration of the model running on the Ascend310. This option is
+/// \brief Derived from DeviceInfoContext, The configuration of the model running on the Ascend. This option is
 /// invalid for MindSpore Lite.
 class MS_API AscendDeviceInfo : public DeviceInfoContext {
  public:
diff --git a/include/api/model.h b/include/api/model.h
index 4a93415ec7a..36e45711391 100644
--- a/include/api/model.h
+++ b/include/api/model.h
@@ -212,7 +212,7 @@ class MS_API Model {
 
   /// \brief Inference model.
   ///
-  /// \param[in] device_type Device type,options are kGPU, kAscend, kAscend910, etc.
+  /// \param[in] device_type Device type, options are kGPU, kAscend, etc.
   /// \param[in] model_type The type of model file, options are ModelType::kMindIR, ModelType::kOM.
   ///
   /// \return Is supported or not.
diff --git a/mindspore/ccsrc/cxx_api/model/acl/acl_model_options.cc b/mindspore/ccsrc/cxx_api/model/acl/acl_model_options.cc
index 32fcd384a3f..0de0280817e 100644
--- a/mindspore/ccsrc/cxx_api/model/acl/acl_model_options.cc
+++ b/mindspore/ccsrc/cxx_api/model/acl/acl_model_options.cc
@@ -33,15 +33,15 @@ AclModelOptions::AclModelOptions(const std::shared_ptr &context) {
   if (device_infos.size() != 1) {
     return;
   }
-  auto ascend310_info = device_infos[0]->Cast();
-  if (ascend310_info == nullptr) {
+  auto ascend_info = device_infos[0]->Cast();
+  if (ascend_info == nullptr) {
     return;
   }
-  insert_op_cfg_path_ = ascend310_info->GetInsertOpConfigPath();
-  input_format_ = ascend310_info->GetInputFormat();
-  input_shape_map_ = ascend310_info->GetInputShapeMap();
-  auto out_type = ascend310_info->GetOutputType();
+  insert_op_cfg_path_ = ascend_info->GetInsertOpConfigPath();
+  input_format_ = ascend_info->GetInputFormat();
+  input_shape_map_ = ascend_info->GetInputShapeMap();
+  auto out_type = ascend_info->GetOutputType();
   auto iter = kSupportedDtypeOptionMap.find(out_type);
   if (out_type == DataType::kTypeUnknown) {
     // do nothing
@@ -50,13 +50,13 @@ AclModelOptions::AclModelOptions(const std::shared_ptr &context) {
   } else {
     output_type_ = iter->second;
   }
-  dynamic_batch_size_ = ascend310_info->GetDynamicBatchSize();
-  dynamic_image_size_ = ascend310_info->GetDynamicImageSize();
-  precision_mode_ = ascend310_info->GetPrecisionMode();
-  op_select_impl_mode_ = ascend310_info->GetOpSelectImplMode();
-  fusion_switch_cfg_path_ = ascend310_info->GetFusionSwitchConfigPath();
-  device_id_ = ascend310_info->GetDeviceID();
-  buffer_optimize_mode_ = ascend310_info->GetBufferOptimizeMode();
+  dynamic_batch_size_ = ascend_info->GetDynamicBatchSize();
+  dynamic_image_size_ = ascend_info->GetDynamicImageSize();
+  precision_mode_ = ascend_info->GetPrecisionMode();
+  op_select_impl_mode_ = ascend_info->GetOpSelectImplMode();
+  fusion_switch_cfg_path_ = ascend_info->GetFusionSwitchConfigPath();
+  device_id_ = ascend_info->GetDeviceID();
+  buffer_optimize_mode_ = ascend_info->GetBufferOptimizeMode();
   const char *soc_name = aclrtGetSocName();
   if (soc_name == nullptr) {
     MS_LOG(WARNING) << "Get soc version failed.";
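For context: the renamed AscendDeviceInfo is what AclModelOptions consumes above. A minimal sketch of driving it through the public C++ API; the option values, the model path, and the file-path Build overload are illustrative assumptions and may differ by release:

    #include <memory>
    #include "include/api/context.h"
    #include "include/api/model.h"

    int main() {
      // Describe the Ascend device; these are the options AclModelOptions reads.
      auto ascend_info = std::make_shared<mindspore::AscendDeviceInfo>();
      ascend_info->SetDeviceID(0);
      ascend_info->SetDynamicBatchSize({1, 2, 4, 8});       // optional dynamic-batch profile
      ascend_info->SetPrecisionMode("allow_fp32_to_fp16");  // assumed ACL precision-mode string

      auto context = std::make_shared<mindspore::Context>();
      context->MutableDeviceInfo().push_back(ascend_info);

      mindspore::Model model;
      // "net.mindir" is a placeholder; error handling omitted for brevity.
      (void)model.Build("net.mindir", mindspore::ModelType::kMindIR, context);
      return 0;
    }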
diff --git a/mindspore/lite/include/context.h b/mindspore/lite/include/context.h
index a481315e2d3..c842e778a38 100644
--- a/mindspore/lite/include/context.h
+++ b/mindspore/lite/include/context.h
@@ -44,7 +44,7 @@ typedef struct NpuDeviceInfo {
   int frequency_ = 3; /**< npu frequency inference, low 1, medium 2, high 3, extreme 4, other values will be set to 3 */
 } NpuDeviceInfo;
 
-/// \brief Ascend310DeviceInfo defined for Ascend's configuration information.
+/// \brief AscendDeviceInfo defined for Ascend's configuration information.
 typedef struct AscendDeviceInfo {
   uint32_t device_id_;
   std::string batch_size_;
@@ -55,7 +55,7 @@ struct DeviceInfo {
   CpuDeviceInfo cpu_device_info_;
   GpuDeviceInfo gpu_device_info_;
   NpuDeviceInfo npu_device_info_;
-  AscendDeviceInfo ascend310_device_info_;
+  AscendDeviceInfo ascend_device_info_;
 };
 
 /// \brief DeviceContext defined for holding backend's configuration information.
diff --git a/mindspore/lite/include/lite_types.h b/mindspore/lite/include/lite_types.h
index 7081146b4d1..ea3c468b50a 100644
--- a/mindspore/lite/include/lite_types.h
+++ b/mindspore/lite/include/lite_types.h
@@ -27,10 +27,10 @@ typedef enum {
 
 /// \brief DeviceType defined for holding user's preferred backend.
 typedef enum {
-  DT_CPU,      /**< CPU device type */
-  DT_GPU,      /**< GPU device type */
-  DT_NPU,      /**< NPU device type */
-  DT_ASCEND310 /**< ASCEND310 device type */
+  DT_CPU,    /**< CPU device type */
+  DT_GPU,    /**< GPU device type */
+  DT_NPU,    /**< NPU device type */
+  DT_ASCEND  /**< ASCEND device type */
 } DeviceType;
 
 typedef enum {
diff --git a/mindspore/lite/src/CMakeLists.txt b/mindspore/lite/src/CMakeLists.txt
index 2f8636a5ced..d1db8c412d6 100644
--- a/mindspore/lite/src/CMakeLists.txt
+++ b/mindspore/lite/src/CMakeLists.txt
@@ -276,6 +276,7 @@ add_library(lite_src_mid OBJECT ${LITE_SRC})
 add_dependencies(lite_src_mid fbs_src)
 
 if(MSLITE_ENABLE_ACL)
+    include_directories(${TOP_DIR}/graphengine/inc/external)
     add_subdirectory(runtime/kernel/ascend310)
     link_directories(${ASCEND_RUNTIME_PATH} ${ASCEND_TOOLKIT_RUNTIME_PATH})
 endif()
diff --git a/mindspore/lite/src/common/context_util.cc b/mindspore/lite/src/common/context_util.cc
index c4b98519187..429f76c5498 100644
--- a/mindspore/lite/src/common/context_util.cc
+++ b/mindspore/lite/src/common/context_util.cc
@@ -88,22 +88,22 @@ std::vector GetBatchSize(const std::string &batch_size) {
   return res;
 }
 
-std::shared_ptr Ascend310DeviceInfoFromAscend310DeviceContext(
-  const lite::DeviceContext &ascend310_context) {
-  if (ascend310_context.device_type_ != DT_ASCEND310) {
-    MS_LOG(ERROR) << "Function input parameter is not ascend310 context.";
+std::shared_ptr AscendDeviceInfoFromAscendDeviceContext(
+  const lite::DeviceContext &ascend_context) {
+  if (ascend_context.device_type_ != DT_ASCEND) {
+    MS_LOG(ERROR) << "Function input parameter is not ascend context.";
     return nullptr;
   }
-  auto ascend310_info = std::make_shared();
-  MS_CHECK_TRUE_RET(ascend310_info != nullptr, nullptr);
-  ascend310_info->SetDeviceID(ascend310_context.device_info_.ascend310_device_info_.device_id_);
-  std::string batch_size = ascend310_context.device_info_.ascend310_device_info_.batch_size_;
+  auto ascend_info = std::make_shared();
+  MS_CHECK_TRUE_RET(ascend_info != nullptr, nullptr);
+  ascend_info->SetDeviceID(ascend_context.device_info_.ascend_device_info_.device_id_);
+  std::string batch_size = ascend_context.device_info_.ascend_device_info_.batch_size_;
   if (!batch_size.empty()) {
     auto val = GetBatchSize(batch_size);
-    ascend310_info->SetDynamicBatchSize(val);
+    ascend_info->SetDynamicBatchSize(val);
   }
-  ascend310_info->SetDynamicImageSize(ascend310_context.device_info_.ascend310_device_info_.image_size_);
-  return ascend310_info;
+  ascend_info->SetDynamicImageSize(ascend_context.device_info_.ascend_device_info_.image_size_);
+  return ascend_info;
 }
 }  // namespace
@@ -126,7 +126,7 @@ mindspore::Context *MSContextFromContext(const lite::Context *context) {
     transfer_funcs = {{DT_CPU, CPUDeviceInfoFromCPUDeviceContext},
                       {DT_GPU, GPUDeviceInfoFromGPUDeviceContext},
                       {DT_NPU, NPUDeviceInfoFromNPUDeviceContext},
-                      {DT_ASCEND310, Ascend310DeviceInfoFromAscend310DeviceContext}};
+                      {DT_ASCEND, AscendDeviceInfoFromAscendDeviceContext}};
   for (auto &device_context : context->device_list_) {
     auto device_type = device_context.device_type_;
     if (transfer_funcs.find(device_type) == transfer_funcs.end()) {
diff --git a/mindspore/lite/src/cxx_api/context.cc b/mindspore/lite/src/cxx_api/context.cc
index e5e260aea95..046d096efc4 100644
--- a/mindspore/lite/src/cxx_api/context.cc
+++ b/mindspore/lite/src/cxx_api/context.cc
@@ -37,18 +37,18 @@ constexpr auto kModelOptionKirinNpuFrequency = "mindspore.option.kirin_npu.frequ
 constexpr auto kModelOptionProvider = "mindspore.option.provider";
 constexpr auto kModelOptionProviderDevice = "mindspore.option.provider.device";
 constexpr auto kModelOptionDeviceID = "mindspore.option.device_id";
-constexpr auto kModelOptionAscend310DeviceID = kModelOptionDeviceID;
-constexpr auto kModelOptionAscend310InsertOpCfgPath = "mindspore.option.ascend310.insert_op_config_file_path";
-constexpr auto kModelOptionAscend310InputFormat = "mindspore.option.ascend310.input_format";
-constexpr auto kModelOptionAscend310InputShapeMap = "mindspore.option.ascend310.input_shape_map";
-constexpr auto kModelOptionAscend310InputShape = "mindspore.option.ascend310.input_shape";
-constexpr auto kModelOptionAscend310OutputType = "mindspore.option.ascend310.output_type";
-constexpr auto kModelOptionAscend310PrecisionMode = "mindspore.option.ascend310.precision_mode";
-constexpr auto kModelOptionAscend310OpSelectImplMode = "mindspore.option.ascend310.op_select_impl_mode";
-constexpr auto KModelOptionAscend310FusionSwitchCfgPath = "mindspore.option.ascend310.fusion_switch_config_file_path";
-constexpr auto kModelOptionAscend310DynamicBatchSize = "mindspore.option.ascend310.dynamic_batch_size";
-constexpr auto kModelOptionAscend310DynamicImageSize = "mindspore.option.ascend310.dynamic_image_size";
-constexpr auto kModelOptionAscend310BufferOptimize = "mindspore.option.ascend310.buffer_optimize";
+constexpr auto kModelOptionAscendDeviceID = kModelOptionDeviceID;
+constexpr auto kModelOptionAscendInsertOpCfgPath = "mindspore.option.ascend.insert_op_config_file_path";
+constexpr auto kModelOptionAscendInputFormat = "mindspore.option.ascend.input_format";
+constexpr auto kModelOptionAscendInputShapeMap = "mindspore.option.ascend.input_shape_map";
+constexpr auto kModelOptionAscendInputShape = "mindspore.option.ascend.input_shape";
+constexpr auto kModelOptionAscendOutputType = "mindspore.option.ascend.output_type";
+constexpr auto kModelOptionAscendPrecisionMode = "mindspore.option.ascend.precision_mode";
+constexpr auto kModelOptionAscendOpSelectImplMode = "mindspore.option.ascend.op_select_impl_mode";
+constexpr auto KModelOptionAscendFusionSwitchCfgPath = "mindspore.option.ascend.fusion_switch_config_file_path";
+constexpr auto kModelOptionAscendDynamicBatchSize = "mindspore.option.ascend.dynamic_batch_size";
+constexpr auto kModelOptionAscendDynamicImageSize = "mindspore.option.ascend.dynamic_image_size";
+constexpr auto kModelOptionAscendBufferOptimize = "mindspore.option.ascend.buffer_optimize";
 
 Context::Context() : data_(std::make_shared()) {}
 
@@ -372,7 +372,7 @@ void AscendDeviceInfo::SetDeviceID(uint32_t device_id) {
     MS_LOG(ERROR) << "Invalid context.";
     return;
   }
-  data_->params[kModelOptionAscend310DeviceID] = device_id;
+  data_->params[kModelOptionAscendDeviceID] = device_id;
 }
 
 uint32_t AscendDeviceInfo::GetDeviceID() const {
@@ -380,7 +380,7 @@ uint32_t AscendDeviceInfo::GetDeviceID() const {
     MS_LOG(ERROR) << "Invalid context.";
     return 0;
   }
-  return GetValue(data_, kModelOptionAscend310DeviceID);
+  return GetValue(data_, kModelOptionAscendDeviceID);
 }
 
 void AscendDeviceInfo::SetInsertOpConfigPath(const std::vector &cfg_path) {
@@ -388,14 +388,14 @@ void AscendDeviceInfo::SetInsertOpConfigPath(const std::vector &cfg_path)
     MS_LOG(ERROR) << "Invalid context.";
     return;
   }
-  data_->params[kModelOptionAscend310InsertOpCfgPath] = CharToString(cfg_path);
+  data_->params[kModelOptionAscendInsertOpCfgPath] = CharToString(cfg_path);
 }
 
 std::vector AscendDeviceInfo::GetInsertOpConfigPathChar() const {
   if (data_ == nullptr) {
     MS_LOG(ERROR) << "Invalid context.";
     return std::vector();
   }
-  const std::string &ref = GetValue(data_, kModelOptionAscend310InsertOpCfgPath);
+  const std::string &ref = GetValue(data_, kModelOptionAscendInsertOpCfgPath);
   return StringToChar(ref);
 }
 
@@ -404,7 +404,7 @@ void AscendDeviceInfo::SetInputFormat(const std::vector &format) {
     MS_LOG(ERROR) << "Invalid context.";
     return;
   }
-  data_->params[kModelOptionAscend310InputFormat] = CharToString(format);
+  data_->params[kModelOptionAscendInputFormat] = CharToString(format);
 }
 
 std::vector AscendDeviceInfo::GetInputFormatChar() const {
@@ -412,7 +412,7 @@ std::vector AscendDeviceInfo::GetInputFormatChar() const {
     MS_LOG(ERROR) << "Invalid context.";
     return std::vector();
   }
-  const std::string &ref = GetValue(data_, kModelOptionAscend310InputFormat);
+  const std::string &ref = GetValue(data_, kModelOptionAscendInputFormat);
   return StringToChar(ref);
 }
 
@@ -421,14 +421,14 @@ void AscendDeviceInfo::SetInputShape(const std::vector &shape) {
     MS_LOG(ERROR) << "Invalid context.";
     return;
   }
-  data_->params[kModelOptionAscend310InputShape] = CharToString(shape);
+  data_->params[kModelOptionAscendInputShape] = CharToString(shape);
 }
 
 std::vector AscendDeviceInfo::GetInputShapeChar() const {
   if (data_ == nullptr) {
     MS_LOG(ERROR) << "Invalid context.";
     return std::vector();
   }
-  const std::string &ref = GetValue(data_, kModelOptionAscend310InputShape);
+  const std::string &ref = GetValue(data_, kModelOptionAscendInputShape);
   return StringToChar(ref);
 }
 
@@ -444,7 +444,7 @@ void AscendDeviceInfo::SetDynamicBatchSize(const std::vector &dynamic_ba
     }
     batchs += std::to_string(dynamic_batch_size[i]);
   }
-  data_->params[kModelOptionAscend310DynamicBatchSize] = batchs;
+  data_->params[kModelOptionAscendDynamicBatchSize] = batchs;
 }
 
 std::vector AscendDeviceInfo::GetDynamicBatchSizeChar() const {
@@ -452,7 +452,7 @@ std::vector AscendDeviceInfo::GetDynamicBatchSizeChar() const {
     MS_LOG(ERROR) << "Invalid context.";
     return std::vector();
   }
-  const std::string &ref = GetValue(data_, kModelOptionAscend310DynamicBatchSize);
+  const std::string &ref = GetValue(data_, kModelOptionAscendDynamicBatchSize);
   return StringToChar(ref);
 }
 
@@ -461,7 +461,7 @@ void AscendDeviceInfo::SetDynamicImageSize(const std::vector &dynamic_imag
     MS_LOG(ERROR) << "Invalid context.";
     return;
   }
-  data_->params[kModelOptionAscend310DynamicImageSize] = CharToString(dynamic_image_size);
+  data_->params[kModelOptionAscendDynamicImageSize] = CharToString(dynamic_image_size);
 }
 
 std::vector AscendDeviceInfo::GetDynamicImageSizeChar() const {
@@ -469,7 +469,7 @@ std::vector AscendDeviceInfo::GetDynamicImageSizeChar() const {
     MS_LOG(ERROR) << "Invalid context.";
     return std::vector();
   }
-  const std::string &ref = GetValue(data_, kModelOptionAscend310DynamicImageSize);
+  const std::string &ref = GetValue(data_, kModelOptionAscendDynamicImageSize);
   return StringToChar(ref);
 }
 
@@ -478,7 +478,7 @@ void AscendDeviceInfo::SetPrecisionMode(const std::vector &precision_mode)
     MS_LOG(ERROR) << "Invalid context.";
     return;
   }
-  data_->params[kModelOptionAscend310PrecisionMode] = CharToString(precision_mode);
+  data_->params[kModelOptionAscendPrecisionMode] = CharToString(precision_mode);
 }
 
 std::vector AscendDeviceInfo::GetPrecisionModeChar() const {
@@ -486,7 +486,7 @@ std::vector AscendDeviceInfo::GetPrecisionModeChar() const {
     MS_LOG(ERROR) << "Invalid context.";
     return std::vector();
   }
-  const std::string &ref = GetValue(data_, kModelOptionAscend310PrecisionMode);
+  const std::string &ref = GetValue(data_, kModelOptionAscendPrecisionMode);
   return StringToChar(ref);
 }
 
@@ -495,7 +495,7 @@ void AscendDeviceInfo::SetOpSelectImplMode(const std::vector &op_select_im
     MS_LOG(ERROR) << "Invalid context.";
     return;
   }
-  data_->params[kModelOptionAscend310OpSelectImplMode] = CharToString(op_select_impl_mode);
+  data_->params[kModelOptionAscendOpSelectImplMode] = CharToString(op_select_impl_mode);
 }
 
 std::vector AscendDeviceInfo::GetOpSelectImplModeChar() const {
@@ -503,7 +503,7 @@ std::vector AscendDeviceInfo::GetOpSelectImplModeChar() const {
     MS_LOG(ERROR) << "Invalid context.";
     return std::vector();
   }
-  const std::string &ref = GetValue(data_, kModelOptionAscend310OpSelectImplMode);
+  const std::string &ref = GetValue(data_, kModelOptionAscendOpSelectImplMode);
   return StringToChar(ref);
 }
 
@@ -512,14 +512,14 @@ void AscendDeviceInfo::SetFusionSwitchConfigPath(const std::vector &cfg_pa
     MS_LOG(ERROR) << "Invalid context.";
     return;
   }
-  data_->params[KModelOptionAscend310FusionSwitchCfgPath] = CharToString(cfg_path);
+  data_->params[KModelOptionAscendFusionSwitchCfgPath] = CharToString(cfg_path);
 }
 
 std::vector AscendDeviceInfo::GetFusionSwitchConfigPathChar() const {
   if (data_ == nullptr) {
     MS_LOG(ERROR) << "Invalid context.";
     return std::vector();
   }
-  const std::string &ref = GetValue(data_, KModelOptionAscend310FusionSwitchCfgPath);
+  const std::string &ref = GetValue(data_, KModelOptionAscendFusionSwitchCfgPath);
   return StringToChar(ref);
 }
 
@@ -528,7 +528,7 @@ void AscendDeviceInfo::SetInputShapeMap(const std::map> &s
     MS_LOG(ERROR) << "Invalid context.";
     return;
   }
-  data_->params[kModelOptionAscend310InputShapeMap] = shape;
+  data_->params[kModelOptionAscendInputShapeMap] = shape;
 }
 
 std::map> AscendDeviceInfo::GetInputShapeMap() const {
@@ -536,7 +536,7 @@ std::map> AscendDeviceInfo::GetInputShapeMap() const {
     MS_LOG(ERROR) << "Invalid context.";
     return std::map>();
   }
-  return GetValue>>(data_, kModelOptionAscend310InputShapeMap);
+  return GetValue>>(data_, kModelOptionAscendInputShapeMap);
 }
 
 void AscendDeviceInfo::SetOutputType(enum DataType output_type) {
@@ -544,7 +544,7 @@ void AscendDeviceInfo::SetOutputType(enum DataType output_type) {
     MS_LOG(ERROR) << "Invalid context.";
     return;
   }
-  data_->params[kModelOptionAscend310OutputType] = output_type;
+  data_->params[kModelOptionAscendOutputType] = output_type;
 }
 
 enum DataType AscendDeviceInfo::GetOutputType() const {
@@ -552,7 +552,7 @@ enum DataType AscendDeviceInfo::GetOutputType() const {
     MS_LOG(ERROR) << "Invalid context.";
     return DataType::kTypeUnknown;
   }
-  return GetValue(data_, kModelOptionAscend310OutputType);
+  return GetValue(data_, kModelOptionAscendOutputType);
 }
 
 void AscendDeviceInfo::SetBufferOptimizeMode(const std::vector &buffer_optimize_mode) {
@@ -560,7 +560,7 @@ void AscendDeviceInfo::SetBufferOptimizeMode(const std::vector &buffer_opt
     MS_LOG(ERROR) << "Invalid context.";
     return;
   }
-  data_->params[kModelOptionAscend310BufferOptimize] = CharToString(buffer_optimize_mode);
+  data_->params[kModelOptionAscendBufferOptimize] = CharToString(buffer_optimize_mode);
 }
 
 std::vector AscendDeviceInfo::GetBufferOptimizeModeChar() const {
@@ -568,7 +568,7 @@ std::vector AscendDeviceInfo::GetBufferOptimizeModeChar() const {
     MS_LOG(ERROR) << "Invalid context.";
     return std::vector();
   }
-  const std::string &ref = GetValue(data_, kModelOptionAscend310BufferOptimize);
+  const std::string &ref = GetValue(data_, kModelOptionAscendBufferOptimize);
   return StringToChar(ref);
 }
 }  // namespace mindspore
diff --git a/mindspore/lite/src/cxx_api/converters.cc b/mindspore/lite/src/cxx_api/converters.cc
index 4f0b809433a..23f90b19dd8 100644
--- a/mindspore/lite/src/cxx_api/converters.cc
+++ b/mindspore/lite/src/cxx_api/converters.cc
@@ -60,12 +60,12 @@ Status ContextUtils::AddNpuDevice(int frequency, lite::InnerContext *inner_conte
   return kSuccess;
 }
 
-Status ContextUtils::AddAscend310Device(lite::InnerContext *inner_context, DeviceInfoContext *device) {
+Status ContextUtils::AddAscendDevice(lite::InnerContext *inner_context, DeviceInfoContext *device) {
   lite::DeviceInfo device_info = {0};
-  auto ascend310_context = device->Cast();
-  device_info.ascend310_device_info_ = {ascend310_context->GetDeviceID(), ascend310_context->GetDynamicBatchSize(),
-                                        ascend310_context->GetDynamicImageSize()};
-  inner_context->device_list_.push_back({lite::DT_ASCEND310, device_info});
+  auto ascend_context = device->Cast();
+  device_info.ascend_device_info_ = {ascend_context->GetDeviceID(), ascend_context->GetDynamicBatchSize(),
+                                     ascend_context->GetDynamicImageSize()};
+  inner_context->device_list_.push_back({lite::DT_ASCEND, device_info});
   return kSuccess;
 }
 
@@ -111,7 +111,7 @@ lite::InnerContext *ContextUtils::Convert(Context *context) {
       auto npu_context = device->Cast();
       ret = AddNpuDevice(npu_context->GetFrequency(), inner_context.get());
     } else if (device->GetDeviceType() == kAscend) {
-      ret = AddAscend310Device(inner_context.get(), device.get());
+      ret = AddAscendDevice(inner_context.get(), device.get());
     }
     if (ret != kSuccess) {
       MS_LOG(ERROR) << "Add device failed!";
diff --git a/mindspore/lite/src/cxx_api/converters.h b/mindspore/lite/src/cxx_api/converters.h
index 632caed5b40..114bf525c32 100644
--- a/mindspore/lite/src/cxx_api/converters.h
+++ b/mindspore/lite/src/cxx_api/converters.h
@@ -43,7 +43,7 @@ class ContextUtils {
                              const std::string &provider_device, const std::shared_ptr &allocator,
                              lite::InnerContext *inner_context);
   static Status AddNpuDevice(int frequency, lite::InnerContext *inner_context);
-  static Status AddAscend310Device(lite::InnerContext *inner_context, DeviceInfoContext *device);
+  static Status AddAscendDevice(lite::InnerContext *inner_context, DeviceInfoContext *device);
   static bool IsAffinityModeValid(int affinity_mode) {
     return affinity_mode >= lite::NO_BIND && affinity_mode <= lite::MID_CPU;
   }
diff --git a/mindspore/lite/src/cxx_api/model/model.cc b/mindspore/lite/src/cxx_api/model/model.cc
index 6c032d5e108..5d6de2d9049 100644
--- a/mindspore/lite/src/cxx_api/model/model.cc
+++ b/mindspore/lite/src/cxx_api/model/model.cc
@@ -18,6 +18,9 @@
 #ifdef GPU_TENSORRT
 #include
 #endif
+#ifdef ENABLE_LITE_ACL
+#include "acl/acl_base.h"
+#endif
 #include
 #include "include/api/callback/callback.h"
 #include "include/api/context.h"
@@ -153,8 +156,11 @@ Model::Model() : impl_(nullptr) {}
 
 Model::~Model() {}
 
 bool Model::CheckModelSupport(enum DeviceType device_type, ModelType model_type) {
-  if (device_type == kGPU) {
+  if (device_type == kCPU) {
+    return true;
+  }
 #ifdef GPU_TENSORRT
+  if (device_type == kGPU) {
     int driver_version = 0;
     int ret = cudaDriverGetVersion(&driver_version);
     if (ret != cudaSuccess || driver_version == 0) {
@@ -162,18 +168,24 @@ bool Model::CheckModelSupport(enum DeviceType device_type, ModelType model_type)
       return false;
     }
     return true;
-#else
-    return false;
-#endif
-  } else if (device_type == kCPU) {
-#ifdef ENABLE_LITE_ACL
-    return false;
-#else
-    return true;
-#endif
-  } else {
-    return false;
   }
+#endif
+#ifdef ENABLE_LITE_ACL
+  if (device_type == kAscend || device_type == kAscend310) {
+    const char *soc_name_c = aclrtGetSocName();
+    if (soc_name_c == nullptr) {
+      MS_LOG(WARNING) << "aclrtGetSocName failed.";
+      return false;
+    }
+    std::string soc_name(soc_name_c);
+    if (soc_name.find("910") != std::string::npos) {
+      MS_LOG(WARNING) << "Device not support, aclrtGetSocName: " << soc_name;
+      return false;
+    }
+    return true;
+  }
+#endif
+  return false;
 }
 
 std::vector Model::GetInputs() {
diff --git a/mindspore/lite/src/inner_context.cc b/mindspore/lite/src/inner_context.cc
index 4aea6ced405..960f3be9e07 100644
--- a/mindspore/lite/src/inner_context.cc
+++ b/mindspore/lite/src/inner_context.cc
@@ -279,7 +279,7 @@ bool InnerContext::IsProviderEnabled() const {
 
 bool InnerContext::IsAllDeviceTypeValid() const {
   return std::all_of(this->device_list_.begin(), this->device_list_.end(), [](const DeviceContext &device) {
-    return device.device_type_ >= DT_CPU && device.device_type_ <= DT_ASCEND310;
+    return device.device_type_ >= DT_CPU && device.device_type_ <= DT_ASCEND;
  });
 }
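With the reworked check above, callers can probe backend availability before building a model: kCPU always reports support, kGPU requires a usable CUDA driver under GPU_TENSORRT, and kAscend requires a non-910 SoC under ENABLE_LITE_ACL. A small sketch (the device and model-type values are illustrative):

    #include "include/api/model.h"

    bool CanRunOnAscend() {
      // False on builds without ACL support or when aclrtGetSocName reports a 910 SoC.
      return mindspore::Model::CheckModelSupport(mindspore::kAscend, mindspore::ModelType::kMindIR);
    }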
diff --git a/mindspore/lite/src/runtime/kernel/ascend310/src/custom_kernel.cc b/mindspore/lite/src/runtime/kernel/ascend310/src/custom_kernel.cc
index 53cedd8586f..bc89891a624 100644
--- a/mindspore/lite/src/runtime/kernel/ascend310/src/custom_kernel.cc
+++ b/mindspore/lite/src/runtime/kernel/ascend310/src/custom_kernel.cc
@@ -34,17 +34,17 @@ constexpr auto kNCHWHeightIdx = 2;
 constexpr auto kNCHWWidthIdx = 3;
 constexpr auto kImageSizeHwNum = 2;
 }  // namespace
-CustomAscend310Kernel::CustomAscend310Kernel(const std::vector &inputs,
-                                             const std::vector &outputs,
-                                             const schema::Primitive *primitive, const mindspore::Context *ctx)
+CustomAscendKernel::CustomAscendKernel(const std::vector &inputs,
+                                       const std::vector &outputs,
+                                       const schema::Primitive *primitive, const mindspore::Context *ctx)
     : Kernel(inputs, outputs, primitive, ctx),
       load_model_(false),
       acl_options_({}),
       model_infer_(nullptr),
       InputDataIndex_(0) {}
 
-CustomAscend310Kernel::~CustomAscend310Kernel() {
-  if (model_infer_ != nullptr) {
+CustomAscendKernel::~CustomAscendKernel() {
+  if (load_model_) {
     int ret = model_infer_->Finalize();
     if (ret != lite::RET_OK) {
       MS_LOG(ERROR) << "Model finalize failed.";
@@ -52,7 +52,7 @@ CustomAscend310Kernel::~CustomAscend310Kernel() {
   }
 }
 
-STATUS CustomAscend310Kernel::PrepareModelInfer() {
+STATUS CustomAscendKernel::PrepareModelInfer() {
   if (inputs_.size() < 1) {
     MS_LOG(ERROR) << "Inputs size should not be less than 1.";
     return lite::RET_ERROR;
@@ -85,7 +85,7 @@ STATUS CustomAscend310Kernel::PrepareModelInfer() {
   return lite::RET_OK;
 }
 
-STATUS CustomAscend310Kernel::Prepare() {
+STATUS CustomAscendKernel::Prepare() {
   if (load_model_) {
     MS_LOG(INFO) << "Custom kernel has been prepared.";
     return lite::RET_OK;
@@ -100,7 +100,7 @@ STATUS CustomAscend310Kernel::Prepare() {
   return lite::RET_OK;
 }
 
-void CustomAscend310Kernel::RecordInputDataIndex() {
+void CustomAscendKernel::RecordInputDataIndex() {
   for (size_t idx = 0; idx < inputs_.size(); ++idx) {
     if (inputs_[idx].Data() == nullptr) {
       InputDataIndex_ = idx;
@@ -109,14 +109,14 @@ void CustomAscend310Kernel::RecordInputDataIndex() {
   }
 }
 
-STATUS CustomAscend310Kernel::ReSize() {
+STATUS CustomAscendKernel::ReSize() {
   if (!load_model_) {
     return Prepare();
   }
   return lite::RET_OK;
 }
 
-STATUS CustomAscend310Kernel::ProcDynamicInput(std::vector *inputs) {
+STATUS CustomAscendKernel::ProcDynamicInput(std::vector *inputs) {
   if (acl_options_.batch_size.empty() && acl_options_.image_size.empty()) {
     MS_LOG(INFO) << "Input is not dynamic mode.";
     return lite::RET_OK;
@@ -154,7 +154,7 @@ STATUS CustomAscend310Kernel::ProcDynamicInput(std::vector
   return lite::RET_OK;
 }
 
-STATUS CustomAscend310Kernel::GetRealBatchSize(std::vector *inputs, int32_t *batch_size) {
+STATUS CustomAscendKernel::GetRealBatchSize(std::vector *inputs, int32_t *batch_size) {
   CHECK_NULL_RETURN(batch_size);
   if (InputDataIndex_ >= inputs->size()) {
     MS_LOG(ERROR) << " Input data index " << InputDataIndex_ << " is larger than input size " << inputs->size();
@@ -177,8 +177,8 @@ STATUS CustomAscend310Kernel::GetRealBatchSize(std::vector
   return lite::RET_OK;
 }
 
-STATUS CustomAscend310Kernel::GetRealImageSize(std::vector *inputs, int32_t *image_size,
-                                               int32_t num) {
+STATUS CustomAscendKernel::GetRealImageSize(std::vector *inputs, int32_t *image_size,
+                                            int32_t num) {
   CHECK_NULL_RETURN(image_size);
   if (InputDataIndex_ >= inputs->size()) {
     MS_LOG(ERROR) << "Input data index " << InputDataIndex_ << " is larger than input size " << inputs->size();
@@ -217,7 +217,7 @@ STATUS CustomAscend310Kernel::GetRealImageSize(std::vector
   return lite::RET_OK;
 }
 
-STATUS CustomAscend310Kernel::Execute() {
+STATUS CustomAscendKernel::Execute() {
   if (!load_model_) {
     MS_LOG(WARNING) << "Custom kernel has not been prepared.";
     return lite::RET_OK;
@@ -246,7 +246,7 @@ std::shared_ptr CustomCreateKernel(const std::vector
-  auto kernel = std::make_shared(inputs, outputs, primitive, ctx);
+  auto kernel = std::make_shared(inputs, outputs, primitive, ctx);
   if (kernel == nullptr) {
     MS_LOG(ERROR) << "New custom kernel is nullptr";
     return nullptr;
@@ -262,8 +262,8 @@ const auto kFloat32 = DataType::kNumberTypeFloat32;
 const auto kInt8 = DataType::kNumberTypeInt8;
 const auto kUInt8 = DataType::kNumberTypeUInt8;
 }  // namespace
-REGISTER_CUSTOM_KERNEL(ASCEND310, ACL, kFloat32, ACL, kernel::acl::CustomCreateKernel)
-REGISTER_CUSTOM_KERNEL(ASCEND310, ACL, kInt8, ACL, kernel::acl::CustomCreateKernel)
-REGISTER_CUSTOM_KERNEL(ASCEND310, ACL, kUInt8, ACL, kernel::acl::CustomCreateKernel)
+REGISTER_CUSTOM_KERNEL(ASCEND, ACL, kFloat32, ACL, kernel::acl::CustomCreateKernel)
+REGISTER_CUSTOM_KERNEL(ASCEND, ACL, kInt8, ACL, kernel::acl::CustomCreateKernel)
+REGISTER_CUSTOM_KERNEL(ASCEND, ACL, kUInt8, ACL, kernel::acl::CustomCreateKernel)
 }  // namespace registry
 }  // namespace mindspore
diff --git a/mindspore/lite/src/runtime/kernel/ascend310/src/custom_kernel.h b/mindspore/lite/src/runtime/kernel/ascend310/src/custom_kernel.h
index bd44e62cb2e..a5210740ec7 100644
--- a/mindspore/lite/src/runtime/kernel/ascend310/src/custom_kernel.h
+++ b/mindspore/lite/src/runtime/kernel/ascend310/src/custom_kernel.h
@@ -31,11 +31,11 @@ namespace mindspore::kernel {
 namespace acl {
 using mindspore::lite::STATUS;
 
-class CustomAscend310Kernel : public kernel::Kernel {
- public:
-  CustomAscend310Kernel(const std::vector &inputs, const std::vector &outputs,
-                        const mindspore::schema::Primitive *primitive, const mindspore::Context *ctx);
-  ~CustomAscend310Kernel() override;
+class CustomAscendKernel : public kernel::Kernel {
+ public:
+  CustomAscendKernel(const std::vector &inputs, const std::vector &outputs,
+                     const mindspore::schema::Primitive *primitive, const mindspore::Context *ctx);
+  ~CustomAscendKernel() override;
 
   STATUS Prepare() override;
   STATUS ReSize() override;
diff --git a/mindspore/lite/tools/benchmark/benchmark_base.cc b/mindspore/lite/tools/benchmark/benchmark_base.cc
index 34045f61ff5..450464c53c1 100644
--- a/mindspore/lite/tools/benchmark/benchmark_base.cc
+++ b/mindspore/lite/tools/benchmark/benchmark_base.cc
@@ -295,7 +295,7 @@ int BenchmarkBase::CheckThreadNumValid() {
 
 int BenchmarkBase::CheckDeviceTypeValid() {
   if (flags_->device_ != "CPU" && flags_->device_ != "GPU" && flags_->device_ != "NPU" &&
-      flags_->device_ != "Ascend310") {
+      flags_->device_ != "Ascend310" && flags_->device_ != "Ascend710") {
     MS_LOG(ERROR) << "Device type:" << flags_->device_ << " is not supported.";
     std::cerr << "Device type:" << flags_->device_ << " is not supported." << std::endl;
     return RET_ERROR;
diff --git a/mindspore/lite/tools/benchmark/benchmark_base.h b/mindspore/lite/tools/benchmark/benchmark_base.h
index 4e7de4bf06f..ae9c001d8bf 100644
--- a/mindspore/lite/tools/benchmark/benchmark_base.h
+++ b/mindspore/lite/tools/benchmark/benchmark_base.h
@@ -119,7 +119,7 @@ class MS_API BenchmarkFlags : public virtual FlagParser {
     AddFlag(&BenchmarkFlags::model_type_, "modelType", "Input model type. MindIR | MindIR_Opt", "MindIR");
     AddFlag(&BenchmarkFlags::in_data_file_, "inDataFile", "Input data file, if not set, use random input", "");
     AddFlag(&BenchmarkFlags::config_file_, "configFile", "Config file", "");
-    AddFlag(&BenchmarkFlags::device_, "device", "CPU | GPU | NPU | Ascend310", "CPU");
+    AddFlag(&BenchmarkFlags::device_, "device", "CPU | GPU | NPU | Ascend310 | Ascend710", "CPU");
     AddFlag(&BenchmarkFlags::cpu_bind_mode_, "cpuBindMode", "Input 0 for NO_BIND, 1 for HIGHER_CPU, 2 for MID_CPU.", 1);
     // MarkPerformance
     AddFlag(&BenchmarkFlags::loop_count_, "loopCount", "Run loop count", 10);
diff --git a/mindspore/lite/tools/benchmark/benchmark_unified_api.cc b/mindspore/lite/tools/benchmark/benchmark_unified_api.cc
index e28c3a2db0c..0f57bd838d3 100644
--- a/mindspore/lite/tools/benchmark/benchmark_unified_api.cc
+++ b/mindspore/lite/tools/benchmark/benchmark_unified_api.cc
@@ -23,7 +23,6 @@
 #include
 #include
 #include
-#include "include/context.h"
 #include "include/ms_tensor.h"
 #include "include/version.h"
 #include "schema/model_generated.h"
@@ -375,10 +374,10 @@ int BenchmarkUnifiedApi::InitMSContext(const std::shared_ptr
     device_list.push_back(npu_device_info);
   }
 
-  if (flags_->device_ == "Ascend310") {
-    std::shared_ptr ascend310_device_info = std::make_shared();
-    ascend310_device_info->SetDeviceID(0);
-    device_list.push_back(ascend310_device_info);
+  if (flags_->device_ == "Ascend310" || flags_->device_ == "Ascend710") {
+    std::shared_ptr ascend_device_info = std::make_shared();
+    ascend_device_info->SetDeviceID(0);
+    device_list.push_back(ascend_device_info);
   }
 
   // CPU priority is behind GPU and NPU
diff --git a/mindspore/lite/tools/converter/adapter/acl/acl_pass_impl.cc b/mindspore/lite/tools/converter/adapter/acl/acl_pass_impl.cc
index f2fc9b527e2..65b44e35afd 100644
--- a/mindspore/lite/tools/converter/adapter/acl/acl_pass_impl.cc
+++ b/mindspore/lite/tools/converter/adapter/acl/acl_pass_impl.cc
@@ -343,42 +343,42 @@ STATUS AclPassImpl::ConvertGraphToOm(const FuncGraphPtr &func_graph, Buffer *om_
   return lite::RET_OK;
 }
 
-void AclPassImpl::SetAclModelInitOptions(const std::shared_ptr &ascend310_info) {
+void AclPassImpl::SetAclModelInitOptions(const std::shared_ptr &ascend_info) {
   if (!acl_model_option_cfg_.fusion_switch_config_file_path.empty()) {
-    ascend310_info->SetFusionSwitchConfigPath(acl_model_option_cfg_.fusion_switch_config_file_path);
+    ascend_info->SetFusionSwitchConfigPath(acl_model_option_cfg_.fusion_switch_config_file_path);
   }
   if (!acl_model_option_cfg_.op_select_impl_mode.empty()) {
-    ascend310_info->SetOpSelectImplMode(acl_model_option_cfg_.op_select_impl_mode);
+    ascend_info->SetOpSelectImplMode(acl_model_option_cfg_.op_select_impl_mode);
   }
   if (!acl_model_option_cfg_.buffer_optimize.empty()) {
-    ascend310_info->SetBufferOptimizeMode(acl_model_option_cfg_.buffer_optimize);
+    ascend_info->SetBufferOptimizeMode(acl_model_option_cfg_.buffer_optimize);
   }
 }
 
-void AclPassImpl::SetAclModelBuildOptions(const std::shared_ptr &ascend310_info) {
+void AclPassImpl::SetAclModelBuildOptions(const std::shared_ptr &ascend_info) {
   if (acl_model_option_cfg_.output_type != DataType::kInvalidType) {
-    ascend310_info->SetOutputType(acl_model_option_cfg_.output_type);
+    ascend_info->SetOutputType(acl_model_option_cfg_.output_type);
   }
   if (acl_model_option_cfg_.input_shape_map.size() > 0) {
-    ascend310_info->SetInputShapeMap(acl_model_option_cfg_.input_shape_map);
+    ascend_info->SetInputShapeMap(acl_model_option_cfg_.input_shape_map);
   }
   if (acl_model_option_cfg_.dynamic_batch_size.size() > 0) {
-    ascend310_info->SetDynamicBatchSize(acl_model_option_cfg_.dynamic_batch_size);
+    ascend_info->SetDynamicBatchSize(acl_model_option_cfg_.dynamic_batch_size);
   }
   if (!acl_model_option_cfg_.dynamic_image_size.empty()) {
-    ascend310_info->SetDynamicImageSize(acl_model_option_cfg_.dynamic_image_size);
+    ascend_info->SetDynamicImageSize(acl_model_option_cfg_.dynamic_image_size);
   }
   if (!acl_model_option_cfg_.input_format.empty()) {
-    ascend310_info->SetInputFormat(acl_model_option_cfg_.input_format);
+    ascend_info->SetInputFormat(acl_model_option_cfg_.input_format);
   }
   if (!acl_model_option_cfg_.input_shape.empty()) {
-    ascend310_info->SetInputShape(acl_model_option_cfg_.input_shape);
+    ascend_info->SetInputShape(acl_model_option_cfg_.input_shape);
   }
   if (!acl_model_option_cfg_.precision_mode.empty()) {
-    ascend310_info->SetPrecisionMode(acl_model_option_cfg_.precision_mode);
+    ascend_info->SetPrecisionMode(acl_model_option_cfg_.precision_mode);
   }
   if (!acl_model_option_cfg_.insert_op_config_file_path.empty()) {
-    ascend310_info->SetInsertOpConfigPath(acl_model_option_cfg_.insert_op_config_file_path);
+    ascend_info->SetInsertOpConfigPath(acl_model_option_cfg_.insert_op_config_file_path);
   }
 }
 
@@ -387,15 +387,15 @@ std::shared_ptr AclPassImpl::CreateModelContext() {
   if (model_context == nullptr) {
     return nullptr;
   }
-  auto ascend310_info = std::make_shared();
-  if (ascend310_info == nullptr) {
+  auto ascend_info = std::make_shared();
+  if (ascend_info == nullptr) {
     return nullptr;
   }
-  ascend310_info->SetDeviceID(acl_model_option_cfg_.device_id);
-  SetAclModelInitOptions(ascend310_info);
-  SetAclModelBuildOptions(ascend310_info);
+  ascend_info->SetDeviceID(acl_model_option_cfg_.device_id);
+  SetAclModelInitOptions(ascend_info);
+  SetAclModelBuildOptions(ascend_info);
 
-  model_context->MutableDeviceInfo().emplace_back(ascend310_info);
+  model_context->MutableDeviceInfo().emplace_back(ascend_info);
   return model_context;
 }
diff --git a/mindspore/lite/tools/converter/adapter/acl/acl_pass_impl.h b/mindspore/lite/tools/converter/adapter/acl/acl_pass_impl.h
index 33e5e2d881d..81509039550 100644
--- a/mindspore/lite/tools/converter/adapter/acl/acl_pass_impl.h
+++ b/mindspore/lite/tools/converter/adapter/acl/acl_pass_impl.h
@@ -62,8 +62,8 @@ class AclPassImpl {
   STATUS GetFuncGraphOutputInfo(const FuncGraphPtr &func_graph);
   STATUS TraceOutput(const AnfNodePtr &node);
   std::shared_ptr CreateModelContext();
-  void SetAclModelInitOptions(const std::shared_ptr &ascend310_info);
-  void SetAclModelBuildOptions(const std::shared_ptr &ascend310_info);
+  void SetAclModelInitOptions(const std::shared_ptr &ascend_info);
+  void SetAclModelBuildOptions(const std::shared_ptr &ascend_info);
   std::string AdjustCnodeName(const PrimitivePtr &prim);
   bool IsDynamicInput();
diff --git a/mindspore/lite/tools/converter/converter_flags.cc b/mindspore/lite/tools/converter/converter_flags.cc
index e3e17585908..7ee8ba473d9 100644
--- a/mindspore/lite/tools/converter/converter_flags.cc
+++ b/mindspore/lite/tools/converter/converter_flags.cc
@@ -82,7 +82,7 @@ Flags::Flags() {
           "");
   AddFlag(&Flags::graphInputFormatStr, "inputDataFormat",
           "Assign the input format of exported model. Only Valid for 4-dimensional input. NHWC | NCHW", "NHWC");
-  AddFlag(&Flags::device, "device", "Set the target device. Only valid when device is Ascend310.", "");
+  AddFlag(&Flags::device, "device", "Set the target device. Only valid when device is Ascend310 or Ascend710.", "");
 }
 
 int Flags::InitInputOutputDataType() {
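The device strings accepted by the tools above ("Ascend310" and now "Ascend710") both map onto the same AscendDeviceInfo. A hedged sketch of the equivalent mapping in application code, mirroring the benchmark's InitMSContext change; the helper name and CPU fallback are illustrative:

    #include <memory>
    #include <string>
    #include "include/api/context.h"

    // Hypothetical helper: translate a --device string into the unified-API device list.
    void AppendDeviceInfo(const std::string &device, mindspore::Context *context) {
      auto &device_list = context->MutableDeviceInfo();
      if (device == "Ascend310" || device == "Ascend710") {
        auto ascend_info = std::make_shared<mindspore::AscendDeviceInfo>();
        ascend_info->SetDeviceID(0);
        device_list.push_back(ascend_info);
      }
      // CPU is appended last so GPU/NPU/Ascend keep scheduling priority.
      device_list.push_back(std::make_shared<mindspore::CPUDeviceInfo>());
    }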