From def1f77dba5d2f7ba9bdc8dec3486d85dad1590b Mon Sep 17 00:00:00 2001
From: zhangyanhui
Date: Wed, 14 Dec 2022 17:44:09 +0800
Subject: [PATCH] add warning info in exportModel & modify API info in model.h

---
 include/api/model.h                            | 6 +++---
 mindspore/lite/src/litert/cxx_api/converters.h | 4 ++++
 2 files changed, 7 insertions(+), 3 deletions(-)

diff --git a/include/api/model.h b/include/api/model.h
index 1480c200f90..905ff805dbe 100644
--- a/include/api/model.h
+++ b/include/api/model.h
@@ -151,7 +151,7 @@ class MS_API Model {
   /// \return Status.
   Status UpdateWeights(const std::vector<MSTensor> &new_weights);
 
-  /// \brief Inference model.
+  /// \brief Inference model API. If this API is called in train mode, it is equivalent to the RunStep API.
   ///
   /// \param[in] inputs A vector where model inputs are arranged in sequence.
   /// \param[out] outputs Which is a pointer to a vector. The model outputs are filled in the container in sequence.
@@ -162,7 +162,7 @@
   Status Predict(const std::vector<MSTensor> &inputs, std::vector<MSTensor> *outputs,
                  const MSKernelCallBack &before = nullptr, const MSKernelCallBack &after = nullptr);
 
-  /// \brief Inference model.
+  /// \brief Inference model API. If this API is called in train mode, it is equivalent to the RunStep API.
   ///
   /// \param[in] before CallBack before predict.
   /// \param[in] after CallBack after predict.
@@ -170,7 +170,7 @@
   /// \return Status.
   Status Predict(const MSKernelCallBack &before = nullptr, const MSKernelCallBack &after = nullptr);
 
-  /// \brief Run model by step.
+  /// \brief Training API. Runs the model by one step.
   ///
   /// \param[in] before CallBack before RunStep.
   /// \param[in] after CallBack after RunStep.
diff --git a/mindspore/lite/src/litert/cxx_api/converters.h b/mindspore/lite/src/litert/cxx_api/converters.h
index 16398763904..3d1131b1de5 100644
--- a/mindspore/lite/src/litert/cxx_api/converters.h
+++ b/mindspore/lite/src/litert/cxx_api/converters.h
@@ -25,6 +25,7 @@
 #include "include/train/train_cfg.h"
 #include "src/litert/inner_context.h"
 #include "src/litert/c_api/context_c.h"
+#include "src/common/log_adapter.h"
 
 namespace mindspore {
 class ContextUtils {
@@ -59,6 +60,9 @@ inline lite::QuantizationType A2L_ConvertQT(mindspore::QuantizationType qt) {
   if (qt == kWeightQuant) {
     return lite::QT_WEIGHT;
   }
+  if (qt == kFullQuant || qt == kUnknownQuantType) {
+    MS_LOG(WARNING) << qt << " is not supported, setting the quantization type to default.";
+  }
   return lite::QT_DEFAULT;
 }
 
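
For reference, a minimal usage sketch of the behavior the updated comments describe (illustrative only, not part of the patch): in train mode Predict is documented to behave like RunStep, and exporting with kFullQuant or kUnknownQuantType now logs a warning and falls back to the default quantization type. The sketch assumes a trainable MindIR model file and a CPU context, and that the public C++ entry points Model::Build, Model::SetTrainMode, Model::RunStep, and Serialization::ExportModel are available as in the released API; the file names "train_model.ms" and "exported.ms" are placeholders.

// Illustrative sketch only; names and paths are placeholders.
#include <memory>
#include "include/api/context.h"
#include "include/api/model.h"
#include "include/api/serialization.h"
#include "include/api/types.h"

int main() {
  // CPU-only context for the example.
  auto context = std::make_shared<mindspore::Context>();
  context->MutableDeviceInfo().push_back(std::make_shared<mindspore::CPUDeviceInfo>());

  mindspore::Model model;
  // Build from a trainable MindIR file (placeholder path).
  if (model.Build("train_model.ms", mindspore::kMindIR, context) != mindspore::kSuccess) {
    return -1;
  }

  // In train mode, Predict() is documented to be equivalent to RunStep().
  model.SetTrainMode(true);
  if (model.RunStep() != mindspore::kSuccess) {  // one training step
    return -1;
  }

  // Exporting with kFullQuant (or kUnknownQuantType) now emits the warning added
  // in converters.h and falls back to the default quantization type.
  mindspore::Serialization::ExportModel(model, mindspore::kMindIR, "exported.ms",
                                        mindspore::kFullQuant, false);
  return 0;
}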