!20220 [MS][LITE][TOD] Activated UT of CxxApiLite and ported fixes from R1.3
Merge pull request !20220 from ehaleva/API2
Commit 6b9c26a29d

@@ -21,7 +21,11 @@
 #include <memory>
 #include "include/api/status.h"
 #include "include/api/types.h"
+#ifdef ENABLE_ANDROID
+#include "mindspore/lite/src/cxx_api/tensor/tensor_impl.h"
+#else
 #include "mindspore/core/ir/api_tensor_impl.h"
+#endif
 #include "minddata/dataset/core/tensor.h"

 namespace mindspore {
@@ -29,7 +33,7 @@ namespace dataset {
 class DETensor : public mindspore::MSTensor::Impl {
  public:
   DETensor() = default;
-  ~DETensor() override = default;
+  ~DETensor() = default;
   explicit DETensor(std::shared_ptr<dataset::Tensor> tensor_impl);
 #ifndef ENABLE_ANDROID
   explicit DETensor(std::shared_ptr<dataset::DeviceTensor> device_tensor_impl, bool is_device);

@@ -29,7 +29,7 @@ checkopts()
 do
   case "${opt}" in
     D)
-      MNIST_DATA_PATH=$OPTARG
+      MNIST_DATA_PATH=$(realpath $OPTARG)
       ;;
     d)
      DOCKER=$OPTARG
@@ -46,8 +46,8 @@ checkopts()
 }

 checkopts "$@"
-if [ "$MNIST_DATA_PATH" == "" ]; then
-  echo "MNIST Dataset directory path was not provided"
+if [ "$MNIST_DATA_PATH" == "" ] || [ ! -d "$MNIST_DATA_PATH" ]; then
+  echo "MNIST Dataset directory path was not provided or wrong path " $MNIST_DATA_PATH
   display_usage
   exit 1
 fi

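For readers porting the sample driver to C++, the same guard the script now performs (a path that was actually supplied and is an existing directory) can be sketched with C++17 `std::filesystem`; `ValidateDatasetPath` is a hypothetical helper, not part of the repository:

#include <filesystem>
#include <iostream>
#include <string>
#include <system_error>

// Hypothetical helper mirroring the script's `-d` test: the path must be
// non-empty and name an existing directory.
bool ValidateDatasetPath(const std::string &path) {
  std::error_code ec;
  if (path.empty() || !std::filesystem::is_directory(path, ec)) {
    std::cerr << "MNIST Dataset directory path was not provided or wrong path " << path << std::endl;
    return false;
  }
  return true;
}

int main(int argc, char **argv) {
  return ValidateDatasetPath(argc > 1 ? argv[1] : "") ? 0 : 1;
}
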
@@ -68,21 +68,60 @@ int main(int argc, char **argv) {
   auto inputs = model.GetInputs();
   MS_ASSERT(inputs.size() >= 1);

-  auto *input_data = reinterpret_cast<float *>(inputs.at(0).MutableData());
+  int index = 0;
+  std::cout << "There are " << inputs.size() << " input tensors with sizes: " << std::endl;
+  for (auto tensor : inputs) {
+    std::cout << "tensor " << index++ << ": shape is [";
+    for (auto dim : tensor.Shape()) {
+      std::cout << dim << " ";
+    }
+    std::cout << "]" << std::endl;
+  }
+
+  mindspore::MSTensor *input_tensor = inputs.at(0).Clone();
+  auto *input_data = reinterpret_cast<float *>(input_tensor->MutableData());
   std::ifstream in;
   in.open("dataset/batch_of32.dat", std::ios::in | std::ios::binary);
-  in.read(reinterpret_cast<char *>(&input_data), inputs.at(0).ElementNum() * sizeof(float));
+  if (in.fail()) {
+    std::cout << "error loading dataset/batch_of32.dat file reading" << std::endl;
+    MS_ASSERT(!in.fail());
+  }
+  in.read(reinterpret_cast<char *>(input_data), inputs.at(0).ElementNum() * sizeof(float));
   in.close();

   std::vector<mindspore::MSTensor> outputs;
-  status = model.Predict(inputs, &outputs);
+  status = model.Predict({*input_tensor}, &outputs);
   if (status != mindspore::kSuccess) {
     std::cout << "Error " << status << " during running predict of model " << infer_model_fn;
     MS_ASSERT(status != mindspore::kSuccess);
   }
-  std::cout << "Got Vector of size: " << outputs.size() << std::endl;
+
+  index = 0;
+  std::cout << "There are " << outputs.size() << " output tensors with sizes: " << std::endl;
   for (auto tensor : outputs) {
-    std::cout << "[ " << tensor.Shape().at(0) << ", " << tensor.Shape().at(1) << "]\n";
+    std::cout << "tensor " << index++ << ": shape is [";
+    for (auto dim : tensor.Shape()) {
+      std::cout << dim << " ";
+    }
+    std::cout << "]" << std::endl;
   }
+
+  if (outputs.size() > 0) {
+    std::cout << "The predicted classes are:" << std::endl;
+    auto predictions = reinterpret_cast<float *>(outputs.at(0).MutableData());
+    int i = 0;
+    for (int b = 0; b < outputs.at(0).Shape().at(0); b++) {
+      int max_c = 0;
+      float max_p = predictions[i];
+      for (int c = 0; c < outputs.at(0).Shape().at(1); c++, i++) {
+        if (predictions[i] > max_p) {
+          max_c = c;
+          max_p = predictions[i];
+        }
+      }
+      std::cout << max_c << ", ";
+    }
+    std::cout << std::endl;
+  }
   return 0;
 }

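A note on the read fix above: the old call passed `&input_data`, the address of the local pointer, so `in.read` clobbered the pointer variable itself instead of filling the tensor buffer, and a failed open was never checked before reading. A minimal self-contained sketch of the corrected pattern (the element count is a placeholder standing in for `inputs.at(0).ElementNum()`):

#include <cstddef>
#include <fstream>
#include <iostream>
#include <vector>

int main() {
  const size_t kElementNum = 32 * 32;  // placeholder for inputs.at(0).ElementNum()
  std::vector<float> buffer(kElementNum);

  std::ifstream in("dataset/batch_of32.dat", std::ios::in | std::ios::binary);
  if (in.fail()) {  // check the open before reading, as the patched sample does
    std::cerr << "error loading dataset/batch_of32.dat" << std::endl;
    return 1;
  }
  // Correct: cast the buffer's data pointer, not the address of the pointer.
  in.read(reinterpret_cast<char *>(buffer.data()), kElementNum * sizeof(float));
  in.close();
  std::cout << "read dataset into buffer of " << buffer.size() << " floats" << std::endl;
  return 0;
}
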
@@ -12,7 +12,7 @@ set(CMAKE_CXX_STANDARD 17)
 set(CMAKE_CXX_FLAGS_DEBUG "$ENV{CXXFLAGS} -O0 -g2 -ggdb -fno-inline-functions -fno-omit-frame-pointer \
     -D_LIBCPP_INLINE_VISIBILITY='' -D_LIBCPP_DISABLE_EXTERN_TEMPLATE=1 -DHALF_ENABLE_CPP11_USER_LITERALS=0 \
     -D_FORTIFY_SOURCE=2 -Wno-cpp")
-set(CMAKE_CXX_FLAGS_DEBUG "$ENV{CXXFLAGS} -Werror -Wno-return-std-move -Wno-unused-private-field \
+set(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} -Werror -Wno-return-std-move -Wno-unused-private-field \
     -Wno-unused-lambda-capture -Wno-sign-compare -Wno-overloaded-virtual -Wno-unneeded-internal-declaration \
     -Wno-unused-variable -Wno-pessimizing-move -Wno-inconsistent-missing-override")

@@ -14,8 +14,8 @@
  * limitations under the License.
  */

-#ifndef MINDSPORE_LITE_SRC_CXX_API_TENSOR_TENSOR_IMPL_H
-#define MINDSPORE_LITE_SRC_CXX_API_TENSOR_TENSOR_IMPL_H
+#ifndef MINDSPORE_LITE_SRC_CXX_API_TENSOR_TENSOR_IMPL_H_
+#define MINDSPORE_LITE_SRC_CXX_API_TENSOR_TENSOR_IMPL_H_

 #include <cstddef>
 #include <numeric>
@@ -38,7 +38,7 @@ class MSTensor::Impl {
  public:
   Impl() {}

-  ~Impl() {
+  virtual ~Impl() {
     if (lite_tensor_ == nullptr) {
       return;
     }
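Making `~Impl()` virtual matters because subclasses such as DETensor are now destroyed through `MSTensor::Impl` pointers. A generic illustration of the C++ rule, with made-up class names:

#include <iostream>
#include <memory>

struct Base {
  virtual ~Base() { std::cout << "~Base" << std::endl; }  // virtual: derived cleanup runs
};

struct Derived : Base {
  ~Derived() override { std::cout << "~Derived" << std::endl; }
};

int main() {
  // Deleting through a Base pointer: with the virtual destructor both
  // ~Derived and ~Base run; without it, skipping ~Derived is undefined behavior.
  std::unique_ptr<Base> p = std::make_unique<Derived>();
  return 0;
}
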
@@ -65,7 +65,7 @@ class MSTensor::Impl {

   static std::vector<std::string> MS_API TensorImplToStrings(const std::shared_ptr<Impl> &impl);

-  const std::string &Name() const {
+  virtual const std::string &Name() const {
     static std::string empty = "";
     if (lite_tensor_ == nullptr) {
       MS_LOG(ERROR) << "Invalid tensor.";
@@ -82,7 +82,7 @@ class MSTensor::Impl {
     lite_tensor_->set_tensor_name(name);
   }

-  enum DataType DataType() const {
+  virtual enum DataType DataType() const {
     if (lite_tensor_ == nullptr) {
       MS_LOG(ERROR) << "Invalid tensor.";
       return DataType::kTypeUnknown;
@@ -106,18 +106,20 @@ class MSTensor::Impl {
     return static_cast<int64_t>(lite_tensor_->ElementsNum());
   }

-  const std::vector<int64_t> &Shape() {
+  virtual const std::vector<int64_t> &Shape() const {
     static std::vector<int64_t> empty;
     if (lite_tensor_ == nullptr) {
       MS_LOG(ERROR) << "Invalid tensor.";
       return empty;
     }
     auto shape = lite_tensor_->shape();
-    shape_.resize(shape.size());
-    std::transform(shape.begin(), shape.end(), shape_.begin(), [](int c) { return static_cast<int64_t>(c); });
-    return shape_;
+    lite_shape.resize(shape.size());
+    std::transform(shape.begin(), shape.end(), lite_shape.begin(), [](int c) { return static_cast<int64_t>(c); });
+    return lite_shape;
   }

+  virtual std::shared_ptr<Impl> Clone() const { return nullptr; }
+
   void SetShape(const std::vector<int64_t> &shape) {
     if (lite_tensor_ == nullptr) {
       MS_LOG(ERROR) << "Invalid tensor.";
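The `Shape()` rewrite pairs with the `mutable std::vector<int64_t> lite_shape` member introduced at the bottom of the file: the method becomes `const`, yet can still refresh its cached copy. A small sketch of that idiom under assumed names (`TensorView` and `cached_shape_` are illustrative only):

#include <algorithm>
#include <cstdint>
#include <utility>
#include <vector>

class TensorView {
 public:
  explicit TensorView(std::vector<int> raw) : raw_shape_(std::move(raw)) {}

  // A const accessor that still refreshes its cached int64 copy; legal only
  // because the cache is declared mutable, as lite_shape is in the patch.
  const std::vector<int64_t> &Shape() const {
    cached_shape_.resize(raw_shape_.size());
    std::transform(raw_shape_.begin(), raw_shape_.end(), cached_shape_.begin(),
                   [](int c) { return static_cast<int64_t>(c); });
    return cached_shape_;
  }

 private:
  std::vector<int> raw_shape_;
  mutable std::vector<int64_t> cached_shape_;  // writable even from const methods
};
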
@@ -161,7 +163,7 @@ class MSTensor::Impl {
     lite_tensor_->set_format(format);
   }

-  std::shared_ptr<const void> Data() const {
+  virtual std::shared_ptr<const void> Data() const {
     if (lite_tensor_ == nullptr) {
       MS_LOG(ERROR) << "Invalid tensor.";
       return nullptr;
@@ -175,7 +177,7 @@ class MSTensor::Impl {
     return std::shared_ptr<const void>(lite_tensor_->data(), [](const void *) {});
   }

-  void *MutableData() {
+  virtual void *MutableData() {
     if (lite_tensor_ == nullptr) {
       MS_LOG(ERROR) << "Invalid tensor.";
       return nullptr;
@@ -183,7 +185,7 @@ class MSTensor::Impl {
     return lite_tensor_->MutableData();
   }

-  size_t DataSize() const {
+  virtual size_t DataSize() const {
     if (lite_tensor_ == nullptr) {
       MS_LOG(ERROR) << "Invalid tensor.";
       return 0;
@@ -199,7 +201,7 @@ class MSTensor::Impl {
     lite_tensor_->set_data(data);
   }

-  bool IsDevice() const { return false; }
+  virtual bool IsDevice() const { return false; }

   tensor::MSTensor *lite_tensor() const { return lite_tensor_; }

@@ -219,10 +221,10 @@ class MSTensor::Impl {
  private:
   tensor::MSTensor *lite_tensor_ = nullptr;
   std::string tensor_name_ = "";
-  std::vector<int64_t> shape_ = {};
+  mutable std::vector<int64_t> lite_shape;
   bool own_data_ = false;
   bool from_session_ = false;
 };
 }  // namespace mindspore

-#endif  // MINDSPORE_LITE_SRC_CXX_API_TENSOR_TENSOR_IMPL_H
+#endif  // MINDSPORE_LITE_SRC_CXX_API_TENSOR_TENSOR_IMPL_H_