From 50dcb79bdf2afba446a5b03b5b26be9fee8f6338 Mon Sep 17 00:00:00 2001
From: ervinzhang
Date: Thu, 30 Jul 2020 16:29:17 -0400
Subject: [PATCH] added MindData lite

---
 .gitmodules | 10 +
 build.sh | 46 +++++
 .../ccsrc/minddata/dataset/api/CMakeLists.txt | 2 +
 .../ccsrc/minddata/dataset/api/de_tensor.cc | 188 ++++++++++++++++++
 .../ccsrc/minddata/dataset/api/execute.cc | 55 +++++
 .../ccsrc/minddata/dataset/core/client.h | 3 +
 .../ccsrc/minddata/dataset/core/tensor.cc | 4 +
 .../ccsrc/minddata/dataset/core/tensor.h | 9 +
 .../minddata/dataset/include/de_tensor.h | 53 +++++
 .../ccsrc/minddata/dataset/include/execute.h | 51 +++++
 .../ccsrc/minddata/dataset/include/tensor.h | 24 ++-
 .../kernels/image/resize_with_bbox_op.cc | 1 -
 mindspore/lite/CMakeLists.txt | 27 +++
 mindspore/lite/minddata/CMakeLists.txt | 44 ++++
 mindspore/lite/src/CMakeLists.txt | 4 +
 mindspore/lite/test/CMakeLists.txt | 10 +
 mindspore/lite/test/dataset/de_tensor_test.cc | 98 +++++++++
 mindspore/lite/test/dataset/eager_test.cc | 165 +++++++++++++++
 third_party/eigen | 1 +
 third_party/libjpeg-turbo | 1 +
 third_party/opencv | 1 +
 21 files changed, 789 insertions(+), 8 deletions(-)
 create mode 100644 mindspore/ccsrc/minddata/dataset/api/de_tensor.cc
 create mode 100644 mindspore/ccsrc/minddata/dataset/api/execute.cc
 create mode 100644 mindspore/ccsrc/minddata/dataset/include/de_tensor.h
 create mode 100644 mindspore/ccsrc/minddata/dataset/include/execute.h
 create mode 100644 mindspore/lite/minddata/CMakeLists.txt
 create mode 100644 mindspore/lite/test/dataset/de_tensor_test.cc
 create mode 100644 mindspore/lite/test/dataset/eager_test.cc
 create mode 160000 third_party/eigen
 create mode 160000 third_party/libjpeg-turbo
 create mode 160000 third_party/opencv

diff --git a/.gitmodules b/.gitmodules
index 80eac2de7dc..9ac5eb15ed9 100644
--- a/.gitmodules
+++ b/.gitmodules
@@ -24,3 +24,13 @@
 [submodule "third_party/OpenCL-Headers"]
     path = third_party/OpenCL-Headers
     url = https://github.com/KhronosGroup/OpenCL-Headers.git
+[submodule "third_party/opencv"]
+    path = third_party/opencv
+    url = https://github.com/opencv/opencv.git
+[submodule "third_party/eigen"]
+    path = third_party/eigen
+    url = https://gitlab.com/libeigen/eigen.git
+[submodule "third_party/libjpeg-turbo"]
+    path = third_party/libjpeg-turbo
+    url = https://github.com/libjpeg-turbo/libjpeg-turbo.git
+    ignore = dirty
diff --git a/build.sh b/build.sh
index bd11f4fa043..af8cd3164a1 100755
--- a/build.sh
+++ b/build.sh
@@ -519,6 +519,50 @@ build_opencl() {
     fi
 }
 
+build_opencv() {
+    cd ${BASEPATH}
+    if [[ "${INC_BUILD}" == "off" ]]; then
+        git submodule update --init --recursive third_party/opencv
+        cd ${BASEPATH}/third_party/opencv
+        rm -rf build && mkdir -p build && cd build && cmake ${CMAKE_MINDDATA_ARGS} -DBUILD_SHARED_LIBS=ON -DBUILD_ANDROID_PROJECTS=OFF \
+            -DBUILD_LIST=core,imgcodecs,imgproc -DBUILD_ZLIB=ON .. && make -j$THREAD_NUM
+    fi
+}
+
+build_jpeg_turbo() {
+    cd ${BASEPATH}
+    if [[ "${INC_BUILD}" == "off" ]]; then
+        git submodule update --init --recursive third_party/libjpeg-turbo
+        cd ${BASEPATH}/third_party/libjpeg-turbo
+        rm -rf build && mkdir -p build && cd build && cmake ${CMAKE_MINDDATA_ARGS} -DCMAKE_BUILD_TYPE=Release \
+            -DCMAKE_INSTALL_PREFIX="${BASEPATH}/third_party/libjpeg-turbo" .. && make -j$THREAD_NUM && make install
+    fi
+}
+
+build_eigen() {
+    cd ${BASEPATH}
+    git submodule update --init --recursive third_party/eigen
+}
+
+build_minddata_lite_deps()
+{
+    echo "start build minddata lite project"
+    if [[ "${LITE_PLATFORM}" == "arm64" ]]; then
+        CMAKE_MINDDATA_ARGS="-DCMAKE_TOOLCHAIN_FILE="${ANDROID_NDK}/build/cmake/android.toolchain.cmake" -DANDROID_NATIVE_API_LEVEL="19" \
+            -DANDROID_NDK="${ANDROID_NDK}" -DANDROID_ABI="arm64-v8a" -DANDROID_TOOLCHAIN_NAME="aarch64-linux-android-clang" \
+            -DANDROID_STL="c++_shared" -DCMAKE_BUILD_TYPE=${BUILD_TYPE}"
+    elif [[ "${LITE_PLATFORM}" == "arm32" ]]; then
+        CMAKE_MINDDATA_ARGS="-DCMAKE_TOOLCHAIN_FILE="${ANDROID_NDK}/build/cmake/android.toolchain.cmake" -DANDROID_NATIVE_API_LEVEL="19" \
+            -DANDROID_NDK="${ANDROID_NDK}" -DANDROID_ABI="armeabi-v7a" -DANDROID_TOOLCHAIN_NAME="clang" \
+            -DANDROID_STL="c++_shared" -DCMAKE_BUILD_TYPE=${BUILD_TYPE}"
+    else
+        CMAKE_MINDDATA_ARGS="-DCMAKE_BUILD_TYPE=${BUILD_TYPE} "
+    fi
+    build_opencv
+    build_eigen
+    build_jpeg_turbo
+}
+
 build_lite()
 {
     echo "start build mindspore lite project"
@@ -533,6 +577,8 @@ build_lite()
     build_flatbuffer
     build_gtest
 
+    build_minddata_lite_deps
+
     cd "${BASEPATH}/mindspore/lite"
     if [[ "${INC_BUILD}" == "off" ]]; then
         rm -rf build
diff --git a/mindspore/ccsrc/minddata/dataset/api/CMakeLists.txt b/mindspore/ccsrc/minddata/dataset/api/CMakeLists.txt
index ae0b9cc28ed..93de69aad24 100644
--- a/mindspore/ccsrc/minddata/dataset/api/CMakeLists.txt
+++ b/mindspore/ccsrc/minddata/dataset/api/CMakeLists.txt
@@ -13,4 +13,6 @@ add_library(cpp-API OBJECT
     iterator.cc
     transforms.cc
     samplers.cc
+    de_tensor.cc
+    execute.cc
     )
diff --git a/mindspore/ccsrc/minddata/dataset/api/de_tensor.cc b/mindspore/ccsrc/minddata/dataset/api/de_tensor.cc
new file mode 100644
index 00000000000..a2f61c899d8
--- /dev/null
+++ b/mindspore/ccsrc/minddata/dataset/api/de_tensor.cc
@@ -0,0 +1,188 @@
+#include "minddata/dataset/include/de_tensor.h"
+#include "minddata/dataset/core/constants.h"
+#include "minddata/dataset/core/data_type.h"
+#include "mindspore/core/ir/dtype/type_id.h"
+#include "utils/hashing.h"
+#include "mindspore/lite/src/ir/tensor.h"
+
+namespace mindspore {
+namespace tensor {
+dataset::DataType MSTypeToDEType(TypeId data_type) {
+  switch (data_type) {
+    case kNumberTypeBool:
+      return dataset::DataType(dataset::DataType::DE_BOOL);
+    case kNumberTypeInt8:
+      return dataset::DataType(dataset::DataType::DE_INT8);
+    case kNumberTypeUInt8:
+      return dataset::DataType(dataset::DataType::DE_UINT8);
+    case kNumberTypeInt16:
+      return dataset::DataType(dataset::DataType::DE_INT16);
+    case kNumberTypeUInt16:
+      return dataset::DataType(dataset::DataType::DE_UINT16);
+    case kNumberTypeInt32:
+      return dataset::DataType(dataset::DataType::DE_INT32);
+    case kNumberTypeUInt32:
+      return dataset::DataType(dataset::DataType::DE_UINT32);
+    case kNumberTypeInt64:
+      return dataset::DataType(dataset::DataType::DE_INT64);
+    case kNumberTypeUInt64:
+      return dataset::DataType(dataset::DataType::DE_UINT64);
+    case kNumberTypeFloat16:
+      return dataset::DataType(dataset::DataType::DE_FLOAT16);
+    case kNumberTypeFloat32:
+      return dataset::DataType(dataset::DataType::DE_FLOAT32);
+    case kNumberTypeFloat64:
+      return dataset::DataType(dataset::DataType::DE_FLOAT64);
+    default:
+      // maybe throw?
+ return dataset::DataType(dataset::DataType::DE_UNKNOWN); + } +} + +TypeId DETypeToMSType(dataset::DataType data_type) { + switch (data_type.value()) { + case dataset::DataType::DE_BOOL: + return mindspore::TypeId::kNumberTypeBool; + case dataset::DataType::DE_INT8: + return mindspore::TypeId::kNumberTypeInt8; + case dataset::DataType::DE_UINT8: + return mindspore::TypeId::kNumberTypeUInt8; + case dataset::DataType::DE_INT16: + return mindspore::TypeId::kNumberTypeInt16; + case dataset::DataType::DE_UINT16: + return mindspore::TypeId::kNumberTypeUInt16; + case dataset::DataType::DE_INT32: + return mindspore::TypeId::kNumberTypeInt32; + case dataset::DataType::DE_UINT32: + return mindspore::TypeId::kNumberTypeUInt32; + case dataset::DataType::DE_INT64: + return mindspore::TypeId::kNumberTypeInt64; + case dataset::DataType::DE_UINT64: + return mindspore::TypeId::kNumberTypeUInt64; + case dataset::DataType::DE_FLOAT16: + return mindspore::TypeId::kNumberTypeFloat16; + case dataset::DataType::DE_FLOAT32: + return mindspore::TypeId::kNumberTypeFloat32; + case dataset::DataType::DE_FLOAT64: + return mindspore::TypeId::kNumberTypeFloat64; + default: + // maybe throw? + return kTypeUnknown; + } +} + +MSTensor *DETensor::CreateTensor(TypeId data_type, const std::vector &shape) { + return new DETensor(data_type, shape); +} + +MSTensor *DETensor::CreateTensor(const std::string &path) { + std::shared_ptr t; + (void) dataset::Tensor::CreateFromFile(path, &t); + return new DETensor(std::move(t)); +} + +DETensor::DETensor(TypeId data_type, const std::vector &shape) { + std::vector t_shape; + t_shape.reserve(shape.size()); + std::transform(shape.begin(), shape.end(), + std::back_inserter(t_shape), + [](int s) -> dataset::dsize_t {return static_cast(s);}); + dataset::Tensor::CreateEmpty(dataset::TensorShape(t_shape), MSTypeToDEType(data_type), &this->tensor_impl_); +} + +DETensor::DETensor(std::shared_ptr tensor_ptr) { this->tensor_impl_ = std::move(tensor_ptr); } + +MSTensor *DETensor::ConvertToLiteTensor() { + // static MSTensor::CreateTensor is only for the LiteTensor + MSTensor *tensor = MSTensor::CreateTensor(this->data_type(), this->shape()); + MS_ASSERT(tensor->Size() == this->Size()); + memcpy_s(tensor->MutableData(), tensor->Size(), this->MutableData(), this->Size()); + return tensor; +} + +std::shared_ptr DETensor::tensor() const { + MS_ASSERT(this->tensor_impl_ != nullptr); + return this->tensor_impl_; +} + +TypeId DETensor::data_type() const { + MS_ASSERT(this->tensor_impl_ != nullptr); + return DETypeToMSType(this->tensor_impl_->type()); +} + +TypeId DETensor::set_data_type(TypeId data_type) { + MS_ASSERT(this->tensor_impl_ != nullptr); + if (data_type != this->data_type()) { + std::shared_ptr temp; + dataset::Tensor::CreateFromMemory(this->tensor_impl_->shape(), MSTypeToDEType(data_type), this->tensor_impl_->GetBuffer(), &temp); + this->tensor_impl_ = temp; + } + return data_type; +} + +std::vector DETensor::shape() const { + MS_ASSERT(this->tensor_impl_ != nullptr); + std::vector t_shape = this->tensor_impl_->shape().AsVector(); + std::vector shape; + shape.reserve(t_shape.size()); + std::transform(t_shape.begin(), t_shape.end(), + std::back_inserter(shape), + [](dataset::dsize_t s) -> int {return static_cast(s);}); + return shape; +} + +size_t DETensor::set_shape(const std::vector &shape) { + MS_ASSERT(this->tensor_impl_ != nullptr); + std::vector t_shape; + t_shape.reserve(shape.size()); + std::transform(shape.begin(), shape.end(), + std::back_inserter(t_shape), + [](int s) -> 
dataset::dsize_t {return static_cast(s);}); + dataset::Status rc = this->tensor_impl_->Reshape(dataset::TensorShape(t_shape)); + //TODO: what if t_shape has different size? + return shape.size(); +} + +int DETensor::DimensionSize(size_t index) const { + MS_ASSERT(this->tensor_impl_ != nullptr); + int dim_size = -1; + auto shape = this->shape(); + if (index < shape.size()) { + dim_size = shape[index]; + } else { + MS_LOG(ERROR) << "Dimension index is wrong: " << index; + } + return dim_size; +} + +int DETensor::ElementsNum() const { + MS_ASSERT(this->tensor_impl_ != nullptr); + return this->tensor_impl_->Size(); +} + +std::size_t DETensor::hash() const { + MS_ASSERT(this->tensor_impl_ != nullptr); + auto shape = this->shape(); + std::size_t hash_value = std::hash{}(SizeToInt(this->data_type())); + hash_value = hash_combine(hash_value, std::hash{}(shape.size())); + // hash all elements may costly, so only take at most 4 elements into account based on + // some experiments. + for (size_t i = 0; (i < shape.size()) && (i < 4); ++i) { + hash_value = hash_combine(hash_value, (std::hash{}(shape[i]))); + } + return hash_value; +} + +size_t DETensor::Size() const { + MS_ASSERT(this->tensor_impl_ != nullptr); + return this->tensor_impl_->SizeInBytes(); +} + +void *DETensor::MutableData() const { + MS_ASSERT(this->tensor_impl_ != nullptr); + // TODO: friend the DETensor? + return this->tensor_impl_->GetMutableBuffer(); +} + +} // namespace tensor +} // namespace mindspore \ No newline at end of file diff --git a/mindspore/ccsrc/minddata/dataset/api/execute.cc b/mindspore/ccsrc/minddata/dataset/api/execute.cc new file mode 100644 index 00000000000..78acf38469c --- /dev/null +++ b/mindspore/ccsrc/minddata/dataset/api/execute.cc @@ -0,0 +1,55 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "minddata/dataset/include/execute.h" +#include "minddata/dataset/include/de_tensor.h" +#include "minddata/dataset/include/tensor.h" +#include "minddata/dataset/kernels/tensor_op.h" + +namespace mindspore { +namespace dataset { +namespace api { + +Execute::Execute(const std::shared_ptr &op) : op_(std::move(op)) {} + +std::shared_ptr Execute::operator()(std::shared_ptr input){ + // Build the op + if (op_ == nullptr) { + MS_LOG(ERROR) << "Input TensorOperation is not valid"; + return nullptr; + } + + std::shared_ptr de_input = std::dynamic_pointer_cast(input)->tensor(); + if (de_input == nullptr) { + MS_LOG(ERROR) << "Input Tensor is not valid"; + return nullptr; + } + std::shared_ptr transform = op_->Build(); + std::shared_ptr de_output; + Status rc = transform->Compute(de_input, &de_output); + + if (rc.IsError()) { + // execution failed + MS_LOG(ERROR) << "Operation execution failed : " << rc.ToString(); + return nullptr; + } + return std::shared_ptr(new tensor::DETensor(std::move(de_output))); +} + + +} // namespace api +} // namespace dataset +} // namespace mindspore diff --git a/mindspore/ccsrc/minddata/dataset/core/client.h b/mindspore/ccsrc/minddata/dataset/core/client.h index b538bb20e1c..3de90cfeb26 100644 --- a/mindspore/ccsrc/minddata/dataset/core/client.h +++ b/mindspore/ccsrc/minddata/dataset/core/client.h @@ -25,8 +25,11 @@ #include "minddata/dataset/core/tensor_shape.h" #include "minddata/dataset/engine/data_schema.h" #include "minddata/dataset/engine/dataset_iterator.h" + +#ifndef ENABLE_ANDROID #include "minddata/dataset/engine/datasetops/source/mindrecord_op.h" #include "minddata/dataset/engine/datasetops/source/tf_reader_op.h" +#endif #ifdef ENABLE_PYTHON #include "minddata/dataset/engine/datasetops/barrier_op.h" diff --git a/mindspore/ccsrc/minddata/dataset/core/tensor.cc b/mindspore/ccsrc/minddata/dataset/core/tensor.cc index b8717c26fa0..cd7e6dd75fd 100644 --- a/mindspore/ccsrc/minddata/dataset/core/tensor.cc +++ b/mindspore/ccsrc/minddata/dataset/core/tensor.cc @@ -213,6 +213,7 @@ Status Tensor::CreateFromNpArray(const py::array &arr, std::shared_ptr * } #endif +#ifndef ENABLE_ANDROID Status Tensor::CreateFromByteList(const dataengine::BytesList &bytes_list, const TensorShape &shape, TensorPtr *out) { const TensorAlloc *alloc = GlobalContext::Instance()->tensor_allocator(); *out = std::allocate_shared(*alloc, TensorShape({static_cast(bytes_list.value_size())}), @@ -255,6 +256,7 @@ Status Tensor::CreateFromByteList(const dataengine::BytesList &bytes_list, const (*out)->Reshape(shape); return Status::OK(); } +#endif Status Tensor::CreateFromFile(const std::string &path, std::shared_ptr *out) { std::ifstream fs; @@ -269,6 +271,7 @@ Status Tensor::CreateFromFile(const std::string &path, std::shared_ptr * return Status::OK(); } +#ifndef ENABLE_ANDROID Status Tensor::CreateFromByteList(const dataengine::BytesList &bytes_list, const TensorShape &shape, const DataType &type, dsize_t pad_size, TensorPtr *out) { RETURN_IF_NOT_OK(Tensor::CreateEmpty(shape, type, out)); @@ -298,6 +301,7 @@ Status Tensor::CreateFromByteList(const dataengine::BytesList &bytes_list, const return Status::OK(); } +#endif // Memcpy the given strided array's used part to consecutive memory // Consider a 3-d array diff --git a/mindspore/ccsrc/minddata/dataset/core/tensor.h b/mindspore/ccsrc/minddata/dataset/core/tensor.h index 29d1785ad16..89c69e318bd 100644 --- a/mindspore/ccsrc/minddata/dataset/core/tensor.h +++ b/mindspore/ccsrc/minddata/dataset/core/tensor.h @@ -38,12 +38,18 @@ 
#include "minddata/dataset/core/data_type.h" #include "minddata/dataset/core/tensor_shape.h" #include "minddata/dataset/util/status.h" +#include "minddata/dataset/include/de_tensor.h" +#ifndef ENABLE_ANDROID #include "proto/example.pb.h" +#endif #ifdef ENABLE_PYTHON namespace py = pybind11; #endif namespace mindspore { +namespace tensor { +class DETensor; +} // namespace tensor namespace dataset { class Tensor; template @@ -55,6 +61,7 @@ using offset_t = uint32_t; // type of offset va using TensorPtr = std::shared_ptr; class Tensor { + friend class tensor::DETensor; public: Tensor() = delete; Tensor(const Tensor &other) = delete; @@ -117,6 +124,7 @@ class Tensor { static Status CreateFromNpArray(const py::array &arr, TensorPtr *out); #endif +#ifndef ENABLE_ANDROID /// Create a tensor of type DE_STRING from a BytesList. /// \param[in] bytes_list protobuf's Bytelist /// \param[in] shape shape of the outout tensor @@ -134,6 +142,7 @@ class Tensor { /// \return Status Code static Status CreateFromByteList(const dataengine::BytesList &bytes_list, const TensorShape &shape, const DataType &type, dsize_t pad_size, TensorPtr *out); +#endif /// Create a Tensor from a given list of values. /// \tparam type of the values to be inserted. diff --git a/mindspore/ccsrc/minddata/dataset/include/de_tensor.h b/mindspore/ccsrc/minddata/dataset/include/de_tensor.h new file mode 100644 index 00000000000..5b9a36ef1ff --- /dev/null +++ b/mindspore/ccsrc/minddata/dataset/include/de_tensor.h @@ -0,0 +1,53 @@ + +#ifndef DATASET_INCLUDE_DETENSOR_H_ +#define DATASET_INCLUDE_DETENSOR_H_ +#include "include/ms_tensor.h" +#include "minddata/dataset/include/tensor.h" +#include "minddata/dataset/util/status.h" +namespace mindspore { +namespace tensor { +class DETensor : public MSTensor { + public: + // brief Create a MSTensor pointer. + // + // param data_type DataTypeId of tensor to be created. + // param shape Shape of tensor to be created. + // return MSTensor pointer. + static MSTensor *CreateTensor(TypeId data_type, const std::vector &shape); + + static MSTensor *CreateTensor(const std::string &path); + + DETensor(TypeId data_type, const std::vector &shape); + + explicit DETensor(std::shared_ptr tensor_ptr); + + ~DETensor() = default; + + MSTensor *ConvertToLiteTensor(); + + std::shared_ptr tensor() const; + + TypeId data_type() const override; + + TypeId set_data_type(const TypeId data_type) override; + + std::vector shape() const override; + + size_t set_shape(const std::vector &shape) override; + + int DimensionSize(size_t index) const override; + + int ElementsNum() const override; + + std::size_t hash() const override; + + size_t Size() const override; + + void *MutableData() const override; + + protected: + std::shared_ptr tensor_impl_; +}; +} // namespace tensor +} // namespace mindspore +#endif // DATASET_INCLUDE_DETENSOR_H_ \ No newline at end of file diff --git a/mindspore/ccsrc/minddata/dataset/include/execute.h b/mindspore/ccsrc/minddata/dataset/include/execute.h new file mode 100644 index 00000000000..4d686757ef1 --- /dev/null +++ b/mindspore/ccsrc/minddata/dataset/include/execute.h @@ -0,0 +1,51 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef DATASET_API_EXECUTE_H_ +#define DATASET_API_EXECUTE_H_ + +#include +#include +#include "minddata/dataset/core/constants.h" +#include "minddata/dataset/include/de_tensor.h" +#include "minddata/dataset/include/transforms.h" + +namespace mindspore { +namespace dataset { + +class TensorOp; + +namespace api { + +class Execute { + public: + /// \brief Constructor + Execute(const std::shared_ptr &op); + + /// \brief callable function to execute the TensorOperation in eager mode + /// \param[inout] input - the tensor to be transformed + /// \return - the output tensor, nullptr if Compute fails + std::shared_ptr operator()(std::shared_ptr input); + + private: + std::shared_ptr op_; +}; + + +} // namespace api +} // namespace dataset +} // namespace mindspore +#endif // DATASET_API_EXECUTE_H_ diff --git a/mindspore/ccsrc/minddata/dataset/include/tensor.h b/mindspore/ccsrc/minddata/dataset/include/tensor.h index c40f8346c70..89c69e318bd 100644 --- a/mindspore/ccsrc/minddata/dataset/include/tensor.h +++ b/mindspore/ccsrc/minddata/dataset/include/tensor.h @@ -38,12 +38,18 @@ #include "minddata/dataset/core/data_type.h" #include "minddata/dataset/core/tensor_shape.h" #include "minddata/dataset/util/status.h" +#include "minddata/dataset/include/de_tensor.h" +#ifndef ENABLE_ANDROID #include "proto/example.pb.h" +#endif #ifdef ENABLE_PYTHON namespace py = pybind11; #endif namespace mindspore { +namespace tensor { +class DETensor; +} // namespace tensor namespace dataset { class Tensor; template @@ -55,6 +61,7 @@ using offset_t = uint32_t; // type of offset va using TensorPtr = std::shared_ptr; class Tensor { + friend class tensor::DETensor; public: Tensor() = delete; Tensor(const Tensor &other) = delete; @@ -117,6 +124,7 @@ class Tensor { static Status CreateFromNpArray(const py::array &arr, TensorPtr *out); #endif +#ifndef ENABLE_ANDROID /// Create a tensor of type DE_STRING from a BytesList. /// \param[in] bytes_list protobuf's Bytelist /// \param[in] shape shape of the outout tensor @@ -134,6 +142,7 @@ class Tensor { /// \return Status Code static Status CreateFromByteList(const dataengine::BytesList &bytes_list, const TensorShape &shape, const DataType &type, dsize_t pad_size, TensorPtr *out); +#endif /// Create a Tensor from a given list of values. /// \tparam type of the values to be inserted. 
@@ -649,13 +658,6 @@ class Tensor { unsigned char *data_end_ = nullptr; private: -#ifdef ENABLE_PYTHON - /// Helper function to create a tensor from Numpy array of strings - /// \param[in] arr Numpy array - /// \param[out] out Created Tensor - /// \return Status - static Status CreateFromNpString(py::array arr, TensorPtr *out); -#endif /// Copy raw data of a array based on shape and strides to the destination pointer /// \param dst [out] Pointer to the destination array where the content is to be copied /// \param[in] src Pointer to the source of strided array to be copied @@ -668,6 +670,14 @@ class Tensor { /// const of the size of the offset variable static constexpr uint8_t kOffsetSize = sizeof(offset_t); + +#ifdef ENABLE_PYTHON + /// Helper function to create a tensor from Numpy array of strings + /// \param[in] arr Numpy array + /// \param[out] out Created Tensor + /// \return Status + static Status CreateFromNpString(py::array arr, TensorPtr *out); +#endif }; template <> inline Tensor::TensorIterator Tensor::end() { diff --git a/mindspore/ccsrc/minddata/dataset/kernels/image/resize_with_bbox_op.cc b/mindspore/ccsrc/minddata/dataset/kernels/image/resize_with_bbox_op.cc index 9df2d8a25ee..8d40514f1bd 100644 --- a/mindspore/ccsrc/minddata/dataset/kernels/image/resize_with_bbox_op.cc +++ b/mindspore/ccsrc/minddata/dataset/kernels/image/resize_with_bbox_op.cc @@ -20,7 +20,6 @@ #include "minddata/dataset/kernels/image/resize_op.h" #include "minddata/dataset/kernels/image/image_utils.h" #include "minddata/dataset/core/cv_tensor.h" -#include "minddata/dataset/core/pybind_support.h" #include "minddata/dataset/core/tensor.h" #include "minddata/dataset/kernels/tensor_op.h" #include "minddata/dataset/util/status.h" diff --git a/mindspore/lite/CMakeLists.txt b/mindspore/lite/CMakeLists.txt index 887da2c9deb..cf48032b2d0 100644 --- a/mindspore/lite/CMakeLists.txt +++ b/mindspore/lite/CMakeLists.txt @@ -33,6 +33,7 @@ option(BUILD_CONVERTER "if build converter" on) option(ENABLE_FP16 "if build fp16 ops" off) option(SUPPORT_GPU "if support gpu" off) option(OFFLINE_COMPILE "if offline compile OpenCL kernel" off) +option(BUILD_MINDDATA "" on) if (BUILD_DEVICE) add_compile_definitions(BUILD_DEVICE) @@ -116,6 +117,32 @@ if (BUILD_DEVICE) set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -march=armv8.2-a+dotprod+fp16") endif () endif() +endif() + +if (BUILD_MINDDATA) + # opencv + set(OpenCV_DIR ${TOP_DIR}/third_party/opencv/build) + find_package(OpenCV REQUIRED) + include_directories(${OpenCV_INCLUDE_DIRS}) + # eigen + include_directories(${TOP_DIR}/third_party/eigen/) + # jpeg-turbo + add_library(jpeg-turbo SHARED IMPORTED) + set_target_properties(jpeg-turbo PROPERTIES + IMPORTED_LOCATION ${TOP_DIR}/third_party/libjpeg-turbo/lib/libturbojpeg.so + ) + add_library(jpeg SHARED IMPORTED) + set_target_properties(jpeg PROPERTIES + IMPORTED_LOCATION ${TOP_DIR}/third_party/libjpeg-turbo/lib/libjpeg.so + ) + include_directories(${TOP_DIR}/third_party/libjpeg-turbo/include) + + add_compile_definitions(ENABLE_ANDROID) + add_compile_definitions(ENABLE_EAGER) + add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/minddata) +endif() + +if (BUILD_DEVICE) add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/src) add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/tools/benchmark) add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/test) diff --git a/mindspore/lite/minddata/CMakeLists.txt b/mindspore/lite/minddata/CMakeLists.txt new file mode 100644 index 00000000000..7ffbcdd9c93 --- /dev/null +++ b/mindspore/lite/minddata/CMakeLists.txt @@ -0,0 +1,44 @@ 
+set(MINDDATA_DIR ${CCSRC_DIR}/minddata/dataset)
+set(CMAKE_CXX_STANDARD 17)
+set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++17")
+set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS}")
+set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fPIC -Wall -Wno-deprecated-declarations")
+set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} -s")
+
+AUX_SOURCE_DIRECTORY(${MINDDATA_DIR}/core MINDDATA_CORE_SRC_FILES)
+list(REMOVE_ITEM MINDDATA_CORE_SRC_FILES "${MINDDATA_DIR}/core/client.cc")
+
+AUX_SOURCE_DIRECTORY(${MINDDATA_DIR}/kernels MINDDATA_KERNELS_SRC_FILES)
+list(REMOVE_ITEM MINDDATA_KERNELS_SRC_FILES "${MINDDATA_DIR}/kernels/py_func_op.cc")
+
+AUX_SOURCE_DIRECTORY(${MINDDATA_DIR}/kernels/image MINDDATA_KERNELS_IMAGE_SRC_FILES)
+
+AUX_SOURCE_DIRECTORY(${MINDDATA_DIR}/kernels/data MINDDATA_KERNELS_DATA_SRC_FILES)
+
+add_library(minddata-eager OBJECT
+    ${MINDDATA_DIR}/api/de_tensor.cc
+    ${MINDDATA_DIR}/api/execute.cc
+    )
+
+add_library(minddata-lite SHARED
+    ${MINDDATA_CORE_SRC_FILES}
+    ${MINDDATA_KERNELS_SRC_FILES}
+    ${MINDDATA_KERNELS_IMAGE_SRC_FILES}
+    ${MINDDATA_KERNELS_DATA_SRC_FILES}
+    ${MINDDATA_DIR}/util/status.cc
+    ${MINDDATA_DIR}/util/memory_pool.cc
+    ${MINDDATA_DIR}/util/path.cc
+    ${MINDDATA_DIR}/api/transforms.cc
+    ${CORE_DIR}/utils/log_adapter.cc
+    ${CCSRC_DIR}/gvar/logging_level.cc
+    )
+
+target_link_libraries(minddata-lite
+    securec
+    jpeg-turbo
+    jpeg
+    opencv_core
+    opencv_imgcodecs
+    opencv_imgproc
+    mindspore::json
+    )
\ No newline at end of file
diff --git a/mindspore/lite/src/CMakeLists.txt b/mindspore/lite/src/CMakeLists.txt
index 23ef963dad6..438e3da6348 100644
--- a/mindspore/lite/src/CMakeLists.txt
+++ b/mindspore/lite/src/CMakeLists.txt
@@ -80,5 +80,9 @@ target_link_libraries(mindspore-lite
     )
 
 add_subdirectory(runtime/kernel/arm)
+if (BUILD_MINDDATA)
+    target_link_libraries(mindspore-lite minddata-eager minddata-lite log)
+endif ()
+
 
 add_subdirectory(ops)
diff --git a/mindspore/lite/test/CMakeLists.txt b/mindspore/lite/test/CMakeLists.txt
index 14dd8a5115c..dd909416c2a 100644
--- a/mindspore/lite/test/CMakeLists.txt
+++ b/mindspore/lite/test/CMakeLists.txt
@@ -129,6 +129,15 @@ if (SUPPORT_GPU)
         ${LITE_DIR}/src/runtime/kernel/opencl/kernel/conv2d_transpose.cc
     )
 endif()
+### minddata lite
+if (BUILD_MINDDATA)
+    include_directories(${CCSRC_DIR}/minddata)
+    set(DATASET_TEST_DIR ${CMAKE_CURRENT_SOURCE_DIR}/dataset)
+    set(TEST_MINDDATA_SRC
+        ${DATASET_TEST_DIR}/de_tensor_test.cc
+        ${DATASET_TEST_DIR}/eager_test.cc
+    )
+endif()
 ### runtime framework
 file(GLOB_RECURSE OPS_SRC ${LITE_DIR}/src/ops/*.cc)
 set(TEST_LITE_SRC
@@ -245,6 +254,7 @@ file(GLOB_RECURSE TEST_CASE_KERNEL_SRC
 
 set(TEST_SRC
     ${TEST_LITE_SRC}
+    ${TEST_MINDDATA_SRC}
     ${TEST_CASE_KERNEL_SRC}
     ${TEST_DIR}/common/common_test.cc
     ${TEST_DIR}/main.cc
diff --git a/mindspore/lite/test/dataset/de_tensor_test.cc b/mindspore/lite/test/dataset/de_tensor_test.cc
new file mode 100644
index 00000000000..a96b7885baf
--- /dev/null
+++ b/mindspore/lite/test/dataset/de_tensor_test.cc
@@ -0,0 +1,98 @@
+/**
+ * Copyright 2019 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +#include +#include +#include "common/common_test.h" +#include "gtest/gtest.h" +#include "securec.h" +#include "dataset/core/tensor.h" +#include "dataset/core/cv_tensor.h" +#include "dataset/core/data_type.h" +#include "mindspore/lite/src/ir/tensor.h" + +using namespace mindspore::dataset; + +class MindDataTestTensorDE : public UT::Common { + public: + MindDataTestTensorDE() {} +}; + +TEST_F(MindDataTestTensorDE, MSTensorBasic) { + std::shared_ptr t = std::make_shared(TensorShape({2, 3}), DataType(DataType::DE_FLOAT32)); + auto ms_tensor = std::shared_ptr(new mindspore::tensor::DETensor(t)); + ASSERT_EQ(t == std::dynamic_pointer_cast(ms_tensor)->tensor(), true); +} + +TEST_F(MindDataTestTensorDE, MSTensorConvertToLiteTensor) { + std::shared_ptr t = std::make_shared(TensorShape({2, 3}), DataType(DataType::DE_FLOAT32)); + auto ms_tensor = std::shared_ptr(new mindspore::tensor::DETensor(t)); + std::shared_ptr lite_ms_tensor = std::shared_ptr( + std::dynamic_pointer_cast(ms_tensor)->ConvertToLiteTensor()); + // check if the lite_ms_tensor is the derived LiteTensor + mindspore::tensor::LiteTensor * lite_tensor = static_cast(lite_ms_tensor.get()); + ASSERT_EQ(lite_tensor != nullptr, true); +} + +TEST_F(MindDataTestTensorDE, MSTensorShape) { + std::shared_ptr t = std::make_shared(TensorShape({2, 3}), DataType(DataType::DE_FLOAT32)); + auto ms_tensor = std::shared_ptr(new mindspore::tensor::DETensor(t)); + ASSERT_EQ(ms_tensor->DimensionSize(0) == 2, true); + ASSERT_EQ(ms_tensor->DimensionSize(1) == 3, true); + ms_tensor->set_shape(std::vector{3,2}); + ASSERT_EQ(ms_tensor->DimensionSize(0) == 3, true); + ASSERT_EQ(ms_tensor->DimensionSize(1) == 2, true); + ms_tensor->set_shape(std::vector{6}); + ASSERT_EQ(ms_tensor->DimensionSize(0) == 6, true); +} + +TEST_F(MindDataTestTensorDE, MSTensorSize) { + std::shared_ptr t = std::make_shared(TensorShape({2, 3}), DataType(DataType::DE_FLOAT32)); + auto ms_tensor = std::shared_ptr(new mindspore::tensor::DETensor(t)); + ASSERT_EQ(ms_tensor->ElementsNum() == 6, true); + ASSERT_EQ(ms_tensor->Size() == 24, true); +} + +TEST_F(MindDataTestTensorDE, MSTensorDataType) { + std::shared_ptr t = std::make_shared(TensorShape({2, 3}), DataType(DataType::DE_FLOAT32)); + auto ms_tensor = std::shared_ptr(new mindspore::tensor::DETensor(t)); + ASSERT_EQ(ms_tensor->data_type() == mindspore::TypeId::kNumberTypeFloat32, true); + ms_tensor->set_data_type(mindspore::TypeId::kNumberTypeInt32); + ASSERT_EQ(ms_tensor->data_type() == mindspore::TypeId::kNumberTypeInt32, true); + ASSERT_EQ(std::dynamic_pointer_cast(ms_tensor)->tensor()->type() == DataType::DE_INT32, true); +} + +TEST_F(MindDataTestTensorDE, MSTensorMutableData) { + std::vector x = {2.5, 2.5, 2.5, 2.5}; + std::shared_ptr t; + Tensor::CreateTensor(&t, x, TensorShape({2, 2})); + auto ms_tensor = std::shared_ptr(new mindspore::tensor::DETensor(t)); + float *data = static_cast(ms_tensor->MutableData()); + std::vector tensor_vec(data, data + ms_tensor->ElementsNum()); + ASSERT_EQ(x == tensor_vec, true); + // TODO: add set_data_type after implmenting it +} + +TEST_F(MindDataTestTensorDE, MSTensorHash) { + std::vector x = {2.5, 2.5, 2.5, 2.5}; + std::shared_ptr t; + Tensor::CreateTensor(&t, x, TensorShape({2, 2})); + auto ms_tensor = std::shared_ptr(new mindspore::tensor::DETensor(t)); +#ifdef ENABLE_ARM64 + ASSERT_EQ(ms_tensor->hash() == 11093771382437, true); // arm64 +#else + ASSERT_EQ(ms_tensor->hash() == 
11093825635904, true); +#endif +} \ No newline at end of file diff --git a/mindspore/lite/test/dataset/eager_test.cc b/mindspore/lite/test/dataset/eager_test.cc new file mode 100644 index 00000000000..5b28c44d472 --- /dev/null +++ b/mindspore/lite/test/dataset/eager_test.cc @@ -0,0 +1,165 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#include +#include "common/common_test.h" +#include "gtest/gtest.h" +#include "securec.h" +#include "minddata/dataset/core/tensor.h" +#include "minddata/dataset/core/config_manager.h" +#include "minddata/dataset/include/datasets.h" +#include "minddata/dataset/include/execute.h" +#include "minddata/dataset/util/path.h" + +using namespace mindspore::dataset; +using namespace mindspore::dataset::api; +using namespace mindspore; + +class MindDataTestEager : public UT::Common { + public: + MindDataTestEager() {} +}; + +TEST_F(MindDataTestEager, Test1) { + std::string in_dir = "/sdcard/data/testPK/data/class1"; + Path base_dir = Path(in_dir); + MS_LOG(WARNING) << base_dir.toString() << "."; + if (!base_dir.IsDirectory() || !base_dir.Exists()) { + MS_LOG(INFO) << "Input dir is not a directory or doesn't exist" << "."; + } + auto t_start = std::chrono::high_resolution_clock::now(); + // check if output_dir exists and create it if it does not exist + + // iterate over in dir and create json for all images + auto dir_it = Path::DirIterator::OpenDirectory(&base_dir); + while (dir_it->hasNext()) { + Path v = dir_it->next(); + MS_LOG(WARNING) << v.toString() << "."; + std::shared_ptr image = std::shared_ptr(tensor::DETensor::CreateTensor(v.toString())); + + image = Execute(vision::Decode())(image); + EXPECT_TRUE(image != nullptr); + image = Execute(vision::Normalize({121.0, 115.0, 100.0}, {70.0, 68.0, 71.0}))(image); + EXPECT_TRUE(image != nullptr); + image = Execute(vision::Resize({224, 224}))(image); + EXPECT_TRUE(image != nullptr); + EXPECT_TRUE(image->DimensionSize(0) == 224); + EXPECT_TRUE(image->DimensionSize(1) == 224); + } + auto t_end = std::chrono::high_resolution_clock::now(); + double elapsed_time_ms = std::chrono::duration(t_end-t_start).count(); + MS_LOG(INFO) << "duration: " << elapsed_time_ms << " ms\n"; +} + +/* +TEST_F(MindDataTestEager, Test2) { + // string dir for image folder + std::string in_dir = datasets_root_path_ + "/testPK/data"; + // run dataset with decode = on + std::shared_ptr ds = ImageFolder(in_dir, true, RandomSampler(false)); + std::shared_ptr normalize_op = vision::Normalize({121.0, 115.0, 100.0}, {70.0, 68.0, 71.0}); + EXPECT_TRUE(normalize_op != nullptr); + std::shared_ptr resize_op = vision::Resize({224, 224}); + EXPECT_TRUE(resize_op != nullptr); + ds = ds->Map({normalize_op, resize_op}); + EXPECT_TRUE(ds != nullptr); + // Create an iterator over the result of the above dataset + // This will trigger the creation of the Execution Tree and launch it. 
+  std::shared_ptr<Iterator> iter = ds->CreateIterator();
+  EXPECT_TRUE(iter != nullptr);
+
+  // Iterate the dataset and get each row
+  std::unordered_map<std::string, std::shared_ptr<Tensor>> row;
+  iter->GetNextRow(&row);
+
+  uint64_t i = 0;
+  while (row.size() != 0) {
+    i++;
+    auto image = row["image"];
+    MS_LOG(INFO) << "Tensor image shape: " << image->shape();
+    iter->GetNextRow(&row);
+  }
+  MS_LOG(WARNING) << i << ".";
+  iter->Stop();
+
+}
+
+TEST_F(MindDataTestEager, Test3) {
+  // string dir for image folder
+  ConfigManager cm = ConfigManager();
+  cm.set_num_parallel_workers(1);
+  std::string in_dir = datasets_root_path_ + "/testPK/data";
+  // run dataset with decode = on
+  std::shared_ptr<Dataset> ds = ImageFolder(in_dir, true, RandomSampler(false));
+  std::shared_ptr<TensorOperation> normalize_op = vision::Normalize({121.0, 115.0, 100.0}, {70.0, 68.0, 71.0});
+  EXPECT_TRUE(normalize_op != nullptr);
+  std::shared_ptr<TensorOperation> resize_op = vision::Resize({224, 224});
+  EXPECT_TRUE(resize_op != nullptr);
+  ds = ds->Map({normalize_op, resize_op});
+  EXPECT_TRUE(ds != nullptr);
+  // Create an iterator over the result of the above dataset
+  // This will trigger the creation of the Execution Tree and launch it.
+  std::shared_ptr<Iterator> iter = ds->CreateIterator();
+  EXPECT_TRUE(iter != nullptr);
+
+  // Iterate the dataset and get each row
+  std::unordered_map<std::string, std::shared_ptr<Tensor>> row;
+  iter->GetNextRow(&row);
+
+  uint64_t i = 0;
+  while (row.size() != 0) {
+    i++;
+    auto image = row["image"];
+    MS_LOG(INFO) << "Tensor image shape: " << image->shape();
+    iter->GetNextRow(&row);
+  }
+  MS_LOG(WARNING) << i << ".";
+  iter->Stop();
+
+}
+
+TEST_F(MindDataTestEager, Test4) {
+  // string dir for image folder
+  ConfigManager cm = ConfigManager();
+  cm.set_num_parallel_workers(1);
+  std::string in_dir = datasets_root_path_ + "/testPK/data";
+  // run dataset with decode = on
+  std::shared_ptr<Dataset> ds = ImageFolder(in_dir, true, RandomSampler(false));
+  // Create an iterator over the result of the above dataset
+  // This will trigger the creation of the Execution Tree and launch it.
+  std::shared_ptr<Iterator> iter = ds->CreateIterator();
+  EXPECT_TRUE(iter != nullptr);
+
+  // Iterate the dataset and get each row
+  std::unordered_map<std::string, std::shared_ptr<Tensor>> row;
+  iter->GetNextRow(&row);
+
+  uint64_t i = 0;
+  while (row.size() != 0) {
+    i++;
+    auto image = row["image"];
+    image = Execute(vision::Normalize({121.0, 115.0, 100.0}, {70.0, 68.0, 71.0}))(image);
+    EXPECT_TRUE(image != nullptr);
+    image = Execute(vision::Resize({224, 224}))(image);
+    EXPECT_TRUE(image != nullptr);
+
+    MS_LOG(INFO) << "Tensor image shape: " << image->shape();
+    iter->GetNextRow(&row);
+  }
+  MS_LOG(WARNING) << i << ".";
+  iter->Stop();
+
+}
+*/
diff --git a/third_party/eigen b/third_party/eigen
new file mode 160000
index 00000000000..daf9bbeca26
--- /dev/null
+++ b/third_party/eigen
@@ -0,0 +1 @@
+Subproject commit daf9bbeca26e98da2eed0058835cbb04e0a30ad8
diff --git a/third_party/libjpeg-turbo b/third_party/libjpeg-turbo
new file mode 160000
index 00000000000..b443c541b9a
--- /dev/null
+++ b/third_party/libjpeg-turbo
@@ -0,0 +1 @@
+Subproject commit b443c541b9a6fdcac214f9f003de0aa13e480ac1
diff --git a/third_party/opencv b/third_party/opencv
new file mode 160000
index 00000000000..bda89a6469a
--- /dev/null
+++ b/third_party/opencv
@@ -0,0 +1 @@
+Subproject commit bda89a6469aa79ecd8713967916bd754bff1d931
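Usage sketch. The snippet below is a minimal illustration of how the eager pieces added in this patch fit together, mirroring Test1 in eager_test.cc: wrap a file in a tensor::DETensor, then apply one TensorOperation at a time through dataset::api::Execute. The include set and the image path are assumptions, not taken from the patch.

// Minimal sketch, assuming these headers expose DETensor, Execute and the vision ops,
// and assuming a placeholder image path on the target device.
#include <memory>
#include "minddata/dataset/include/de_tensor.h"
#include "minddata/dataset/include/execute.h"
#include "minddata/dataset/include/datasets.h"

int main() {
  using mindspore::tensor::DETensor;
  using mindspore::tensor::MSTensor;
  using namespace mindspore::dataset::api;

  // CreateTensor(path) loads the raw file bytes into a DETensor (hypothetical path).
  std::shared_ptr<MSTensor> image(DETensor::CreateTensor("/sdcard/data/test.jpg"));
  // Each Execute wraps a single TensorOperation and runs it immediately on the tensor.
  image = Execute(vision::Decode())(image);
  image = Execute(vision::Resize({224, 224}))(image);
  // A nullptr result signals that the underlying op's Compute() failed.
  return image == nullptr ? 1 : 0;
}

Each call builds the wrapped op via TensorOperation::Build() and invokes Compute() once on the input tensor, so no dataset pipeline or execution tree is involved.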