forked from OSSInnovation/mindspore
fixed cpplint format
This commit is contained in:
parent 2258fd4c4c
commit 6639149e58
@@ -114,7 +114,8 @@ TypeId DETensor::set_data_type(TypeId data_type) {
   MS_ASSERT(this->tensor_impl_ != nullptr);
   if (data_type != this->data_type()) {
     std::shared_ptr<dataset::Tensor> temp;
-    dataset::Tensor::CreateFromMemory(this->tensor_impl_->shape(), MSTypeToDEType(data_type), this->tensor_impl_->GetBuffer(), &temp);
+    dataset::Tensor::CreateFromMemory(this->tensor_impl_->shape(), MSTypeToDEType(data_type),
+                                      this->tensor_impl_->GetBuffer(), &temp);
     this->tensor_impl_ = temp;
   }
   return data_type;
@@ -139,7 +140,6 @@ size_t DETensor::set_shape(const std::vector<int> &shape) {
                  std::back_inserter(t_shape),
                  [](int s) -> dataset::dsize_t {return static_cast<dataset::dsize_t>(s);});
   dataset::Status rc = this->tensor_impl_->Reshape(dataset::TensorShape(t_shape));
-  //TODO: what if t_shape has different size?
   return shape.size();
 }

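For context (illustrative, not part of the commit): the retained lines above are the tail of a std::transform call that copies the int shape vector into dataset::dsize_t dimensions before Reshape is called. A standalone sketch of that conversion, assuming dsize_t is a 64-bit signed index type:

// Sketch only: converting std::vector<int> into 64-bit dimension sizes, as set_shape does above.
// int64_t stands in for dataset::dsize_t, which is an assumption of this sketch.
#include <algorithm>
#include <cstdint>
#include <iterator>
#include <vector>

std::vector<int64_t> ToDsizeVector(const std::vector<int> &shape) {
  std::vector<int64_t> t_shape;
  t_shape.reserve(shape.size());
  std::transform(shape.begin(), shape.end(), std::back_inserter(t_shape),
                 [](int s) -> int64_t { return static_cast<int64_t>(s); });
  return t_shape;
}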
@@ -180,7 +180,6 @@ size_t DETensor::Size() const {

 void *DETensor::MutableData() const {
   MS_ASSERT(this->tensor_impl_ != nullptr);
-  // TODO: friend the DETensor?
   return this->tensor_impl_->GetMutableBuffer();
 }

@@ -25,7 +25,7 @@ namespace api {

 Execute::Execute(const std::shared_ptr<TensorOperation> &op) : op_(std::move(op)) {}

-std::shared_ptr<tensor::MSTensor> Execute::operator()(std::shared_ptr<tensor::MSTensor> input){
+std::shared_ptr<tensor::MSTensor> Execute::operator()(std::shared_ptr<tensor::MSTensor> input) {
   // Build the op
   if (op_ == nullptr) {
     MS_LOG(ERROR) << "Input TensorOperation is not valid";
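Illustrative only, not part of the commit: the operator() touched up here is the eager-mode entry point that the MindDataTestEager test near the bottom of this diff chains op by op. A minimal usage sketch follows; the header paths for the vision ops and for DETensor are assumptions inferred from includes elsewhere in this diff, not confirmed locations.

// Sketch: eager execution of dataset ops on a single tensor, mirroring the test below.
#include <memory>
#include <string>
#include "minddata/dataset/include/execute.h"
#include "minddata/dataset/include/vision.h"    // assumed location of the Decode/Resize factories
#include "minddata/dataset/api/de_tensor.h"     // assumed path, per the include guard shown below

using mindspore::dataset::api::Execute;
using mindspore::dataset::api::vision::Decode;
using mindspore::dataset::api::vision::Resize;
using MSTensor = mindspore::tensor::MSTensor;
using DETensor = mindspore::tensor::DETensor;

std::shared_ptr<MSTensor> DecodeAndResize(const std::string &path) {
  // Read the raw file into a tensor, then apply each TensorOperation eagerly.
  std::shared_ptr<MSTensor> image(DETensor::CreateTensor(path));
  image = Execute(Decode())(image);            // each call returns nullptr if the op fails
  image = Execute(Resize({224, 224}))(image);
  return image;
}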
@@ -39,7 +39,7 @@ class PrinterPass : public NodePass {

   Status RunOnNode(std::shared_ptr<ShuffleOp> node, bool *modified) override;

-#ifndef ENABLE_ANDROID
+#ifndef ENABLE_ANDROID
   Status RunOnNode(std::shared_ptr<MindRecordOp> node, bool *modified) override;

   Status RunOnNode(std::shared_ptr<TFReaderOp> node, bool *modified) override;
@@ -1,5 +1,24 @@
+/**
+ * Copyright 2020 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
 #ifndef MINDSPORE_CCSRC_MINDDATA_DATASET_API_DETENSOR_H_
 #define MINDSPORE_CCSRC_MINDDATA_DATASET_API_DETENSOR_H_
+#include <string>
+#include <vector>
+#include <memory>
 #include "include/ms_tensor.h"
 #include "minddata/dataset/include/tensor.h"
 #include "minddata/dataset/util/status.h"
@@ -7,50 +26,50 @@ namespace mindspore {
 namespace tensor {
 class DETensor : public MSTensor {
  public:
-  /// \brief Create a MSTensor pointer.
-  /// \param[data_type] DataTypeId of tensor to be created.
-  /// \param[shape] Shape of tensor to be created.
-  /// \return - MSTensor pointer.
-  static MSTensor *CreateTensor(TypeId data_type, const std::vector<int> &shape);
+  /// \brief Create a MSTensor pointer.
+  /// \param[data_type] DataTypeId of tensor to be created.
+  /// \param[shape] Shape of tensor to be created.
+  /// \return - MSTensor pointer.
+  static MSTensor *CreateTensor(TypeId data_type, const std::vector<int> &shape);

-  /// \brief Create a MSTensor pointer.
-  /// \param[path] Path file to be read.
-  /// \return - MSTensor pointer.
-  static MSTensor *CreateTensor(const std::string &path);
+  /// \brief Create a MSTensor pointer.
+  /// \param[path] Path file to be read.
+  /// \return - MSTensor pointer.
+  static MSTensor *CreateTensor(const std::string &path);

-  DETensor(TypeId data_type, const std::vector<int> &shape);
+  DETensor(TypeId data_type, const std::vector<int> &shape);

-  explicit DETensor(std::shared_ptr<dataset::Tensor> tensor_ptr);
+  explicit DETensor(std::shared_ptr<dataset::Tensor> tensor_ptr);

-  ~DETensor() = default;
+  ~DETensor() = default;

-  /// \brief Create a duplicate instance, convert the DETensor to the LiteTensor.
-  /// \return - MSTensor pointer.
-  MSTensor *ConvertToLiteTensor();
+  /// \brief Create a duplicate instance, convert the DETensor to the LiteTensor.
+  /// \return - MSTensor pointer.
+  MSTensor *ConvertToLiteTensor();

-  std::shared_ptr<dataset::Tensor> tensor() const;
+  std::shared_ptr<dataset::Tensor> tensor() const;

-  TypeId data_type() const override;
+  TypeId data_type() const override;

-  TypeId set_data_type(const TypeId data_type) override;
+  TypeId set_data_type(const TypeId data_type) override;

-  std::vector<int> shape() const override;
+  std::vector<int> shape() const override;

-  size_t set_shape(const std::vector<int> &shape) override;
+  size_t set_shape(const std::vector<int> &shape) override;

-  int DimensionSize(size_t index) const override;
+  int DimensionSize(size_t index) const override;

-  int ElementsNum() const override;
+  int ElementsNum() const override;

-  std::size_t hash() const override;
+  std::size_t hash() const override;

-  size_t Size() const override;
+  size_t Size() const override;

-  void *MutableData() const override;
+  void *MutableData() const override;

  protected:
-  std::shared_ptr<dataset::Tensor> tensor_impl_;
+  std::shared_ptr<dataset::Tensor> tensor_impl_;
 };
 } // namespace tensor
 } // namespace mindspore
-#endif // MINDSPORE_CCSRC_MINDDATA_DATASET_API_DETENSOR_H_
+#endif // MINDSPORE_CCSRC_MINDDATA_DATASET_API_DETENSOR_H_
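Illustrative only, not part of the commit: the DETensor class declared above adapts a dataset::Tensor to the lite MSTensor interface. A small sketch of how it might be used, assuming the header path implied by the include guard and that CreateTensor hands back an owning raw pointer, as the tests below treat it:

// Sketch: creating a DETensor through the MSTensor-style factory and writing into its buffer.
// "minddata/dataset/api/de_tensor.h" is an assumed path derived from the include guard above.
#include <memory>
#include <vector>
#include "minddata/dataset/api/de_tensor.h"

using MSTensor = mindspore::tensor::MSTensor;
using DETensor = mindspore::tensor::DETensor;

void FillWithOnes() {
  // A 2x3 float32 tensor: ElementsNum() == 6 and Size() == 24 bytes, as the tests below assert.
  std::shared_ptr<MSTensor> t(DETensor::CreateTensor(mindspore::TypeId::kNumberTypeFloat32, {2, 3}));
  auto *data = static_cast<float *>(t->MutableData());
  for (int i = 0; i < t->ElementsNum(); ++i) {
    data[i] = 1.0f;
  }
  t->set_shape({3, 2});  // reshape in place, mirroring the MSTensorShape test below
}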
@@ -33,7 +33,7 @@ namespace api {
 class Execute {
  public:
   /// \brief Constructor
-  Execute(const std::shared_ptr<TensorOperation> &op);
+  explicit Execute(const std::shared_ptr<TensorOperation> &op);

   /// \brief callable function to execute the TensorOperation in eager mode
   /// \param[inout] input - the tensor to be transformed
@@ -17,13 +17,18 @@
 #include <string>
 #include "common/common_test.h"
 #include "gtest/gtest.h"
-#include "securec.h"
+#include "./securec.h"
 #include "dataset/core/tensor.h"
 #include "dataset/core/cv_tensor.h"
 #include "dataset/core/data_type.h"
 #include "mindspore/lite/src/ir/tensor.h"

-using namespace mindspore::dataset;
+using MSTensor = mindspore::tensor::MSTensor;
+using DETensor = mindspore::tensor::DETensor;
+using LiteTensor = mindspore::lite::tensor::LiteTensor;
+using Tensor = mindspore::dataset::Tensor;
+using DataType = mindspore::dataset::DataType;
+using TensorShape = mindspore::dataset::TensorShape;

 class MindDataTestTensorDE : public mindspore::Common {
  public:
@@ -32,26 +37,26 @@ class MindDataTestTensorDE : public mindspore::Common {

 TEST_F(MindDataTestTensorDE, MSTensorBasic) {
   std::shared_ptr<Tensor> t = std::make_shared<Tensor>(TensorShape({2, 3}), DataType(DataType::DE_FLOAT32));
-  auto ms_tensor = std::shared_ptr<mindspore::tensor::MSTensor>(new mindspore::tensor::DETensor(t));
-  ASSERT_EQ(t == std::dynamic_pointer_cast<mindspore::tensor::DETensor>(ms_tensor)->tensor(), true);
+  auto ms_tensor = std::shared_ptr<MSTensor>(new DETensor(t));
+  ASSERT_EQ(t == std::dynamic_pointer_cast<DETensor>(ms_tensor)->tensor(), true);
 }

 TEST_F(MindDataTestTensorDE, MSTensorConvertToLiteTensor) {
   std::shared_ptr<Tensor> t = std::make_shared<Tensor>(TensorShape({2, 3}), DataType(DataType::DE_FLOAT32));
-  auto ms_tensor = std::shared_ptr<mindspore::tensor::DETensor>(new mindspore::tensor::DETensor(t));
-  std::shared_ptr<mindspore::tensor::MSTensor> lite_ms_tensor = std::shared_ptr<mindspore::tensor::MSTensor>(
-    std::dynamic_pointer_cast<mindspore::tensor::DETensor>(ms_tensor)->ConvertToLiteTensor());
+  auto ms_tensor = std::shared_ptr<DETensor>(new DETensor(t));
+  std::shared_ptr<MSTensor> lite_ms_tensor = std::shared_ptr<MSTensor>(
+    std::dynamic_pointer_cast<DETensor>(ms_tensor)->ConvertToLiteTensor());
   // check if the lite_ms_tensor is the derived LiteTensor
-  mindspore::lite::tensor::LiteTensor * lite_tensor = static_cast<mindspore::lite::tensor::LiteTensor *>(lite_ms_tensor.get());
+  LiteTensor * lite_tensor = static_cast<LiteTensor *>(lite_ms_tensor.get());
   ASSERT_EQ(lite_tensor != nullptr, true);
 }

 TEST_F(MindDataTestTensorDE, MSTensorShape) {
   std::shared_ptr<Tensor> t = std::make_shared<Tensor>(TensorShape({2, 3}), DataType(DataType::DE_FLOAT32));
-  auto ms_tensor = std::shared_ptr<mindspore::tensor::MSTensor>(new mindspore::tensor::DETensor(t));
+  auto ms_tensor = std::shared_ptr<MSTensor>(new DETensor(t));
   ASSERT_EQ(ms_tensor->DimensionSize(0) == 2, true);
   ASSERT_EQ(ms_tensor->DimensionSize(1) == 3, true);
-  ms_tensor->set_shape(std::vector<int>{3,2});
+  ms_tensor->set_shape(std::vector<int>{3, 2});
   ASSERT_EQ(ms_tensor->DimensionSize(0) == 3, true);
   ASSERT_EQ(ms_tensor->DimensionSize(1) == 2, true);
   ms_tensor->set_shape(std::vector<int>{6});
@@ -60,35 +65,34 @@ TEST_F(MindDataTestTensorDE, MSTensorShape) {

 TEST_F(MindDataTestTensorDE, MSTensorSize) {
   std::shared_ptr<Tensor> t = std::make_shared<Tensor>(TensorShape({2, 3}), DataType(DataType::DE_FLOAT32));
-  auto ms_tensor = std::shared_ptr<mindspore::tensor::MSTensor>(new mindspore::tensor::DETensor(t));
+  auto ms_tensor = std::shared_ptr<MSTensor>(new DETensor(t));
   ASSERT_EQ(ms_tensor->ElementsNum() == 6, true);
   ASSERT_EQ(ms_tensor->Size() == 24, true);
 }

 TEST_F(MindDataTestTensorDE, MSTensorDataType) {
   std::shared_ptr<Tensor> t = std::make_shared<Tensor>(TensorShape({2, 3}), DataType(DataType::DE_FLOAT32));
-  auto ms_tensor = std::shared_ptr<mindspore::tensor::MSTensor>(new mindspore::tensor::DETensor(t));
+  auto ms_tensor = std::shared_ptr<MSTensor>(new DETensor(t));
   ASSERT_EQ(ms_tensor->data_type() == mindspore::TypeId::kNumberTypeFloat32, true);
   ms_tensor->set_data_type(mindspore::TypeId::kNumberTypeInt32);
   ASSERT_EQ(ms_tensor->data_type() == mindspore::TypeId::kNumberTypeInt32, true);
-  ASSERT_EQ(std::dynamic_pointer_cast<mindspore::tensor::DETensor>(ms_tensor)->tensor()->type() == DataType::DE_INT32, true);
+  ASSERT_EQ(std::dynamic_pointer_cast<DETensor>(ms_tensor)->tensor()->type() == DataType::DE_INT32, true);
 }

 TEST_F(MindDataTestTensorDE, MSTensorMutableData) {
   std::vector<float> x = {2.5, 2.5, 2.5, 2.5};
   std::shared_ptr<Tensor> t;
   Tensor::CreateFromVector(x, TensorShape({2, 2}), &t);
-  auto ms_tensor = std::shared_ptr<mindspore::tensor::MSTensor>(new mindspore::tensor::DETensor(t));
+  auto ms_tensor = std::shared_ptr<MSTensor>(new DETensor(t));
   float *data = static_cast<float*>(ms_tensor->MutableData());
   std::vector<float> tensor_vec(data, data + ms_tensor->ElementsNum());
   ASSERT_EQ(x == tensor_vec, true);
-  // TODO: add set_data_type after implmenting it
 }

 TEST_F(MindDataTestTensorDE, MSTensorHash) {
   std::vector<float> x = {2.5, 2.5, 2.5, 2.5};
   std::shared_ptr<Tensor> t;
   Tensor::CreateFromVector(x, TensorShape({2, 2}), &t);
-  auto ms_tensor = std::shared_ptr<mindspore::tensor::MSTensor>(new mindspore::tensor::DETensor(t));
+  auto ms_tensor = std::shared_ptr<MSTensor>(new DETensor(t));
   ASSERT_EQ(ms_tensor->hash() == 11093771382437, true);
 }
@@ -16,16 +16,20 @@
 #include <chrono>
 #include "common/common_test.h"
 #include "gtest/gtest.h"
-#include "securec.h"
+#include "./securec.h"
 #include "minddata/dataset/core/tensor.h"
 #include "minddata/dataset/core/config_manager.h"
 #include "minddata/dataset/include/datasets.h"
 #include "minddata/dataset/include/execute.h"
 #include "minddata/dataset/util/path.h"

-using namespace mindspore::dataset;
-using namespace mindspore::dataset::api;
-using namespace mindspore;
+using MSTensor = mindspore::tensor::MSTensor;
+using DETensor = mindspore::tensor::DETensor;
+using mindspore::dataset::api::vision::Decode;
+using mindspore::dataset::api::vision::Normalize;
+using mindspore::dataset::api::vision::Resize;
+using Execute = mindspore::dataset::api::Execute;
+using Path = mindspore::dataset::Path;

 class MindDataTestEager : public mindspore::Common {
  public:
@@ -33,7 +37,7 @@ class MindDataTestEager : public mindspore::Common {
 };

 TEST_F(MindDataTestEager, Test1) {
-#ifdef ENABLE_ARM64 || ENABLE_ARM32
+#if defined(ENABLE_ARM64) || defined(ENABLE_ARM32)
   std::string in_dir = "/sdcard/data/testPK/data/class1";
 #else
   std::string in_dir = "data/testPK/data/class1";
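A note on the hunk above (not from the commit message itself): the guard rewrite is a real fix, not only style. #ifdef tests exactly one macro name; extra tokens such as "|| ENABLE_ARM32" are ill-formed and compilers typically warn and ignore them, so only ENABLE_ARM64 was ever checked. The #if defined(...) || defined(...) form actually ORs the two conditions. A minimal self-contained illustration:

// Sketch: the corrected preprocessor pattern, using the paths and macros from the hunk above.
#include <string>

std::string DataDir() {
#if defined(ENABLE_ARM64) || defined(ENABLE_ARM32)
  return "/sdcard/data/testPK/data/class1";  // on-device path used by the ARM builds
#else
  return "data/testPK/data/class1";          // host-side relative path
#endif
}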
@@ -47,20 +51,20 @@ TEST_F(MindDataTestEager, Test1) {
   // check if output_dir exists and create it if it does not exist

   // iterate over in dir and create json for all images
-  auto dir_it = Path::DirIterator::OpenDirectory(&base_dir);
+  auto dir_it = Path::DirIterator::OpenDirectory(&base_dir);
   while (dir_it->hasNext()) {
     Path v = dir_it->next();
     MS_LOG(WARNING) << v.toString() << ".";
-    std::shared_ptr<tensor::MSTensor> image = std::shared_ptr<tensor::MSTensor>(tensor::DETensor::CreateTensor(v.toString()));
-
-    image = Execute(vision::Decode())(image);
+    std::shared_ptr<MSTensor> image = std::shared_ptr<MSTensor>(DETensor::CreateTensor(v.toString()));
+
+    image = Execute(Decode())(image);
     EXPECT_TRUE(image != nullptr);
-    image = Execute(vision::Normalize({121.0, 115.0, 100.0}, {70.0, 68.0, 71.0}))(image);
+    image = Execute(Normalize({121.0, 115.0, 100.0}, {70.0, 68.0, 71.0}))(image);
     EXPECT_TRUE(image != nullptr);
-    image = Execute(vision::Resize({224, 224}))(image);
+    image = Execute(Resize({224, 224}))(image);
     EXPECT_TRUE(image != nullptr);
-    EXPECT_TRUE(image->DimensionSize(0) == 224);
-    EXPECT_TRUE(image->DimensionSize(1) == 224);
+    EXPECT_EQ(image->DimensionSize(0), 224);
+    EXPECT_EQ(image->DimensionSize(1), 224);
   }
   auto t_end = std::chrono::high_resolution_clock::now();
   double elapsed_time_ms = std::chrono::duration<double, std::milli>(t_end-t_start).count();
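Illustrative only: the tail of the hunk above uses the usual <chrono> timing idiom; spelled out on its own it reduces to the following sketch.

// Sketch: measuring elapsed wall-clock time in milliseconds, as the test above does.
#include <chrono>
#include <iostream>

int main() {
  auto t_start = std::chrono::high_resolution_clock::now();
  // ... the work being timed would go here ...
  auto t_end = std::chrono::high_resolution_clock::now();
  double elapsed_time_ms = std::chrono::duration<double, std::milli>(t_end - t_start).count();
  std::cout << "elapsed: " << elapsed_time_ms << " ms" << std::endl;
  return 0;
}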