[feat][assistant][I40GYQ] add new dataset operator WIDERFace

@@ -129,6 +129,7 @@
#include "minddata/dataset/engine/ir/datasetops/source/udpos_node.h"
#include "minddata/dataset/engine/ir/datasetops/source/usps_node.h"
#include "minddata/dataset/engine/ir/datasetops/source/voc_node.h"
#include "minddata/dataset/engine/ir/datasetops/source/wider_face_node.h"
#include "minddata/dataset/engine/ir/datasetops/source/wiki_text_node.h"
#include "minddata/dataset/engine/ir/datasetops/source/yahoo_answers_node.h"
#include "minddata/dataset/engine/ir/datasetops/source/yelp_review_node.h"
@@ -1792,6 +1793,29 @@ UDPOSDataset::UDPOSDataset(const std::vector<char> &dataset_dir, const std::vect
  ir_node_ = std::static_pointer_cast<DatasetNode>(ds);
}

WIDERFaceDataset::WIDERFaceDataset(const std::vector<char> &dataset_dir, const std::vector<char> &usage, bool decode,
                                   const std::shared_ptr<Sampler> &sampler,
                                   const std::shared_ptr<DatasetCache> &cache) {
  auto sampler_obj = sampler ? sampler->Parse() : nullptr;
  auto ds = std::make_shared<WIDERFaceNode>(CharToString(dataset_dir), CharToString(usage), decode, sampler_obj, cache);
  ir_node_ = std::static_pointer_cast<DatasetNode>(ds);
}

WIDERFaceDataset::WIDERFaceDataset(const std::vector<char> &dataset_dir, const std::vector<char> &usage, bool decode,
                                   const Sampler *sampler, const std::shared_ptr<DatasetCache> &cache) {
  auto sampler_obj = sampler ? sampler->Parse() : nullptr;
  auto ds = std::make_shared<WIDERFaceNode>(CharToString(dataset_dir), CharToString(usage), decode, sampler_obj, cache);
  ir_node_ = std::static_pointer_cast<DatasetNode>(ds);
}

WIDERFaceDataset::WIDERFaceDataset(const std::vector<char> &dataset_dir, const std::vector<char> &usage, bool decode,
                                   const std::reference_wrapper<Sampler> sampler,
                                   const std::shared_ptr<DatasetCache> &cache) {
  auto sampler_obj = sampler.get().Parse();
  auto ds = std::make_shared<WIDERFaceNode>(CharToString(dataset_dir), CharToString(usage), decode, sampler_obj, cache);
  ir_node_ = std::static_pointer_cast<DatasetNode>(ds);
}

YahooAnswersDataset::YahooAnswersDataset(const std::vector<char> &dataset_dir, const std::vector<char> &usage,
                                         int64_t num_samples, ShuffleMode shuffle, int32_t num_shards, int32_t shard_id,
                                         const std::shared_ptr<DatasetCache> &cache) {
@@ -75,6 +75,7 @@
#include "minddata/dataset/engine/ir/datasetops/source/tf_record_node.h"
#include "minddata/dataset/engine/ir/datasetops/source/usps_node.h"
#include "minddata/dataset/engine/ir/datasetops/source/voc_node.h"
#include "minddata/dataset/engine/ir/datasetops/source/wider_face_node.h"
#endif

namespace mindspore {

@@ -626,6 +627,17 @@ PYBIND_REGISTER(VOCNode, 2, ([](const py::module *m) {
                    }));
                }));

PYBIND_REGISTER(WIDERFaceNode, 2, ([](const py::module *m) {
                  (void)py::class_<WIDERFaceNode, DatasetNode, std::shared_ptr<WIDERFaceNode>>(
                    *m, "WIDERFaceNode", "to create a WIDERFaceNode")
                    .def(py::init([](std::string dataset_dir, std::string usage, bool decode, py::handle sampler) {
                      auto wider_face =
                        std::make_shared<WIDERFaceNode>(dataset_dir, usage, decode, toSamplerObj(sampler), nullptr);
                      THROW_IF_ERROR(wider_face->ValidateParams());
                      return wider_face;
                    }));
                }));

PYBIND_REGISTER(WikiTextNode, 2, ([](const py::module *m) {
                  (void)py::class_<WikiTextNode, DatasetNode, std::shared_ptr<WikiTextNode>>(*m, "WikiTextNode",
                                                                                             "to create a WikiTextNode")
@@ -43,6 +43,7 @@ set(DATASET_ENGINE_DATASETOPS_SOURCE_SRC_FILES
        text_file_op.cc
        udpos_op.cc
        usps_op.cc
        wider_face_op.cc
        wiki_text_op.cc
        yahoo_answers_op.cc
        yelp_review_op.cc
@@ -0,0 +1,299 @@
/**
 * Copyright 2021 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include "minddata/dataset/engine/datasetops/source/wider_face_op.h"

#include <algorithm>
#include <iomanip>
#include <regex>

#include "minddata/dataset/core/config_manager.h"
#include "minddata/dataset/engine/data_schema.h"
#include "minddata/dataset/engine/datasetops/source/sampler/sequential_sampler.h"
#include "minddata/dataset/engine/execution_tree.h"
#include "minddata/dataset/kernels/image/image_utils.h"
#include "minddata/dataset/util/path.h"
#include "utils/file_utils.h"

namespace mindspore {
namespace dataset {
constexpr char kSplitPath[] = "wider_face_split";
constexpr char kTrainAnno[] = "wider_face_train_bbx_gt.txt";
constexpr char kValAnno[] = "wider_face_val_bbx_gt.txt";
constexpr char kTestAnno[] = "wider_face_test_filelist.txt";
constexpr char kTrainBase[] = "WIDER_train";
constexpr char kValBase[] = "WIDER_val";
constexpr char kTestBase[] = "WIDER_test";
constexpr char kImage[] = "images";
constexpr char kExtension[] = ".jpg";
constexpr int kDataLen = 10;           // Length of each data row.
constexpr int kBboxLen = 4;            // Length of bbox in annotation vector.
constexpr int kBlurIndex = 4;          // Index of blur in annotation vector.
constexpr int kExpressionIndex = 5;    // Index of expression in annotation vector.
constexpr int kIlluminationIndex = 6;  // Index of illumination in annotation vector.
constexpr int kOcclusionIndex = 7;     // Index of occlusion in annotation vector.
constexpr int kPoseIndex = 8;          // Index of pose in annotation vector.
constexpr int kInvalidIndex = 9;       // Index of invalid in annotation vector.

WIDERFaceOp::WIDERFaceOp(const std::string &folder_path, const std::string &usage, int32_t num_workers,
                         int32_t queue_size, bool decode, std::unique_ptr<DataSchema> data_schema,
                         std::shared_ptr<SamplerRT> sampler)
    : MappableLeafOp(num_workers, queue_size, std::move(sampler)),
      folder_path_(folder_path),
      decode_(decode),
      usage_(usage),
      data_schema_(std::move(data_schema)) {}

Status WIDERFaceOp::PrepareData() {
  auto realpath = FileUtils::GetRealPath(folder_path_.data());
  if (!realpath.has_value()) {
    MS_LOG(ERROR) << "Invalid file path, WIDERFace dataset dir: " << folder_path_ << " does not exist.";
    RETURN_STATUS_UNEXPECTED("Invalid file path, WIDERFace dataset dir: " + folder_path_ + " does not exist.");
  }
  std::string train_folder = (Path(realpath.value()) / Path(kTrainBase) / Path(kImage)).ToString();
  std::string val_folder = (Path(realpath.value()) / Path(kValBase) / Path(kImage)).ToString();
  std::string test_folder = (Path(realpath.value()) / Path(kTestBase) / Path(kImage)).ToString();
  std::string train_anno_dir = (Path(realpath.value()) / Path(kSplitPath) / Path(kTrainAnno)).ToString();
  std::string val_anno_dir = (Path(realpath.value()) / Path(kSplitPath) / Path(kValAnno)).ToString();

  if (usage_ == "train") {
    RETURN_IF_NOT_OK(WalkFolders(train_folder));
    RETURN_IF_NOT_OK(GetTraValAnno(train_anno_dir, train_folder));
  } else if (usage_ == "valid") {
    RETURN_IF_NOT_OK(WalkFolders(val_folder));
    RETURN_IF_NOT_OK(GetTraValAnno(val_anno_dir, val_folder));
  } else if (usage_ == "test") {
    RETURN_IF_NOT_OK(WalkFolders(test_folder));
  } else {
    RETURN_IF_NOT_OK(WalkFolders(train_folder));
    RETURN_IF_NOT_OK(WalkFolders(val_folder));
    RETURN_IF_NOT_OK(GetTraValAnno(train_anno_dir, train_folder));
    RETURN_IF_NOT_OK(GetTraValAnno(val_anno_dir, val_folder));
  }
  all_img_names_.shrink_to_fit();
  num_rows_ = all_img_names_.size();
  return Status::OK();
}

void WIDERFaceOp::Print(std::ostream &out, bool show_all) const {
  if (!show_all) {
    ParallelOp::Print(out, show_all);
    out << "\n";
  } else {
    ParallelOp::Print(out, show_all);
    out << "\nNumber of rows: " << num_rows_ << "\nWIDERFace dataset dir: " << folder_path_
        << "\nDecode: " << (decode_ ? "yes" : "no") << "\n\n";
  }
}

Status WIDERFaceOp::LoadTensorRow(row_id_type row_id, TensorRow *trow) {
  RETURN_UNEXPECTED_IF_NULL(trow);
  std::string img_name = all_img_names_[row_id];
  std::shared_ptr<Tensor> image;
  std::vector<std::string> path_list;
  RETURN_IF_NOT_OK(ReadImageToTensor(img_name, &image));
  trow->setId(row_id);
  trow->push_back(std::move(image));
  if (usage_ == "test") {
    path_list = {img_name};
  } else if (usage_ == "all" || usage_ == "train" || usage_ == "valid") {
    TensorRow annotation;
    RETURN_IF_NOT_OK(ParseAnnotations(img_name, &annotation));
    std::string train_path = (Path(folder_path_) / Path(kSplitPath) / Path(kTrainAnno)).ToString();
    std::string val_path = (Path(folder_path_) / Path(kSplitPath) / Path(kValAnno)).ToString();
    trow->insert(trow->end(), annotation.begin(), annotation.end());
    if (img_name.find("train") != std::string::npos) {
      path_list = {img_name, train_path, train_path, train_path, train_path, train_path, train_path, train_path};
    } else {
      path_list = {img_name, val_path, val_path, val_path, val_path, val_path, val_path, val_path};
    }
  }
  trow->setPath(path_list);
  return Status::OK();
}

Status WIDERFaceOp::ReadImageToTensor(const std::string &image_path, std::shared_ptr<Tensor> *tensor) {
  RETURN_UNEXPECTED_IF_NULL(tensor);
  RETURN_IF_NOT_OK(Tensor::CreateFromFile(image_path, tensor));
  if (decode_) {
    Status rc = Decode(*tensor, tensor);
    CHECK_FAIL_RETURN_UNEXPECTED(
      rc.IsOk(), "Invalid file, failed to decode image, the image may be broken or permission denied: " + image_path);
  }
  return Status::OK();
}

// Get annotations for "train" or "valid" usage.
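// For reference, a record in wider_face_train_bbx_gt.txt / wider_face_val_bbx_gt.txt is expected to
// span three kinds of lines (illustrative values; the field order mirrors the kXxxIndex constants above):
//   0--Parade/0_Parade_marchingband_1_465.jpg   <- image path, relative to the images folder
//   1                                           <- number of bounding boxes that follow
//   345 211 64 80 0 0 0 0 0 0                   <- x y w h blur expression illumination occlusion pose invalid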
Status WIDERFaceOp::GetTraValAnno(const std::string &list_path, const std::string &image_folder_path) {
  std::string line;
  bool file_name_line = true;
  bool num_boxes_line = false;
  bool box_annotation_line = false;
  int32_t num_boxes = 0, box_counter = 0;
  std::string image_path;
  std::vector<int32_t> image_labels;
  std::ifstream file_reader(list_path);
  while (getline(file_reader, line)) {
    if (file_name_line) {
      box_counter = 0;
      image_labels.clear();
      image_path = (Path(image_folder_path) / Path(line)).ToString();
      file_name_line = false;
      num_boxes_line = true;
    } else if (num_boxes_line) {
      try {
        num_boxes = std::stoi(line);
      } catch (const std::exception &e) {
        file_reader.close();
        RETURN_STATUS_UNEXPECTED("Invalid data, failed to read the number of bbox: " + line);
      }
      num_boxes_line = false;
      box_annotation_line = true;
    } else if (box_annotation_line) {
      box_counter += 1;
      std::vector<int32_t> labels;
      RETURN_IF_NOT_OK(Split(line, &labels));
      image_labels.insert(image_labels.end(), labels.begin(), labels.end());
      if (box_counter >= num_boxes) {
        box_annotation_line = false;
        file_name_line = true;
        annotation_map_[image_path] = image_labels;
      }
    }
  }
  file_reader.close();
  return Status::OK();
}

// Parse annotations to tensors.
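// For an image with N annotated boxes this produces seven tensors: "bbox" with shape (N, 4) plus six
// one-dimensional attribute tensors of length N (blur, expression, illumination, occlusion, pose,
// invalid), matching the column schema assembled in WIDERFaceNode::Build().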
Status WIDERFaceOp::ParseAnnotations(const std::string &path, TensorRow *tensor) {
  RETURN_UNEXPECTED_IF_NULL(tensor);
  std::vector<int32_t> annotation = annotation_map_[path];
  CHECK_FAIL_RETURN_UNEXPECTED(
    static_cast<int>(annotation.size()) % kDataLen == 0,
    "Invalid parameter, only annotations with a size that is a multiple of ten are accepted, but got: " +
      std::to_string(annotation.size()));
  std::vector<int32_t> bboxes_vec, blur_vec, expression_vec, illumination_vec, occlusion_vec, pose_vec, invalid_vec;
  std::vector<int32_t> label_vec;
  std::shared_ptr<Tensor> bbox, blur, expression, illumination, occlusion, pose, invalid;
  int32_t bbox_num = static_cast<int>(annotation.size()) / kDataLen;
  for (int32_t index = 0; index < bbox_num; index++) {
    label_vec.clear();
    for (int32_t inner_index = 0; inner_index < kDataLen; inner_index++) {
      label_vec.emplace_back(annotation[index * kDataLen + inner_index]);
    }
    bboxes_vec.insert(bboxes_vec.end(), label_vec.begin(), label_vec.begin() + kBboxLen);
    blur_vec.emplace_back(label_vec[kBlurIndex]);
    expression_vec.emplace_back(label_vec[kExpressionIndex]);
    illumination_vec.emplace_back(label_vec[kIlluminationIndex]);
    occlusion_vec.emplace_back(label_vec[kOcclusionIndex]);
    pose_vec.emplace_back(label_vec[kPoseIndex]);
    invalid_vec.emplace_back(label_vec[kInvalidIndex]);
  }

  RETURN_IF_NOT_OK(Tensor::CreateFromVector(bboxes_vec, TensorShape({bbox_num, 4}), &bbox));
  RETURN_IF_NOT_OK(Tensor::CreateFromVector(blur_vec, &blur));
  RETURN_IF_NOT_OK(Tensor::CreateFromVector(expression_vec, &expression));
  RETURN_IF_NOT_OK(Tensor::CreateFromVector(illumination_vec, &illumination));
  RETURN_IF_NOT_OK(Tensor::CreateFromVector(occlusion_vec, &occlusion));
  RETURN_IF_NOT_OK(Tensor::CreateFromVector(pose_vec, &pose));
  RETURN_IF_NOT_OK(Tensor::CreateFromVector(invalid_vec, &invalid));
  (*tensor) = TensorRow({std::move(bbox), std::move(blur), std::move(expression), std::move(illumination),
                         std::move(occlusion), std::move(pose), std::move(invalid)});
  return Status::OK();
}

// Convert annotation line to int32_t vector.
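// Illustrative example (hypothetical values): Split("345 211 64 80 0 0 0 0 0 0", &out) fills out
// with {345, 211, 64, 80, 0, 0, 0, 0, 0, 0}.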
Status WIDERFaceOp::Split(const std::string &line, std::vector<int32_t> *split_num) {
  RETURN_UNEXPECTED_IF_NULL(split_num);
  std::string str = line;
  std::string::size_type pos;
  std::vector<std::string> split;
  int size = str.size();
  for (int index = 0; index < size;) {
    pos = str.find(" ", index);
    if (pos == std::string::npos) {
      // No trailing space after the last token: take the rest of the line and stop.
      split.push_back(str.substr(index));
      break;
    }
    if (pos != static_cast<std::string::size_type>(index)) {
      std::string s = str.substr(index, pos - index);
      split.push_back(s);
    }
    index = pos + 1;
  }
  size_t i = 0;
  try {
    for (; i < split.size(); i++) {
      split_num->emplace_back(stoi(split[i]));
    }
  } catch (const std::exception &e) {
    MS_LOG(ERROR) << "Invalid data, failed to parse the annotation " << i << ": " << split[i];
    RETURN_STATUS_UNEXPECTED("Invalid data, failed to parse the annotation " + std::to_string(i) + ": " + split[i]);
  }
  return Status::OK();
}

// Get dataset size.
Status WIDERFaceOp::CountTotalRows(int64_t *count) {
  RETURN_UNEXPECTED_IF_NULL(count);
  if (all_img_names_.empty()) {
    RETURN_IF_NOT_OK(PrepareData());
  }
  *count = static_cast<int64_t>(all_img_names_.size());
  return Status::OK();
}
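
// Note: the walk below assumes the on-disk layout documented for WIDERFaceDataset, e.g.
// WIDER_train/images/0--Parade/0_Parade_marchingband_1_11.jpg: one level of event subfolders,
// each holding ".jpg" files. Subfolder names go to folder_names_, image paths to all_img_names_.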
Status WIDERFaceOp::WalkFolders(const std::string &wf_path) {
  Path img_folder(wf_path);
  CHECK_FAIL_RETURN_UNEXPECTED(img_folder.Exists() && img_folder.IsDirectory(),
                               "Invalid path, failed to open WIDERFace folder: " + wf_path);
  std::shared_ptr<Path::DirIterator> img_folder_itr = Path::DirIterator::OpenDirectory(&img_folder);
  RETURN_UNEXPECTED_IF_NULL(img_folder_itr);
  int32_t dirname_offset = img_folder.ToString().length() + 1;

  while (img_folder_itr->HasNext()) {
    Path sub_dir = img_folder_itr->Next();
    if (sub_dir.IsDirectory()) {
      folder_names_.insert(sub_dir.ToString().substr(dirname_offset));
    }
  }
  CHECK_FAIL_RETURN_UNEXPECTED(!folder_names_.empty(),
                               "Invalid file, no subfolder found under path: " + img_folder.ToString());
  for (std::set<std::string>::iterator it = folder_names_.begin(); it != folder_names_.end(); ++it) {
    Path folder_dir(img_folder / (*it));
    auto folder_it = Path::DirIterator::OpenDirectory(&folder_dir);
    RETURN_UNEXPECTED_IF_NULL(folder_it);
    while (folder_it->HasNext()) {
      Path file = folder_it->Next();
      if (file.Extension() == kExtension) {
        all_img_names_.emplace_back(file.ToString());
      }
    }
  }
  CHECK_FAIL_RETURN_UNEXPECTED(!all_img_names_.empty(),
                               "Invalid file, no " + std::string(kExtension) + " file found under path: " + wf_path);
  return Status::OK();
}

Status WIDERFaceOp::ComputeColMap() {
  // Set the column name map (base class field).
  if (column_name_id_map_.empty()) {
    for (int32_t index = 0; index < data_schema_->NumColumns(); index++) {
      column_name_id_map_[data_schema_->Column(index).Name()] = index;
    }
  } else {
    MS_LOG(WARNING) << "Column name map is already set!";
  }
  return Status::OK();
}
}  // namespace dataset
}  // namespace mindspore
@@ -0,0 +1,121 @@
/**
 * Copyright 2021 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifndef MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_DATASETOPS_SOURCE_WIDER_FACE_OP_H_
#define MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_DATASETOPS_SOURCE_WIDER_FACE_OP_H_

#include <fstream>
#include <map>
#include <memory>
#include <set>
#include <string>
#include <utility>
#include <vector>

#include "minddata/dataset/engine/data_schema.h"
#include "minddata/dataset/engine/datasetops/parallel_op.h"
#include "minddata/dataset/engine/datasetops/source/io_block.h"
#include "minddata/dataset/engine/datasetops/source/mappable_leaf_op.h"
#include "minddata/dataset/engine/datasetops/source/sampler/sampler.h"
#include "minddata/dataset/util/queue.h"
#include "minddata/dataset/util/status.h"

namespace mindspore {
namespace dataset {
class WIDERFaceOp : public MappableLeafOp {
 public:
  /// Constructor.
  /// \param[in] folder_path Directory of the WIDERFace dataset.
  /// \param[in] usage Usage of the dataset ("train", "valid", "test" or "all").
  /// \param[in] num_workers Number of workers reading images in parallel.
  /// \param[in] queue_size Connector queue size.
  /// \param[in] decode Whether to decode images.
  /// \param[in] schema Data schema of the WIDERFace dataset.
  /// \param[in] sampler Sampler that tells WIDERFaceOp what to read.
  WIDERFaceOp(const std::string &folder_path, const std::string &usage, int32_t num_workers, int32_t queue_size,
              bool decode, std::unique_ptr<DataSchema> schema, std::shared_ptr<SamplerRT> sampler);

  /// Destructor.
  ~WIDERFaceOp() override = default;

  /// A print method typically used for debugging.
  /// \param[out] out Output stream.
  /// \param[in] show_all Whether to show all information.
  void Print(std::ostream &out, bool show_all) const override;

  /// Op name getter.
  /// \return Name of the current Op.
  std::string Name() const override { return "WIDERFaceOp"; }

  /// Function to count the number of samples in the WIDERFace dataset.
  /// \param[out] count Output arg that will hold the actual dataset size.
  /// \return Status - The status code returned.
  Status CountTotalRows(int64_t *count);

 private:
  /// Load a tensor row.
  /// \param[in] row_id Row id.
  /// \param[out] trow Tensor row that all features are read into.
  /// \return Status - The status code returned.
  Status LoadTensorRow(row_id_type row_id, TensorRow *trow) override;

  /// Read an image into a tensor.
  /// \param[in] image_path Path of the image data.
  /// \param[out] tensor Tensor the image is read into.
  /// \return Status - The status code returned.
  Status ReadImageToTensor(const std::string &image_path, std::shared_ptr<Tensor> *tensor);

  /// Called first when the op runs. Gets file names, image paths and attribute info from the ".txt" files.
  /// \return Status - The status code returned.
  Status PrepareData();

  /// Walk the selected folder to get image names.
  /// \param[in] wf_path Image folder to walk.
  /// \return Status - The status code returned.
  Status WalkFolders(const std::string &wf_path);

  /// Get all valid or train or both file paths (names).
  /// \param[in] list_path Real path of the annotation file.
  /// \param[in] image_folder_path Real path of the image folder.
  /// \return Status - The status code returned.
  Status GetTraValAnno(const std::string &list_path, const std::string &image_folder_path);

  /// Parse the annotations of one image into tensors.
  /// \param[in] path Path of the image inside the WIDERFace directory.
  /// \param[out] tensor Tensor row holding the parsed attributes.
  /// \return Status - The status code returned.
  Status ParseAnnotations(const std::string &path, TensorRow *tensor);

  /// Split attribute info with space.
  /// \param[in] line Line read from the annotation files.
  /// \param[out] split_num Vector of annotation values.
  /// \return Status - The status code returned.
  Status Split(const std::string &line, std::vector<int32_t> *split_num);

  /// Private function for computing the assignment of the column name map.
  /// \return Status - The status code returned.
  Status ComputeColMap() override;

  std::string folder_path_;  // Directory of the WIDERFace folder.
  std::string usage_;
  bool decode_;
  std::unique_ptr<DataSchema> data_schema_;

  std::vector<std::string> all_img_names_;
  std::set<std::string> folder_names_;
  std::map<std::string, int32_t> class_index_;
  std::map<std::string, std::vector<int32_t>> annotation_map_;
};
}  // namespace dataset
}  // namespace mindspore
#endif  // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_DATASETOPS_SOURCE_WIDER_FACE_OP_H_
@@ -120,6 +120,7 @@ constexpr char kTFRecordNode[] = "TFRecordDataset";
constexpr char kUDPOSNode[] = "UDPOSDataset";
constexpr char kUSPSNode[] = "USPSDataset";
constexpr char kVOCNode[] = "VOCDataset";
constexpr char kWIDERFaceNode[] = "WIDERFaceDataset";
constexpr char kWikiTextNode[] = "WikiTextDataset";
constexpr char kYahooAnswersNode[] = "YahooAnswersDataset";
constexpr char kYelpReviewNode[] = "YelpReviewDataset";
@@ -46,6 +46,7 @@ set(DATASET_ENGINE_IR_DATASETOPS_SOURCE_SRC_FILES
        udpos_node.cc
        usps_node.cc
        voc_node.cc
        wider_face_node.cc
        wiki_text_node.cc
        yahoo_answers_node.cc
        yelp_review_node.cc
@@ -0,0 +1,131 @@
/**
 * Copyright 2021 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "minddata/dataset/engine/ir/datasetops/source/wider_face_node.h"

#include <algorithm>
#include <utility>

#include "minddata/dataset/engine/datasetops/source/wider_face_op.h"
#include "minddata/dataset/util/status.h"

namespace mindspore {
namespace dataset {
// Constructor for WIDERFaceNode.
WIDERFaceNode::WIDERFaceNode(const std::string &dataset_dir, const std::string &usage, const bool &decode,
                             const std::shared_ptr<SamplerObj> &sampler, const std::shared_ptr<DatasetCache> &cache)
    : MappableSourceNode(std::move(cache)),
      dataset_dir_(dataset_dir),
      usage_(usage),
      sampler_(sampler),
      decode_(decode) {}

std::shared_ptr<DatasetNode> WIDERFaceNode::Copy() {
  std::shared_ptr<SamplerObj> sampler = (sampler_ == nullptr) ? nullptr : sampler_->SamplerCopy();
  auto node = std::make_shared<WIDERFaceNode>(dataset_dir_, usage_, decode_, sampler, cache_);
  return node;
}

void WIDERFaceNode::Print(std::ostream &out) const { out << Name(); }

Status WIDERFaceNode::ValidateParams() {
  RETURN_IF_NOT_OK(DatasetNode::ValidateParams());
  RETURN_IF_NOT_OK(ValidateDatasetDirParam("WIDERFaceDataset", dataset_dir_));
  RETURN_IF_NOT_OK(ValidateDatasetSampler("WIDERFaceDataset", sampler_));
  RETURN_IF_NOT_OK(ValidateStringValue("WIDERFaceDataset", usage_, {"all", "train", "valid", "test"}));
  return Status::OK();
}

// Function to build WIDERFaceNode.
Status WIDERFaceNode::Build(std::vector<std::shared_ptr<DatasetOp>> *const node_ops) {
  std::unique_ptr<DataSchema> schema = std::make_unique<DataSchema>();
  if (usage_ == "all" || usage_ == "train" || usage_ == "valid") {
    RETURN_IF_NOT_OK(schema->AddColumn(ColDescriptor("image", DataType(DataType::DE_UINT8), TensorImpl::kFlexible, 1)));
    RETURN_IF_NOT_OK(
      schema->AddColumn(ColDescriptor(std::string("bbox"), DataType(DataType::DE_UINT32), TensorImpl::kFlexible, 1)));
    RETURN_IF_NOT_OK(
      schema->AddColumn(ColDescriptor(std::string("blur"), DataType(DataType::DE_UINT32), TensorImpl::kFlexible, 1)));
    RETURN_IF_NOT_OK(schema->AddColumn(
      ColDescriptor(std::string("expression"), DataType(DataType::DE_UINT32), TensorImpl::kFlexible, 1)));
    RETURN_IF_NOT_OK(schema->AddColumn(
      ColDescriptor(std::string("illumination"), DataType(DataType::DE_UINT32), TensorImpl::kFlexible, 1)));
    RETURN_IF_NOT_OK(schema->AddColumn(
      ColDescriptor(std::string("occlusion"), DataType(DataType::DE_UINT32), TensorImpl::kFlexible, 1)));
    RETURN_IF_NOT_OK(
      schema->AddColumn(ColDescriptor(std::string("pose"), DataType(DataType::DE_UINT32), TensorImpl::kFlexible, 1)));
    RETURN_IF_NOT_OK(schema->AddColumn(
      ColDescriptor(std::string("invalid"), DataType(DataType::DE_UINT32), TensorImpl::kFlexible, 1)));
  } else if (usage_ == "test") {
    RETURN_IF_NOT_OK(schema->AddColumn(ColDescriptor("image", DataType(DataType::DE_UINT8), TensorImpl::kFlexible, 1)));
  }
  std::shared_ptr<SamplerRT> sampler_rt = nullptr;
  RETURN_IF_NOT_OK(sampler_->SamplerBuild(&sampler_rt));
  auto wider_face_op = std::make_shared<WIDERFaceOp>(dataset_dir_, usage_, num_workers_, connector_que_size_, decode_,
                                                     std::move(schema), std::move(sampler_rt));
  wider_face_op->SetTotalRepeats(GetTotalRepeats());
  wider_face_op->SetNumRepeatsPerEpoch(GetNumRepeatsPerEpoch());
  node_ops->push_back(wider_face_op);
  return Status::OK();
}

// Get the shard id of node.
Status WIDERFaceNode::GetShardId(int32_t *shard_id) {
  *shard_id = sampler_->ShardId();
  return Status::OK();
}

// Get dataset size.
Status WIDERFaceNode::GetDatasetSize(const std::shared_ptr<DatasetSizeGetter> &size_getter, bool estimate,
                                     int64_t *dataset_size) {
  if (dataset_size_ > 0) {
    *dataset_size = dataset_size_;
    return Status::OK();
  }
  int64_t num_rows = 0, sample_size;
  std::vector<std::shared_ptr<DatasetOp>> ops;
  RETURN_IF_NOT_OK(Build(&ops));
  CHECK_FAIL_RETURN_UNEXPECTED(!ops.empty(), "Unable to build WIDERFaceOp.");
  auto op = std::dynamic_pointer_cast<WIDERFaceOp>(ops.front());
  RETURN_IF_NOT_OK(op->CountTotalRows(&num_rows));
  std::shared_ptr<SamplerRT> sampler_rt = nullptr;
  RETURN_IF_NOT_OK(sampler_->SamplerBuild(&sampler_rt));
  sample_size = sampler_rt->CalculateNumSamples(num_rows);
  if (sample_size == -1) {
    RETURN_IF_NOT_OK(size_getter->DryRun(shared_from_this(), &sample_size));
  }
  *dataset_size = sample_size;
  dataset_size_ = *dataset_size;
  return Status::OK();
}

Status WIDERFaceNode::to_json(nlohmann::json *out_json) {
  nlohmann::json args, sampler_args;
  RETURN_IF_NOT_OK(sampler_->to_json(&sampler_args));
  args["sampler"] = sampler_args;
  args["num_parallel_workers"] = num_workers_;
  args["dataset_dir"] = dataset_dir_;
  args["decode"] = decode_;
  args["usage"] = usage_;
  if (cache_ != nullptr) {
    nlohmann::json cache_args;
    RETURN_IF_NOT_OK(cache_->to_json(&cache_args));
    args["cache"] = cache_args;
  }
  *out_json = args;
  return Status::OK();
}
}  // namespace dataset
}  // namespace mindspore
@@ -0,0 +1,103 @@
/**
 * Copyright 2021 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_IR_DATASETOPS_SOURCE_WIDER_FACE_NODE_H_
#define MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_IR_DATASETOPS_SOURCE_WIDER_FACE_NODE_H_

#include <map>
#include <memory>
#include <string>
#include <vector>

#include "minddata/dataset/engine/ir/datasetops/dataset_node.h"

namespace mindspore {
namespace dataset {
class WIDERFaceNode : public MappableSourceNode {
 public:
  /// \brief Constructor.
  WIDERFaceNode(const std::string &dataset_dir, const std::string &usage, const bool &decode,
                const std::shared_ptr<SamplerObj> &sampler, const std::shared_ptr<DatasetCache> &cache);

  /// \brief Destructor.
  ~WIDERFaceNode() = default;

  /// \brief Node name getter.
  /// \return Name of the current node.
  std::string Name() const override { return kWIDERFaceNode; }

  /// \brief Print the description.
  /// \param out The output stream to write output to.
  void Print(std::ostream &out) const override;

  /// \brief Copy the node to a new object.
  /// \return A shared pointer to the new copy.
  std::shared_ptr<DatasetNode> Copy() override;

  /// \brief A base class override function to create the required runtime dataset op objects for this class.
  /// \param node_ops A vector containing shared pointers to the Dataset Ops that this object will create.
  /// \return Status Status::OK() if build successfully.
  Status Build(std::vector<std::shared_ptr<DatasetOp>> *const node_ops) override;

  /// \brief Parameters validation.
  /// \return Status Status::OK() if all the parameters are valid.
  Status ValidateParams() override;

  /// \brief Get the shard id of node.
  /// \param[out] shard_id The shard id.
  /// \return Status Status::OK() if getting the shard id succeeds.
  Status GetShardId(int32_t *shard_id) override;

  /// \brief Base-class override for GetDatasetSize.
  /// \param[in] size_getter Shared pointer to DatasetSizeGetter.
  /// \param[in] estimate This is only supported by some of the ops and it's used to speed up the process of getting
  ///     dataset size at the expense of accuracy.
  /// \param[out] dataset_size The size of the dataset.
  /// \return Status of the function.
  Status GetDatasetSize(const std::shared_ptr<DatasetSizeGetter> &size_getter, bool estimate,
                        int64_t *dataset_size) override;

  /// \brief Getter function.
  const std::string &DatasetDir() const { return dataset_dir_; }

  /// \brief Getter function.
  const std::string &Usage() const { return usage_; }

  /// \brief Getter function.
  bool Decode() const { return decode_; }

  /// \brief Get the arguments of node.
  /// \param[out] out_json JSON string of all attributes.
  /// \return Status of the function.
  Status to_json(nlohmann::json *out_json) override;

  /// \brief Sampler getter.
  /// \return SamplerObj of the current node.
  std::shared_ptr<SamplerObj> Sampler() override { return sampler_; }

  /// \brief Sampler setter.
  /// \param[in] sampler Sampler object used to choose samples from the dataset.
  void SetSampler(std::shared_ptr<SamplerObj> sampler) override { sampler_ = sampler; }

 private:
  std::string dataset_dir_;
  std::string usage_;
  bool decode_;
  std::shared_ptr<SamplerObj> sampler_;
};
}  // namespace dataset
}  // namespace mindspore
#endif  // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_IR_DATASETOPS_SOURCE_WIDER_FACE_NODE_H_
@@ -4925,6 +4925,113 @@ inline std::shared_ptr<VOCDataset> MS_API VOC(const std::string &dataset_dir, co
                                 MapStringToChar(class_indexing), decode, sampler, cache, extra_metadata);
}

/// \class WIDERFaceDataset
/// \brief A source dataset for reading and parsing the WIDERFace dataset.
class MS_API WIDERFaceDataset : public Dataset {
 public:
  /// \brief Constructor of WIDERFaceDataset.
  /// \param[in] dataset_dir Path to the root directory that contains the dataset.
  /// \param[in] usage The type of data to be read, can be "train", "test", "valid" or "all". "all" will read samples
  ///     from "train" and "valid".
  /// \param[in] decode Decode the images after reading.
  /// \param[in] sampler Shared pointer to a sampler object used to choose samples from the dataset.
  /// \param[in] cache Tensor cache to use.
  WIDERFaceDataset(const std::vector<char> &dataset_dir, const std::vector<char> &usage, bool decode,
                   const std::shared_ptr<Sampler> &sampler, const std::shared_ptr<DatasetCache> &cache);

  /// \brief Constructor of WIDERFaceDataset.
  /// \param[in] dataset_dir Path to the root directory that contains the dataset.
  /// \param[in] usage The type of data to be read, can be "train", "test", "valid" or "all". "all" will read samples
  ///     from "train" and "valid".
  /// \param[in] decode Decode the images after reading.
  /// \param[in] sampler Raw pointer to a sampler object used to choose samples from the dataset.
  /// \param[in] cache Tensor cache to use.
  WIDERFaceDataset(const std::vector<char> &dataset_dir, const std::vector<char> &usage, bool decode,
                   const Sampler *sampler, const std::shared_ptr<DatasetCache> &cache);

  /// \brief Constructor of WIDERFaceDataset.
  /// \param[in] dataset_dir Path to the root directory that contains the dataset.
  /// \param[in] usage The type of data to be read, can be "train", "test", "valid" or "all". "all" will read samples
  ///     from "train" and "valid".
  /// \param[in] decode Decode the images after reading.
  /// \param[in] sampler Sampler object used to choose samples from the dataset.
  /// \param[in] cache Tensor cache to use.
  WIDERFaceDataset(const std::vector<char> &dataset_dir, const std::vector<char> &usage, bool decode,
                   const std::reference_wrapper<Sampler> sampler, const std::shared_ptr<DatasetCache> &cache);

  /// \brief Destructor of WIDERFaceDataset.
  ~WIDERFaceDataset() = default;
};

/// \brief Function to create a WIDERFace Dataset.
/// \note When usage is "train", "valid" or "all", the generated dataset has eight columns ["image", "bbox", "blur",
///     "expression", "illumination", "occlusion", "pose", "invalid"]. When usage is "test", it only has one column
///     ["image"].
/// \param[in] dataset_dir Path to the root directory that contains the dataset.
/// \param[in] usage The type of data to be read, can be "train", "test", "valid" or "all" (default="all"). "all" will
///     read samples from "train" and "valid".
/// \param[in] decode The option to decode the images in dataset (default = false).
/// \param[in] sampler Shared pointer to a sampler object used to choose samples from the dataset. If sampler is not
///     given, a `RandomSampler` will be used to randomly iterate the entire dataset (default = RandomSampler()).
/// \param[in] cache Tensor cache to use (default=nullptr, which means no cache is used).
/// \return Shared pointer to the WIDERFaceDataset.
/// \par Example
/// \code
///     /* Define dataset path and MindData object */
///     std::string folder_path = "/path/to/wider_face_dataset_directory";
///     std::shared_ptr<Dataset> ds = WIDERFace(folder_path, "train", false, std::make_shared<SequentialSampler>(0, 2));
///
///     /* Create iterator to read dataset */
///     std::shared_ptr<Iterator> iter = ds->CreateIterator();
///     std::unordered_map<std::string, mindspore::MSTensor> row;
///     iter->GetNextRow(&row);
///
///     /* Note: In WIDERFace dataset, if usage='test', each dictionary has key "image" */
///     /* Note: In WIDERFace dataset, if usage='all', 'train' or 'valid', each dictionary has keys "image", "bbox",
///        "blur", "expression", "illumination", "occlusion", "pose", "invalid" */
///     auto image = row["image"];
/// \endcode
inline std::shared_ptr<WIDERFaceDataset> MS_API
WIDERFace(const std::string &dataset_dir, const std::string &usage = "all", bool decode = false,
          const std::shared_ptr<Sampler> &sampler = std::make_shared<RandomSampler>(),
          const std::shared_ptr<DatasetCache> &cache = nullptr) {
  return std::make_shared<WIDERFaceDataset>(StringToChar(dataset_dir), StringToChar(usage), decode, sampler, cache);
}

/// \brief Function to create a WIDERFace Dataset.
/// \note When usage is "train", "valid" or "all", the generated dataset has eight columns ["image", "bbox", "blur",
///     "expression", "illumination", "occlusion", "pose", "invalid"]. When usage is "test", it only has one column
///     ["image"].
/// \param[in] dataset_dir Path to the root directory that contains the dataset.
/// \param[in] usage The type of data to be read, can be "train", "test", "valid" or "all". "all" will read samples
///     from "train" and "valid".
/// \param[in] decode The option to decode the images in dataset.
/// \param[in] sampler Raw pointer to a sampler object used to choose samples from the dataset.
/// \param[in] cache Tensor cache to use (default=nullptr, which means no cache is used).
/// \return Shared pointer to the WIDERFaceDataset.
inline std::shared_ptr<WIDERFaceDataset> MS_API WIDERFace(const std::string &dataset_dir, const std::string &usage,
                                                          bool decode, const Sampler *sampler,
                                                          const std::shared_ptr<DatasetCache> &cache = nullptr) {
  return std::make_shared<WIDERFaceDataset>(StringToChar(dataset_dir), StringToChar(usage), decode, sampler, cache);
}

/// \brief Function to create a WIDERFace Dataset.
/// \note When usage is "train", "valid" or "all", the generated dataset has eight columns ["image", "bbox", "blur",
///     "expression", "illumination", "occlusion", "pose", "invalid"]. When usage is "test", it only has one column
///     ["image"].
/// \param[in] dataset_dir Path to the root directory that contains the dataset.
/// \param[in] usage The type of data to be read, can be "train", "test", "valid" or "all". "all" will read samples
///     from "train" and "valid".
/// \param[in] decode The option to decode the images in dataset.
/// \param[in] sampler Sampler object used to choose samples from the dataset.
/// \param[in] cache Tensor cache to use (default=nullptr, which means no cache is used).
/// \return Shared pointer to the WIDERFaceDataset.
inline std::shared_ptr<WIDERFaceDataset> MS_API WIDERFace(const std::string &dataset_dir, const std::string &usage,
                                                          bool decode, const std::reference_wrapper<Sampler> sampler,
                                                          const std::shared_ptr<DatasetCache> &cache = nullptr) {
  return std::make_shared<WIDERFaceDataset>(StringToChar(dataset_dir), StringToChar(usage), decode, sampler, cache);
}

/// \class WikiTextDataset
/// \brief A source dataset for reading and parsing the WikiText dataset.
class MS_API WikiTextDataset : public Dataset {
@@ -66,6 +66,7 @@ class MS_API Sampler : std::enable_shared_from_this<Sampler> {
  friend class TFRecordDataset;
  friend class USPSDataset;
  friend class VOCDataset;
  friend class WIDERFaceDataset;
  friend class YesNoDataset;
  friend std::shared_ptr<SamplerObj> SelectSampler(int64_t, bool, int32_t, int32_t);
@@ -76,7 +76,7 @@ from .validators import check_batch, check_shuffle, check_map, check_filter, che
    check_stl10_dataset, check_yelp_review_dataset, check_penn_treebank_dataset, check_iwslt2016_dataset, \
    check_iwslt2017_dataset, check_sogou_news_dataset, check_yahoo_answers_dataset, check_udpos_dataset, \
    check_conll2000_dataset, check_amazon_review_dataset, check_semeion_dataset, check_caltech101_dataset, \
    check_caltech256_dataset, check_wiki_text_dataset, check_imdb_dataset
    check_caltech256_dataset, check_wiki_text_dataset, check_imdb_dataset, check_wider_face_dataset
from ..core.config import get_callback_timeout, _init_device_info, get_enable_shared_mem, get_num_parallel_workers, \
    get_prefetch_size
from ..core.datatypes import mstype_to_detype, mstypelist_to_detypelist
@@ -9803,6 +9803,158 @@ class DIV2KDataset(MappableDataset):
        return cde.DIV2KNode(self.dataset_dir, self.usage, self.downgrade, self.scale, self.decode, self.sampler)


class WIDERFaceDataset(MappableDataset):
    """
    A source dataset for reading and parsing the WIDERFace dataset.

    When usage is "train", "valid" or "all", the generated dataset has eight columns ["image", "bbox", "blur",
    "expression", "illumination", "occlusion", "pose", "invalid"]. When usage is "test", it only has one column
    ["image"].
    The tensor of column :py:obj:`image` is of the uint8 type.
    The tensor of column :py:obj:`bbox` is of the uint32 type.
    The tensor of column :py:obj:`blur` is of the uint32 type.
    The tensor of column :py:obj:`expression` is of the uint32 type.
    The tensor of column :py:obj:`illumination` is of the uint32 type.
    The tensor of column :py:obj:`occlusion` is of the uint32 type.
    The tensor of column :py:obj:`pose` is of the uint32 type.
    The tensor of column :py:obj:`invalid` is of the uint32 type.

    Args:
        dataset_dir (str): Path to the root directory that contains the dataset.
        usage (str, optional): Usage of this dataset, can be `train`, `test`, `valid` or `all`. `train` will read
            from 12,880 samples, `test` will read from 16,097 samples, `valid` will read from 3,226 samples
            and `all` will read all `train` and `valid` samples (default=None, will be set to `all`).
        num_samples (int, optional): The number of images to be included in the dataset
            (default=None, will read all images).
        num_parallel_workers (int, optional): Number of workers to read the data
            (default=None, will use value set in the config).
        shuffle (bool, optional): Whether or not to perform shuffle on the dataset
            (default=None, expected order behavior shown in the table).
        decode (bool, optional): Decode the images after reading (default=False).
        sampler (Sampler, optional): Object used to choose samples from the dataset
            (default=None, expected order behavior shown in the table).
        num_shards (int, optional): Number of shards that the dataset will be divided into (default=None).
            When this argument is specified, `num_samples` reflects the maximum sample number per shard.
        shard_id (int, optional): The shard ID within `num_shards` (default=None). This argument can only be specified
            when `num_shards` is also specified.
        cache (DatasetCache, optional): Use tensor caching service to speed up dataset processing
            (default=None, which means no cache is used).

    Raises:
        RuntimeError: If dataset_dir does not contain data files.
        RuntimeError: If num_parallel_workers exceeds the max thread numbers.
        RuntimeError: If sampler and shuffle are specified at the same time.
        RuntimeError: If sampler and sharding are specified at the same time.
        RuntimeError: If num_shards is specified but shard_id is None.
        RuntimeError: If shard_id is specified but num_shards is None.
        ValueError: If shard_id is invalid (< 0 or >= num_shards).
        ValueError: If usage is not in [`train`, `test`, `valid`, `all`].
        ValueError: If annotation_file does not exist.
        ValueError: If dataset_dir does not exist.

    Note:
        - This dataset can take in a `sampler`. `sampler` and `shuffle` are mutually exclusive.
          The table below shows what input arguments are allowed and their expected behavior.

    .. list-table:: Expected Order Behavior of Using `sampler` and `shuffle`
       :widths: 25 25 50
       :header-rows: 1

       * - Parameter `sampler`
         - Parameter `shuffle`
         - Expected Order Behavior
       * - None
         - None
         - random order
       * - None
         - True
         - random order
       * - None
         - False
         - sequential order
       * - Sampler object
         - None
         - order defined by sampler
       * - Sampler object
         - True
         - not allowed
       * - Sampler object
         - False
         - not allowed

    Examples:
        >>> wider_face_dir = "/path/to/wider_face_dataset"
        >>>
        >>> # Read 3 samples from WIDERFace dataset
        >>> dataset = ds.WIDERFaceDataset(dataset_dir=wider_face_dir, num_samples=3)
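        >>>
        >>> # Read all samples from the WIDERFace test set; only the "image" column is produced
        >>> dataset = ds.WIDERFaceDataset(dataset_dir=wider_face_dir, usage="test")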

    About WIDERFace dataset:

    The WIDERFace dataset has a training set of 12,880 images, a validation set of 3,226 images and a test set of
    16,097 images, selected from the publicly available WIDER dataset. Faces are annotated with bounding boxes and
    per-face attributes such as blur, expression, illumination, occlusion, pose and validity.

    The following is the original WIDERFace dataset structure.
    You can unzip the dataset files into this directory structure and read by MindSpore's API.

    .. code-block::

        .
        └── wider_face_dir
            ├── WIDER_test
            │    └── images
            │        ├── 0--Parade
            │        │    ├── 0_Parade_marchingband_1_9.jpg
            │        │    ├── ...
            │        ├── 1--Handshaking
            │        ├── ...
            ├── WIDER_train
            │    └── images
            │        ├── 0--Parade
            │        │    ├── 0_Parade_marchingband_1_11.jpg
            │        │    ├── ...
            │        ├── 1--Handshaking
            │        ├── ...
            ├── WIDER_val
            │    └── images
            │        ├── 0--Parade
            │        │    ├── 0_Parade_marchingband_1_102.jpg
            │        │    ├── ...
            │        ├── 1--Handshaking
            │        ├── ...
            └── wider_face_split
                ├── wider_face_test_filelist.txt
                ├── wider_face_train_bbx_gt.txt
                └── wider_face_val_bbx_gt.txt

    Citation:

    .. code-block::

        @inproceedings{2016WIDER,
            title={WIDER FACE: A Face Detection Benchmark},
            author={Yang, S. and Luo, P. and Loy, C. C. and Tang, X.},
            booktitle={Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR)},
            pages={5525-5533},
            year={2016},
        }
    """

    @check_wider_face_dataset
    def __init__(self, dataset_dir, usage=None, num_samples=None, num_parallel_workers=None, shuffle=None,
                 decode=False, sampler=None, num_shards=None, shard_id=None, cache=None):
        super().__init__(num_parallel_workers=num_parallel_workers, sampler=sampler, num_samples=num_samples,
                         shuffle=shuffle, num_shards=num_shards, shard_id=shard_id, cache=cache)

        self.dataset_dir = dataset_dir
        self.usage = replace_none(usage, "all")
        self.decode = replace_none(decode, False)

    def parse(self, children=None):
        return cde.WIDERFaceNode(self.dataset_dir, self.usage, self.decode, self.sampler)


class YelpReviewDataset(SourceDataset, TextBaseDataset):
    """
    A source dataset that reads and parses Yelp Review Polarity and Yelp Review Full dataset.
@@ -2163,6 +2163,36 @@ def check_dbpedia_dataset(method):
    return new_method


def check_wider_face_dataset(method):
    """A wrapper that wraps a parameter checker around the WIDERFaceDataset."""

    @wraps(method)
    def new_method(self, *args, **kwargs):
        _, param_dict = parse_user_args(method, *args, **kwargs)

        nreq_param_int = ['num_samples', 'num_parallel_workers', 'num_shards', 'shard_id']
        nreq_param_bool = ['decode', 'shuffle']

        dataset_dir = param_dict.get('dataset_dir')
        check_dir(dataset_dir)

        usage = param_dict.get('usage')
        if usage is not None:
            check_valid_str(usage, ["train", "test", "valid", "all"], "usage")

        validate_dataset_param_value(nreq_param_int, param_dict, int)
        validate_dataset_param_value(nreq_param_bool, param_dict, bool)

        check_sampler_shuffle_shard_options(param_dict)

        cache = param_dict.get('cache')
        check_cache_option(cache)

        return method(self, *args, **kwargs)

    return new_method


def check_yelp_review_dataset(method):
    """A wrapper that wraps a parameter checker around the original Dataset(YelpReviewDataset)."""
@@ -56,6 +56,7 @@ SET(DE_UT_SRCS
    c_api_dataset_udpos_test.cc
    c_api_dataset_usps_test.cc
    c_api_dataset_voc_test.cc
    c_api_dataset_wider_face_test.cc
    c_api_dataset_yahoo_answers_test.cc
    c_api_dataset_yelp_review_test.cc
    c_api_dataset_yes_no_test.cc
@ -0,0 +1,294 @@
|
|||
/**
|
||||
* Copyright 2021 Huawei Technologies Co., Ltd
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
#include "common/common.h"
|
||||
#include "minddata/dataset/include/dataset/datasets.h"
|
||||
|
||||
using namespace mindspore::dataset;
|
||||
using mindspore::dataset::DataType;
|
||||
using mindspore::dataset::dsize_t;
|
||||
using mindspore::dataset::Tensor;
|
||||
using mindspore::dataset::TensorShape;
|
||||
|
||||
class MindDataTestPipeline : public UT::DatasetOpTesting {
|
||||
protected:
|
||||
};
|
||||
|
||||
/// Feature: Test WIDERFace dataset.
|
||||
/// Description: read data for default usage.
|
||||
/// Expectation: the data is processed successfully.
|
||||
TEST_F(MindDataTestPipeline, TestWIDERFace) {
|
||||
MS_LOG(INFO) << "Doing MindDataTestPipeline-TestWIDERFace.";
|
||||
// Create a WIDERFace Dataset.
|
||||
std::string folder_path = datasets_root_path_ + "/testWIDERFace/";
|
||||
std::shared_ptr<Dataset> ds = WIDERFace(folder_path);
|
||||
EXPECT_NE(ds, nullptr);
|
||||
|
||||
std::shared_ptr<Iterator> iter = ds->CreateIterator();
|
||||
EXPECT_NE(iter, nullptr);
|
||||
|
||||
std::unordered_map<std::string, mindspore::MSTensor> row;
|
||||
ASSERT_OK(iter->GetNextRow(&row));
|
||||
|
||||
EXPECT_NE(row.find("image"), row.end());
|
||||
EXPECT_NE(row.find("bbox"), row.end());
|
||||
EXPECT_NE(row.find("blur"), row.end());
|
||||
EXPECT_NE(row.find("expression"), row.end());
|
||||
EXPECT_NE(row.find("illumination"), row.end());
|
||||
EXPECT_NE(row.find("occlusion"), row.end());
|
||||
EXPECT_NE(row.find("pose"), row.end());
|
||||
EXPECT_NE(row.find("invalid"), row.end());
|
||||
|
||||
uint64_t i = 0;
|
||||
while (row.size() != 0) {
|
||||
auto image = row["image"];
|
||||
auto bbox = row["bbox"];
|
||||
auto blur = row["blur"];
|
||||
auto expression = row["expression"];
|
||||
auto illumination = row["illumination"];
|
||||
auto occlusion = row["occlusion"];
|
||||
auto pose = row["pose"];
|
||||
auto invalid = row["invalid"];
|
||||
MS_LOG(INFO) << "Tensor image shape: " << image.Shape();
|
||||
MS_LOG(INFO) << "Tensor bbox shape: " << bbox.Shape();
|
||||
MS_LOG(INFO) << "Tensor blur shape: " << blur.Shape();
|
||||
MS_LOG(INFO) << "Tensor expression shape: " << expression.Shape();
|
||||
MS_LOG(INFO) << "Tensor illumination shape: " << illumination.Shape();
|
||||
MS_LOG(INFO) << "Tensor occlusion shape: " << occlusion.Shape();
|
||||
MS_LOG(INFO) << "Tensor pose shape: " << pose.Shape();
|
||||
MS_LOG(INFO) << "Tensor invalid shape: " << invalid.Shape();
|
||||
ASSERT_OK(iter->GetNextRow(&row));
|
||||
i++;
|
||||
}
|
||||
|
||||
EXPECT_EQ(i, 4);
|
||||
iter->Stop();
|
||||
}
|
||||
|
/// Feature: Test WIDERFace dataset.
/// Description: test usage "test".
/// Expectation: the data is processed successfully.
TEST_F(MindDataTestPipeline, TestWIDERFaceTest) {
  MS_LOG(INFO) << "Doing MindDataTestPipeline-TestWIDERFaceTest.";
  // Create a WIDERFace Dataset.
  std::string folder_path = datasets_root_path_ + "/testWIDERFace/";
  std::shared_ptr<Dataset> ds = WIDERFace(folder_path, "test");
  EXPECT_NE(ds, nullptr);

  std::shared_ptr<Iterator> iter = ds->CreateIterator();
  EXPECT_NE(iter, nullptr);

  std::unordered_map<std::string, mindspore::MSTensor> row;
  ASSERT_OK(iter->GetNextRow(&row));

  uint64_t i = 0;
  while (row.size() != 0) {
    auto image = row["image"];
    MS_LOG(INFO) << "Tensor image shape: " << image.Shape();
    ASSERT_OK(iter->GetNextRow(&row));
    i++;
  }

  EXPECT_EQ(i, 3);
  iter->Stop();
}

/// Feature: Test WIDERFace dataset.
/// Description: test pipeline.
/// Expectation: the data is processed successfully.
TEST_F(MindDataTestPipeline, TestWIDERFaceDefaultWithPipeline) {
  MS_LOG(INFO) << "Doing MindDataTestPipeline-TestWIDERFaceDefaultWithPipeline.";
  // Create two WIDERFace Datasets.
  std::string folder_path = datasets_root_path_ + "/testWIDERFace/";

  std::shared_ptr<Dataset> ds1 = WIDERFace(folder_path);
  std::shared_ptr<Dataset> ds2 = WIDERFace(folder_path);
  EXPECT_NE(ds1, nullptr);
  EXPECT_NE(ds2, nullptr);

  // Create two Repeat operations on the datasets.
  int32_t repeat_num = 1;
  ds1 = ds1->Repeat(repeat_num);
  EXPECT_NE(ds1, nullptr);
  repeat_num = 2;
  ds2 = ds2->Repeat(repeat_num);
  EXPECT_NE(ds2, nullptr);

  // Create two Project operations on the datasets.
  std::vector<std::string> column_project = {"image",        "bbox",      "blur", "expression",
                                             "illumination", "occlusion", "pose", "invalid"};
  ds1 = ds1->Project(column_project);
  EXPECT_NE(ds1, nullptr);
  ds2 = ds2->Project(column_project);
  EXPECT_NE(ds2, nullptr);

  // Create a Concat operation on the datasets.
  ds1 = ds1->Concat({ds2});
  EXPECT_NE(ds1, nullptr);

  // Create an iterator over the result of the above dataset.
  // This will trigger the creation of the Execution Tree and launch it.
  std::shared_ptr<Iterator> iter = ds1->CreateIterator();
  EXPECT_NE(iter, nullptr);

  // Iterate the dataset and get each row.
  std::unordered_map<std::string, mindspore::MSTensor> row;
  ASSERT_OK(iter->GetNextRow(&row));

  uint64_t i = 0;
  while (row.size() != 0) {
    auto image = row["image"];
    auto bbox = row["bbox"];
    auto blur = row["blur"];
    auto expression = row["expression"];
    auto illumination = row["illumination"];
    auto occlusion = row["occlusion"];
    auto pose = row["pose"];
    auto invalid = row["invalid"];
    MS_LOG(INFO) << "Tensor image shape: " << image.Shape();
    MS_LOG(INFO) << "Tensor bbox shape: " << bbox.Shape();
    MS_LOG(INFO) << "Tensor blur shape: " << blur.Shape();
    MS_LOG(INFO) << "Tensor expression shape: " << expression.Shape();
    MS_LOG(INFO) << "Tensor illumination shape: " << illumination.Shape();
    MS_LOG(INFO) << "Tensor occlusion shape: " << occlusion.Shape();
    MS_LOG(INFO) << "Tensor pose shape: " << pose.Shape();
    MS_LOG(INFO) << "Tensor invalid shape: " << invalid.Shape();
    ASSERT_OK(iter->GetNextRow(&row));
    i++;
  }

  // 4 rows per epoch: ds1 repeated once (4) plus ds2 repeated twice (8) gives 12.
  EXPECT_EQ(i, 12);
  iter->Stop();
}

/// Feature: Test WIDERFace dataset.
/// Description: test WIDERFace getters.
/// Expectation: the data is processed successfully.
TEST_F(MindDataTestPipeline, TestWIDERFaceGetters) {
  MS_LOG(INFO) << "Doing MindDataTestPipeline-TestWIDERFaceGetters.";
  // Create a WIDERFace Dataset.
  std::string folder_path = datasets_root_path_ + "/testWIDERFace/";

  std::shared_ptr<Dataset> ds = WIDERFace(folder_path);
  EXPECT_NE(ds, nullptr);

  std::vector<std::string> column_names = {"image",        "bbox",      "blur", "expression",
                                           "illumination", "occlusion", "pose", "invalid"};
  EXPECT_EQ(ds->GetDatasetSize(), 4);
  EXPECT_EQ(ds->GetColumnNames(), column_names);
}

/// Feature: Test WIDERFace dataset.
/// Description: test WIDERFace usage error.
/// Expectation: throw error messages when certain errors occur.
TEST_F(MindDataTestPipeline, TestWIDERFaceWithUsageError) {
  MS_LOG(INFO) << "Doing MindDataTestPipeline-TestWIDERFaceWithUsageError.";
  // Create a WIDERFace Dataset.
  std::string folder_path = datasets_root_path_ + "/testWIDERFace/";

  std::shared_ptr<Dataset> ds = WIDERFace(folder_path, "off");
  EXPECT_NE(ds, nullptr);

  // Create an iterator over the result of the above dataset.
  std::shared_ptr<Iterator> iter = ds->CreateIterator();
  // Expect failure: invalid WIDERFace input, "off" is not a valid usage.
  EXPECT_EQ(iter, nullptr);
}

/// Feature: Test WIDERFace dataset.
/// Description: test WIDERFace with SequentialSampler.
/// Expectation: the data is processed successfully.
TEST_F(MindDataTestPipeline, TestWIDERFaceSequentialSampler) {
  MS_LOG(INFO) << "Doing MindDataTestPipeline-TestWIDERFaceSequentialSampler.";

  std::string folder_path = datasets_root_path_ + "/testWIDERFace/";
  // Create a WIDERFace Dataset.
  std::shared_ptr<Dataset> ds = WIDERFace(folder_path, "test", false, std::make_shared<SequentialSampler>(0, 1));
  EXPECT_NE(ds, nullptr);

  // Create an iterator over the result of the above dataset.
  // This will trigger the creation of the Execution Tree and launch it.
  std::shared_ptr<Iterator> iter = ds->CreateIterator();
  EXPECT_NE(iter, nullptr);

  // Iterate the dataset and get each row.
  std::unordered_map<std::string, mindspore::MSTensor> row;
  ASSERT_OK(iter->GetNextRow(&row));

  uint64_t i = 0;
  while (row.size() != 0) {
    i++;
    auto image = row["image"];
    MS_LOG(INFO) << "Tensor image shape: " << image.Shape();
    ASSERT_OK(iter->GetNextRow(&row));
  }

  EXPECT_EQ(i, 1);

  // Manually terminate the pipeline.
  iter->Stop();
}

/// Feature: Test WIDERFace dataset.
/// Description: test WIDERFace with invalid nullptr sampler.
/// Expectation: throw error messages when certain errors occur.
TEST_F(MindDataTestPipeline, TestWIDERFaceWithNullSamplerError) {
  MS_LOG(INFO) << "Doing MindDataTestPipeline-TestWIDERFaceWithNullSamplerError.";

  // Create a WIDERFace Dataset.
  std::string folder_path = datasets_root_path_ + "/testWIDERFace/";
  std::shared_ptr<Dataset> ds = WIDERFace(folder_path, "all", false, nullptr);
  EXPECT_NE(ds, nullptr);

  // Create an iterator over the result of the above dataset.
  std::shared_ptr<Iterator> iter = ds->CreateIterator();
  // Expect failure: invalid WIDERFace input, sampler cannot be nullptr.
  EXPECT_EQ(iter, nullptr);
}

/// Feature: Test WIDERFace dataset.
/// Description: test WIDERFace error.
/// Expectation: throw error messages when certain errors occur.
TEST_F(MindDataTestPipeline, TestWIDERFaceError) {
  MS_LOG(INFO) << "Doing MindDataTestPipeline-TestWIDERFaceError.";

  std::string folder_path = datasets_root_path_ + "/testWIDERFace/";
  // Create a WIDERFace Dataset with a non-existent dataset path.
  std::shared_ptr<Dataset> ds0 = WIDERFace("NotExistFile", "train");
  EXPECT_NE(ds0, nullptr);

  // Create an iterator over the result of the above dataset.
  std::shared_ptr<Iterator> iter0 = ds0->CreateIterator();
  // Expect failure: invalid WIDERFace input.
  EXPECT_EQ(iter0, nullptr);

  // Create a WIDERFace Dataset with an invalid usage.
  std::shared_ptr<Dataset> ds1 = WIDERFace(folder_path, "invalid_usage");
  EXPECT_NE(ds1, nullptr);

  // Create an iterator over the result of the above dataset.
  std::shared_ptr<Iterator> iter1 = ds1->CreateIterator();
  // Expect failure: invalid WIDERFace input.
  EXPECT_EQ(iter1, nullptr);

  // Create a WIDERFace Dataset with an invalid string path.
  std::shared_ptr<Dataset> ds2 = WIDERFace(":*?\"<>|`&;'", "train");
  EXPECT_NE(ds2, nullptr);

  // Create an iterator over the result of the above dataset.
  std::shared_ptr<Iterator> iter2 = ds2->CreateIterator();
  // Expect failure: invalid WIDERFace input.
  EXPECT_EQ(iter2, nullptr);
}

Binary files added: 7 test images (51 KiB – 207 KiB each).

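For orientation: the seven binary images above are the test pictures consumed by the WIDERFace tests, and the text files that follow are the matching annotation files. As an assumption based on the public WIDER FACE release (the paths are not shown in this diff), the test data presumably mirrors the standard layout:

    testWIDERFace/
        WIDER_train/images/<event>/<image>.jpg
        WIDER_val/images/<event>/<image>.jpg
        WIDER_test/images/<event>/<image>.jpg
        wider_face_split/
            wider_face_train_bbx_gt.txt
            wider_face_val_bbx_gt.txt
            wider_face_test_filelist.txt
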
@ -0,0 +1,3 @@
0--Abs/0_Abs_mypic_1_111.jpg
1--Pushup/1_Pushup_mypic_1_111.jpg
1--Pushup/1_Pushup_mypic_7_777.jpg

@ -0,0 +1,26 @@
0--Abs/0_Abs_mypic_4_444.jpg
21
78 221 7 8 2 0 0 0 0 0
78 238 14 17 2 0 0 0 0 0
113 212 11 15 2 0 0 0 0 0
134 260 15 15 2 0 0 0 0 0
163 250 14 17 2 0 0 0 0 0
201 218 10 12 2 0 0 0 0 0
182 266 15 17 2 0 0 0 0 0
245 279 18 15 2 0 0 0 0 0
304 265 16 17 2 0 0 0 2 1
328 295 16 20 2 0 0 0 0 0
389 281 17 19 2 0 0 0 2 0
406 293 21 21 2 0 1 0 0 0
436 290 22 17 2 0 0 0 0 0
522 328 21 18 2 0 1 0 0 0
643 320 23 22 2 0 0 0 0 0
653 224 17 25 2 0 0 0 0 0
793 337 23 30 2 0 0 0 0 0
535 311 16 17 2 0 0 0 1 0
29 220 11 15 2 0 0 0 0 0
3 232 11 15 2 0 0 0 2 0
20 215 12 16 2 0 0 0 2 0
1--Pushup/1_Pushup_mypic_3_333.jpg
1
467 95 112 150 0 0 0 0 0 0

@ -0,0 +1,12 @@
0--Abs/0_Abs_mypic_5_555.jpg
5
111 425 122 127 0 1 0 0 0 1
209 347 70 103 0 1 0 0 0 0
368 252 89 133 0 1 0 0 0 0
555 282 89 100 0 1 0 0 0 1
707 252 92 133 0 1 0 0 0 0
1--Pushup/1_Pushup_mypic_4_444.jpg
3
111 425 122 127 0 1 0 0 0 1
209 347 70 103 0 1 0 0 0 0
707 252 92 133 0 1 0 0 0 0

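The three text files above follow the WIDER FACE annotation format. The three-line file is the test-split filelist (the test set ships image paths only, no boxes); the other two hold train/valid ground truth, where each record is an image path, a face count, and one line per face. Per the official WIDER FACE readme, the per-face fields are x1 y1 w h blur expression illumination invalid occlusion pose, which is where the operator's bbox, blur, expression, illumination, occlusion, pose and invalid columns come from. A minimal parsing sketch (parse_wider_annotations is a hypothetical helper, not part of this PR; it assumes every record lists at least one face, as the test data above does):

def parse_wider_annotations(path):
    """Yield (image_path, faces) records from a WIDER FACE ground-truth file."""
    with open(path) as f:
        lines = [line.strip() for line in f if line.strip()]
    i = 0
    while i < len(lines):
        image_path = lines[i]
        num_faces = int(lines[i + 1])
        i += 2
        faces = []
        for _ in range(num_faces):
            # Field order: x1 y1 w h blur expression illumination invalid occlusion pose
            faces.append([int(v) for v in lines[i].split()])
            i += 1
        yield image_path, faces

# e.g. parse the train ground truth shown above (hypothetical path):
for img, faces in parse_wider_annotations("wider_face_split/wider_face_train_bbx_gt.txt"):
    print(img, len(faces))
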
@ -0,0 +1,290 @@
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import pytest
import numpy as np

import mindspore.dataset as ds
import mindspore.dataset.vision.c_transforms as vision
import mindspore.log as logger

DATA_DIR = "../data/dataset/testWIDERFace/"

def test_wider_face_basic():
    """
    Feature: WIDERFace dataset
    Description: Read all files
    Expectation: Get the correct number of rows
    """
    logger.info("Test WIDERFaceDataset Op")

    # case 1: test loading default usage dataset
    data1 = ds.WIDERFaceDataset(DATA_DIR)
    num_iter1 = 0
    for _ in data1.create_dict_iterator(num_epochs=1, output_numpy=True):
        num_iter1 += 1
    assert num_iter1 == 4

    # case 2: test num_samples
    data2 = ds.WIDERFaceDataset(DATA_DIR, num_samples=1)
    num_iter2 = 0
    for _ in data2.create_dict_iterator(num_epochs=1, output_numpy=True):
        num_iter2 += 1
    assert num_iter2 == 1

    # case 3: test repeat
    data3 = ds.WIDERFaceDataset(DATA_DIR, num_samples=2)
    data3 = data3.repeat(5)
    num_iter3 = 0
    for _ in data3.create_dict_iterator(num_epochs=1, output_numpy=True):
        num_iter3 += 1
    assert num_iter3 == 10


def test_wider_face_noshuffle():
    """
    Feature: WIDERFace dataset
    Description: Test with shuffle=False
    Expectation: Get the correct number of rows
    """
    logger.info("Test Case noShuffle")
    # define parameters
    repeat_count = 1

    # apply dataset operations
    # Note: the default usage "all" reads both the "train" split (2 samples) and the "valid" split (2 samples)
    data1 = ds.WIDERFaceDataset(DATA_DIR, shuffle=False)
    data1 = data1.repeat(repeat_count)

    num_iter = 0
    # each data is a dictionary
    for _ in data1.create_dict_iterator(num_epochs=1, output_numpy=True):
        num_iter += 1

    assert num_iter == 4


def test_wider_face_usage():
    """
    Feature: WIDERFace dataset
    Description: Test usage flag
    Expectation: Get the correct number of rows for each usage
    """
    logger.info("Test WIDERFaceDataset usage flag")

    def test_config(usage, wider_face_path=DATA_DIR):
        try:
            data = ds.WIDERFaceDataset(wider_face_path, usage=usage)
            num_rows = 0
            for _ in data.create_dict_iterator(num_epochs=1, output_numpy=True):
                num_rows += 1
        except (ValueError, TypeError, RuntimeError) as e:
            return str(e)
        return num_rows

    # test the usage of WIDERFace
    assert test_config("test") == 3
    assert test_config("train") == 2
    assert test_config("valid") == 2
    assert test_config("all") == 4
    assert "usage is not within the valid set of ['train', 'test', 'valid', 'all']" in test_config(
        "invalid")

    # point all_wider_face at the folder that contains the full WIDERFace dataset to enable these checks
    all_wider_face = None
    if all_wider_face is not None:
        assert test_config("test", all_wider_face) == 16097
        assert test_config("valid", all_wider_face) == 3226
        assert test_config("train", all_wider_face) == 12880
        assert test_config("all", all_wider_face) == 16106
        assert ds.WIDERFaceDataset(all_wider_face, usage="test").get_dataset_size() == 16097
        assert ds.WIDERFaceDataset(all_wider_face, usage="valid").get_dataset_size() == 3226
        assert ds.WIDERFaceDataset(all_wider_face, usage="train").get_dataset_size() == 12880
        assert ds.WIDERFaceDataset(all_wider_face, usage="all").get_dataset_size() == 16106


def test_wider_face_sequential_sampler():
    """
    Feature: WIDERFace dataset
    Description: Test SequentialSampler
    Expectation: Get the correct number of rows and the same data as shuffle=False
    """
    num_samples = 1
    sampler = ds.SequentialSampler(num_samples=num_samples)
    data1 = ds.WIDERFaceDataset(DATA_DIR, 'test', sampler=sampler)
    data2 = ds.WIDERFaceDataset(DATA_DIR, 'test', shuffle=False, num_samples=num_samples)
    matches_list1, matches_list2 = [], []
    num_iter = 0
    for item1, item2 in zip(data1.create_dict_iterator(num_epochs=1), data2.create_dict_iterator(num_epochs=1)):
        matches_list1.append(item1["image"].asnumpy())
        matches_list2.append(item2["image"].asnumpy())
        num_iter += 1
    np.testing.assert_array_equal(matches_list1, matches_list2)
    assert num_iter == num_samples


def test_wider_face_pipeline():
    """
    Feature: Pipeline test
    Description: Read a sample and resize it
    Expectation: The pipeline returns the expected number of rows
    """
    dataset = ds.WIDERFaceDataset(DATA_DIR, "valid", num_samples=1, decode=True)
    resize_op = vision.Resize((100, 100))
    dataset = dataset.map(input_columns=["image"], operations=resize_op)
    num_iter = 0
    for _ in dataset.create_dict_iterator(num_epochs=1, output_numpy=True):
        num_iter += 1
    assert num_iter == 1


def test_wider_face_exception():
    """
    Feature: WIDERFace dataset
    Description: Pass invalid arguments and failing user functions
    Expectation: Correct error messages are raised
    """
    logger.info("Test error cases for WIDERFaceDataset")
    error_msg_1 = "sampler and shuffle cannot be specified at the same time"
    with pytest.raises(RuntimeError, match=error_msg_1):
        ds.WIDERFaceDataset(DATA_DIR, shuffle=False, sampler=ds.PKSampler(3))

    error_msg_2 = "sampler and sharding cannot be specified at the same time"
    with pytest.raises(RuntimeError, match=error_msg_2):
        ds.WIDERFaceDataset(DATA_DIR, sampler=ds.PKSampler(3), num_shards=2, shard_id=0)

    error_msg_3 = "num_shards is specified and currently requires shard_id as well"
    with pytest.raises(RuntimeError, match=error_msg_3):
        ds.WIDERFaceDataset(DATA_DIR, num_shards=10)

    error_msg_4 = "shard_id is specified but num_shards is not"
    with pytest.raises(RuntimeError, match=error_msg_4):
        ds.WIDERFaceDataset(DATA_DIR, shard_id=0)

    error_msg_5 = "Input shard_id is not within the required interval"
    with pytest.raises(ValueError, match=error_msg_5):
        ds.WIDERFaceDataset(DATA_DIR, num_shards=5, shard_id=-1)
    with pytest.raises(ValueError, match=error_msg_5):
        ds.WIDERFaceDataset(DATA_DIR, num_shards=5, shard_id=5)
    with pytest.raises(ValueError, match=error_msg_5):
        ds.WIDERFaceDataset(DATA_DIR, num_shards=2, shard_id=5)

    error_msg_6 = "num_parallel_workers exceeds"
    with pytest.raises(ValueError, match=error_msg_6):
        ds.WIDERFaceDataset(DATA_DIR, shuffle=False, num_parallel_workers=0)
    with pytest.raises(ValueError, match=error_msg_6):
        ds.WIDERFaceDataset(DATA_DIR, shuffle=False, num_parallel_workers=256)
    with pytest.raises(ValueError, match=error_msg_6):
        ds.WIDERFaceDataset(DATA_DIR, shuffle=False, num_parallel_workers=-2)

    error_msg_7 = "Argument shard_id"
    with pytest.raises(TypeError, match=error_msg_7):
        ds.WIDERFaceDataset(DATA_DIR, num_shards=2, shard_id="0")

    def exception_func(item):
        raise Exception("Error occurred!")

    def check_map_failure(column, usage=None):
        # Mapping a raising PyFunc over `column` must surface the map error message.
        data = ds.WIDERFaceDataset(DATA_DIR, usage=usage, shuffle=False)
        data = data.map(operations=exception_func, input_columns=[column], num_parallel_workers=1)
        with pytest.raises(RuntimeError) as err:
            for _ in data.create_dict_iterator(num_epochs=1, output_numpy=True):
                pass
        assert "map operation: [PyFunc] failed. The corresponding data files" in str(err.value)

    check_map_failure("image", usage="test")
    check_map_failure("image", usage="all")
    for column in ["bbox", "blur", "expression", "illumination", "occlusion", "pose", "invalid"]:
        check_map_failure(column)


if __name__ == '__main__':
    test_wider_face_basic()
    test_wider_face_sequential_sampler()
    test_wider_face_noshuffle()
    test_wider_face_usage()
    test_wider_face_pipeline()
    test_wider_face_exception()
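
Finally, a minimal end-to-end usage sketch of the new operator, tying the tests together (the path matches the test data above; the Resize size is arbitrary):

import mindspore.dataset as ds
import mindspore.dataset.vision.c_transforms as vision

# Load the "train" split, decode JPEGs, and resize the images; the bbox and
# attribute columns (blur, expression, ...) pass through untouched.
data = ds.WIDERFaceDataset("../data/dataset/testWIDERFace/", usage="train",
                           decode=True, shuffle=False)
data = data.map(operations=vision.Resize((256, 256)), input_columns=["image"])
for row in data.create_dict_iterator(num_epochs=1, output_numpy=True):
    print(row["image"].shape, row["bbox"].shape)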