Add API class for data transform ops except Compose, RandomApply, RandomChoice

Add API class for text transforms ops
This commit is contained in:
TinaMengtingZhang 2021-02-11 15:20:11 -05:00
parent 7443ea4b5b
commit c5aa3eeef2
18 changed files with 1198 additions and 550 deletions

View File

@ -483,6 +483,7 @@ FilterDataset::FilterDataset(std::shared_ptr<Dataset> input, std::function<Tenso
}
#endif
// FIXME - Should be removed once all Tensor op API class has been added
MapDataset::MapDataset(std::shared_ptr<Dataset> input, std::vector<std::shared_ptr<TensorOperation>> operations,
const std::vector<std::string> &input_columns, const std::vector<std::string> &output_columns,
const std::vector<std::string> &project_columns, const std::shared_ptr<DatasetCache> &cache,

View File

@ -18,6 +18,8 @@
#include "minddata/dataset/include/text.h"
#include "minddata/dataset/text/ir/kernels/text_ir.h"
namespace mindspore {
namespace dataset {
@ -28,126 +30,179 @@ namespace text {
// (In alphabetical order)
#ifndef _WIN32
std::shared_ptr<BasicTokenizerOperation> BasicTokenizer(bool lower_case, bool keep_whitespace,
const NormalizeForm normalize_form, bool preserve_unused_token,
bool with_offsets) {
auto op = std::make_shared<BasicTokenizerOperation>(lower_case, keep_whitespace, normalize_form,
preserve_unused_token, with_offsets);
// BasicTokenizer
BasicTokenizer::BasicTokenizer(bool lower_case, bool keep_whitespace, const NormalizeForm normalize_form,
bool preserve_unused_token, bool with_offsets)
: lower_case_(lower_case),
keep_whitespace_(keep_whitespace),
normalize_form_(normalize_form),
preserve_unused_token_(preserve_unused_token),
with_offsets_(with_offsets) {}
return op->ValidateParams() ? op : nullptr;
std::shared_ptr<TensorOperation> BasicTokenizer::Parse() {
return std::make_shared<BasicTokenizerOperation>(lower_case_, keep_whitespace_, normalize_form_,
preserve_unused_token_, with_offsets_);
}
std::shared_ptr<BertTokenizerOperation> BertTokenizer(const std::shared_ptr<Vocab> &vocab,
const std::string &suffix_indicator, int32_t max_bytes_per_token,
const std::string &unknown_token, bool lower_case,
bool keep_whitespace, const NormalizeForm normalize_form,
bool preserve_unused_token, bool with_offsets) {
auto op =
std::make_shared<BertTokenizerOperation>(vocab, suffix_indicator, max_bytes_per_token, unknown_token, lower_case,
keep_whitespace, normalize_form, preserve_unused_token, with_offsets);
// BertTokenizer
BertTokenizer::BertTokenizer(const std::shared_ptr<Vocab> &vocab, const std::string &suffix_indicator,
int32_t max_bytes_per_token, const std::string &unknown_token, bool lower_case,
bool keep_whitespace, const NormalizeForm normalize_form, bool preserve_unused_token,
bool with_offsets)
: vocab_(vocab),
suffix_indicator_(suffix_indicator),
max_bytes_per_token_(max_bytes_per_token),
unknown_token_(unknown_token),
lower_case_(lower_case),
keep_whitespace_(keep_whitespace),
normalize_form_(normalize_form),
preserve_unused_token_(preserve_unused_token),
with_offsets_(with_offsets) {}
return op->ValidateParams() ? op : nullptr;
std::shared_ptr<TensorOperation> BertTokenizer::Parse() {
return std::make_shared<BertTokenizerOperation>(vocab_, suffix_indicator_, max_bytes_per_token_, unknown_token_,
lower_case_, keep_whitespace_, normalize_form_,
preserve_unused_token_, with_offsets_);
}
std::shared_ptr<CaseFoldOperation> CaseFold() {
auto op = std::make_shared<CaseFoldOperation>();
// CaseFold
CaseFold::CaseFold() {}
return op->ValidateParams() ? op : nullptr;
}
std::shared_ptr<TensorOperation> CaseFold::Parse() { return std::make_shared<CaseFoldOperation>(); }
#endif
std::shared_ptr<JiebaTokenizerOperation> JiebaTokenizer(const std::string &hmm_path, const std::string &mp_path,
const JiebaMode &mode, bool with_offsets) {
auto op = std::make_shared<JiebaTokenizerOperation>(hmm_path, mp_path, mode, with_offsets);
// JiebaTokenizer
JiebaTokenizer::JiebaTokenizer(const std::string &hmm_path, const std::string &mp_path, const JiebaMode &mode,
bool with_offsets)
: hmm_path_(hmm_path), mp_path_(mp_path), mode_(mode), with_offsets_(with_offsets) {}
return op->ValidateParams() ? op : nullptr;
std::shared_ptr<TensorOperation> JiebaTokenizer::Parse() {
std::shared_ptr<JiebaTokenizerOperation> jieba_tokenizer =
std::make_shared<JiebaTokenizerOperation>(hmm_path_, mp_path_, mode_, with_offsets_);
for (auto &word : words_list_) {
Status rc = jieba_tokenizer->AddWord(word.first, word.second);
if (rc.IsError()) {
MS_LOG(ERROR) << rc;
return {};
}
}
return jieba_tokenizer;
}
std::shared_ptr<LookupOperation> Lookup(const std::shared_ptr<Vocab> &vocab,
const std::optional<std::string> &unknown_token, const std::string &data_type) {
auto op = std::make_shared<LookupOperation>(vocab, unknown_token, data_type);
return op->ValidateParams() ? op : nullptr;
Status JiebaTokenizer::AddWord(const std::string &word, int64_t freq) {
if (word.empty()) {
std::string err_msg = "JiebaTokenizer : The parameter word is empty or not provided.";
MS_LOG(ERROR) << err_msg;
RETURN_STATUS_SYNTAX_ERROR(err_msg);
}
if (freq < 0) {
std::string err_msg = "JiebaTokenizer : The parameter freq must be greater than or equal to 0.";
MS_LOG(ERROR) << err_msg;
RETURN_STATUS_SYNTAX_ERROR(err_msg);
}
words_list_.emplace_back(word, freq);
return Status::OK();
}
std::shared_ptr<NgramOperation> Ngram(const std::vector<int32_t> &ngrams,
const std::pair<std::string, int32_t> &left_pad,
const std::pair<std::string, int32_t> &right_pad, const std::string &separator) {
auto op = std::make_shared<NgramOperation>(ngrams, left_pad, right_pad, separator);
// Lookup
Lookup::Lookup(const std::shared_ptr<Vocab> &vocab, const std::optional<std::string> &unknown_token,
const std::string &data_type)
: vocab_(vocab), unknown_token_(unknown_token), data_type_(data_type) {}
return op->ValidateParams() ? op : nullptr;
std::shared_ptr<TensorOperation> Lookup::Parse() {
return std::make_shared<LookupOperation>(vocab_, unknown_token_, data_type_);
}
// Ngram
Ngram::Ngram(const std::vector<int32_t> &ngrams, const std::pair<std::string, int32_t> &left_pad,
const std::pair<std::string, int32_t> &right_pad, const std::string &separator)
: ngrams_(ngrams), left_pad_(left_pad), right_pad_(right_pad), separator_(separator) {}
std::shared_ptr<TensorOperation> Ngram::Parse() {
return std::make_shared<NgramOperation>(ngrams_, left_pad_, right_pad_, separator_);
}
#ifndef _WIN32
std::shared_ptr<NormalizeUTF8Operation> NormalizeUTF8(NormalizeForm normalize_form) {
auto op = std::make_shared<NormalizeUTF8Operation>(normalize_form);
// NormalizeUTF8
NormalizeUTF8::NormalizeUTF8(NormalizeForm normalize_form) : normalize_form_(normalize_form) {}
return op->ValidateParams() ? op : nullptr;
std::shared_ptr<TensorOperation> NormalizeUTF8::Parse() {
return std::make_shared<NormalizeUTF8Operation>(normalize_form_);
}
std::shared_ptr<RegexReplaceOperation> RegexReplace(std::string pattern, std::string replace, bool replace_all) {
auto op = std::make_shared<RegexReplaceOperation>(pattern, replace, replace_all);
// RegexReplace
RegexReplace::RegexReplace(std::string pattern, std::string replace, bool replace_all)
: pattern_(pattern), replace_(replace), replace_all_(replace_all) {}
return op->ValidateParams() ? op : nullptr;
std::shared_ptr<TensorOperation> RegexReplace::Parse() {
return std::make_shared<RegexReplaceOperation>(pattern_, replace_, replace_all_);
}
std::shared_ptr<RegexTokenizerOperation> RegexTokenizer(std::string delim_pattern, std::string keep_delim_pattern,
bool with_offsets) {
auto op = std::make_shared<RegexTokenizerOperation>(delim_pattern, keep_delim_pattern, with_offsets);
// RegexTokenizer
RegexTokenizer::RegexTokenizer(std::string delim_pattern, std::string keep_delim_pattern, bool with_offsets)
: delim_pattern_(delim_pattern), keep_delim_pattern_(keep_delim_pattern), with_offsets_(with_offsets) {}
return op->ValidateParams() ? op : nullptr;
std::shared_ptr<TensorOperation> RegexTokenizer::Parse() {
return std::make_shared<RegexTokenizerOperation>(delim_pattern_, keep_delim_pattern_, with_offsets_);
}
#endif
std::shared_ptr<SentencePieceTokenizerOperation> SentencePieceTokenizer(
const std::shared_ptr<SentencePieceVocab> &vocab, SPieceTokenizerOutType out_type) {
auto op = std::make_shared<SentencePieceTokenizerOperation>(vocab, out_type);
// SentencePieceTokenizer
SentencePieceTokenizer::SentencePieceTokenizer(const std::shared_ptr<SentencePieceVocab> &vocab,
SPieceTokenizerOutType out_type)
: vocab_(vocab), out_type_(out_type) {}
return op->ValidateParams() ? op : nullptr;
SentencePieceTokenizer::SentencePieceTokenizer(const std::string &vocab_path, SPieceTokenizerOutType out_type)
: vocab_path_(vocab_path), out_type_(out_type) {}
std::shared_ptr<TensorOperation> SentencePieceTokenizer::Parse() {
if (vocab_ != nullptr) {
return std::make_shared<SentencePieceTokenizerOperation>(vocab_, out_type_);
} else {
return std::make_shared<SentencePieceTokenizerOperation>(vocab_path_, out_type_);
}
}
std::shared_ptr<SentencePieceTokenizerOperation> SentencePieceTokenizer(const std::string &vocab_path,
SPieceTokenizerOutType out_type) {
auto op = std::make_shared<SentencePieceTokenizerOperation>(vocab_path, out_type);
// SlidingWindow
SlidingWindow::SlidingWindow(const int32_t width, const int32_t axis) : width_(width), axis_(axis) {}
return op->ValidateParams() ? op : nullptr;
std::shared_ptr<TensorOperation> SlidingWindow::Parse() {
return std::make_shared<SlidingWindowOperation>(width_, axis_);
}
std::shared_ptr<SlidingWindowOperation> SlidingWindow(const int32_t width, const int32_t axis) {
auto op = std::make_shared<SlidingWindowOperation>(width, axis);
// ToNumber
ToNumber::ToNumber(const std::string &data_type) : data_type_(data_type) {}
return op->ValidateParams() ? op : nullptr;
std::shared_ptr<TensorOperation> ToNumber::Parse() { return std::make_shared<ToNumberOperation>(data_type_); }
// TruncateSequencePair
TruncateSequencePair::TruncateSequencePair(int32_t max_length) : max_length_(max_length) {}
std::shared_ptr<TensorOperation> TruncateSequencePair::Parse() {
return std::make_shared<TruncateSequencePairOperation>(max_length_);
}
std::shared_ptr<ToNumberOperation> ToNumber(const std::string &data_type) {
auto op = std::make_shared<ToNumberOperation>(data_type);
// UnicodeCharTokenizer
UnicodeCharTokenizer::UnicodeCharTokenizer(bool with_offsets) : with_offsets_(with_offsets) {}
return op->ValidateParams() ? op : nullptr;
}
std::shared_ptr<TruncateSequencePairOperation> TruncateSequencePair(int32_t max_length) {
auto op = std::make_shared<TruncateSequencePairOperation>(max_length);
return op->ValidateParams() ? op : nullptr;
}
std::shared_ptr<UnicodeCharTokenizerOperation> UnicodeCharTokenizer(bool with_offsets) {
auto op = std::make_shared<UnicodeCharTokenizerOperation>(with_offsets);
return op->ValidateParams() ? op : nullptr;
std::shared_ptr<TensorOperation> UnicodeCharTokenizer::Parse() {
return std::make_shared<UnicodeCharTokenizerOperation>(with_offsets_);
}
#ifndef _WIN32
std::shared_ptr<UnicodeScriptTokenizerOperation> UnicodeScriptTokenizer(bool keep_whitespace, bool with_offsets) {
auto op = std::make_shared<UnicodeScriptTokenizerOperation>(keep_whitespace, with_offsets);
// UnicodeScriptTokenizer
UnicodeScriptTokenizer::UnicodeScriptTokenizer(bool keep_whitespace, bool with_offsets)
: keep_whitespace_(keep_whitespace), with_offsets_(with_offsets) {}
return op->ValidateParams() ? op : nullptr;
std::shared_ptr<TensorOperation> UnicodeScriptTokenizer::Parse() {
return std::make_shared<UnicodeScriptTokenizerOperation>(keep_whitespace_, with_offsets_);
}
std::shared_ptr<WhitespaceTokenizerOperation> WhitespaceTokenizer(bool with_offsets) {
auto op = std::make_shared<WhitespaceTokenizerOperation>(with_offsets);
// WhitespaceTokenizer
WhitespaceTokenizer::WhitespaceTokenizer(bool with_offsets) : with_offsets_(with_offsets) {}
return op->ValidateParams() ? op : nullptr;
std::shared_ptr<TensorOperation> WhitespaceTokenizer::Parse() {
return std::make_shared<WhitespaceTokenizerOperation>(with_offsets_);
}
#endif
} // namespace text

View File

@ -32,19 +32,15 @@ std::shared_ptr<ComposeOperation> Compose(const std::vector<std::shared_ptr<Tens
return op->ValidateParams() ? op : nullptr;
}
// Function to create DuplicateOperation.
std::shared_ptr<DuplicateOperation> Duplicate() {
auto op = std::make_shared<DuplicateOperation>();
// Input validation
return op->ValidateParams() ? op : nullptr;
}
// Constructor to Duplicate
Duplicate::Duplicate() {}
// Function to create OneHotOperation.
std::shared_ptr<OneHotOperation> OneHot(int32_t num_classes) {
auto op = std::make_shared<OneHotOperation>(num_classes);
// Input validation
return op->ValidateParams() ? op : nullptr;
}
std::shared_ptr<TensorOperation> Duplicate::Parse() { return std::make_shared<DuplicateOperation>(); }
// Constructor to OneHot
OneHot::OneHot(int32_t num_classes) : num_classes_(num_classes) {}
std::shared_ptr<TensorOperation> OneHot::Parse() { return std::make_shared<OneHotOperation>(num_classes_); }
// Function to create RandomApplyOperation.
std::shared_ptr<RandomApplyOperation> RandomApply(const std::vector<std::shared_ptr<TensorOperation>> &transforms,
@ -61,20 +57,16 @@ std::shared_ptr<RandomChoiceOperation> RandomChoice(const std::vector<std::share
return op->ValidateParams() ? op : nullptr;
}
// Function to create TypeCastOperation.
std::shared_ptr<TypeCastOperation> TypeCast(std::string data_type) {
auto op = std::make_shared<TypeCastOperation>(data_type);
// Input validation
return op->ValidateParams() ? op : nullptr;
}
// Constructor to TypeCast
TypeCast::TypeCast(std::string data_type) : data_type_(data_type) {}
std::shared_ptr<TensorOperation> TypeCast::Parse() { return std::make_shared<TypeCastOperation>(data_type_); }
#ifndef ENABLE_ANDROID
// Function to create UniqueOperation.
std::shared_ptr<UniqueOperation> Unique() {
auto op = std::make_shared<UniqueOperation>();
// Input validation
return op->ValidateParams() ? op : nullptr;
}
// Constructor to Unique
Unique::Unique() {}
std::shared_ptr<TensorOperation> Unique::Parse() { return std::make_shared<UniqueOperation>(); }
#endif
} // namespace transforms
} // namespace dataset

View File

@ -19,6 +19,8 @@
#include <sys/stat.h>
#include <unistd.h>
#include <algorithm>
#include <map>
#include <memory>
#include <set>
@ -303,6 +305,33 @@ class Dataset : public std::enable_shared_from_this<Dataset> {
cache, callbacks);
}
std::shared_ptr<MapDataset> Map(std::vector<std::shared_ptr<TensorTransform>> operations,
const std::vector<std::string> &input_columns = {},
const std::vector<std::string> &output_columns = {},
const std::vector<std::string> &project_columns = {},
const std::shared_ptr<DatasetCache> &cache = nullptr,
std::vector<std::shared_ptr<DSCallback>> callbacks = {}) {
std::vector<std::shared_ptr<TensorOperation>> transform_ops;
(void)std::transform(
operations.begin(), operations.end(), std::back_inserter(transform_ops),
[](std::shared_ptr<TensorTransform> op) -> std::shared_ptr<TensorOperation> { return op->Parse(); });
return std::make_shared<MapDataset>(shared_from_this(), transform_ops, input_columns, output_columns,
project_columns, cache, callbacks);
}
std::shared_ptr<MapDataset> Map(const std::vector<std::reference_wrapper<TensorTransform>> operations,
const std::vector<std::string> &input_columns = {},
const std::vector<std::string> &output_columns = {},
const std::vector<std::string> &project_columns = {},
const std::shared_ptr<DatasetCache> &cache = nullptr,
std::vector<std::shared_ptr<DSCallback>> callbacks = {}) {
std::vector<std::shared_ptr<TensorOperation>> transform_ops;
(void)std::transform(operations.begin(), operations.end(), std::back_inserter(transform_ops),
[](TensorTransform &op) -> std::shared_ptr<TensorOperation> { return op.Parse(); });
return std::make_shared<MapDataset>(shared_from_this(), transform_ops, input_columns, output_columns,
project_columns, cache, callbacks);
}
/// \brief Function to create a Project Dataset
/// \notes Applies project to the dataset
/// \param[in] columns The name of columns to project

View File

@ -27,218 +27,419 @@
#include "minddata/dataset/include/constants.h"
#include "minddata/dataset/include/transforms.h"
// FIXME - This internal IR header will be removed when external API classes are provided
#include "minddata/dataset/text/ir/kernels/text_ir.h"
namespace mindspore {
namespace dataset {
class Vocab;
class SentencePieceVocab;
class TensorOperation;
// Transform operations for text
namespace text {
// Text Op classes (in alphabetical order)
#ifndef _WIN32
class BasicTokenizerOperation;
class BertTokenizerOperation;
class CaseFoldOperation;
#endif
class JiebaTokenizerOperation;
class LookupOperation;
class NgramOperation;
#ifndef _WIN32
class NormalizeUTF8Operation;
class RegexReplaceOperation;
class RegexTokenizerOperation;
#endif
class SentencePieceTokenizerOperation;
class SlidingWindowOperation;
class ToNumberOperation;
class TruncateSequencePairOperation;
class UnicodeCharTokenizerOperation;
#ifndef _WIN32
class UnicodeScriptTokenizerOperation;
class WhitespaceTokenizerOperation;
#endif
#ifndef _WIN32
/// \brief Tokenize a scalar tensor of UTF-8 string by specific rules.
/// \notes BasicTokenizer is not supported on Windows platform yet.
/// \param[in] lower_case If true, apply CaseFold, NormalizeUTF8(NFD mode), RegexReplace operation on input text to
/// fold the text to lower case and strip accents characters. If false, only apply NormalizeUTF8('normalization_form'
/// mode) operation on input text (default=false).
/// \param[in] keep_whitespace If true, the whitespace will be kept in out tokens (default=false).
/// \param[in] normalize_form Used to specify a specific normalize mode. This is only effective when 'lower_case' is
/// false. See NormalizeUTF8 for details (default=NormalizeForm::kNone).
/// \param[in] preserve_unused_token If true, do not split special tokens like '[CLS]', '[SEP]', '[UNK]', '[PAD]',
/// '[MASK]' (default=true).
/// \param[in] with_offsets If or not output offsets of tokens (default=false).
/// \return Shared pointer to the current TensorOperation.
std::shared_ptr<BasicTokenizerOperation> BasicTokenizer(bool lower_case = false, bool keep_whitespace = false,
const NormalizeForm normalize_form = NormalizeForm::kNone,
bool preserve_unused_token = true, bool with_offsets = false);
class BasicTokenizer : public TensorTransform {
public:
/// \brief Constructor.
/// \param[in] lower_case If true, apply CaseFold, NormalizeUTF8(NFD mode), RegexReplace operation on input text to
/// fold the text to lower case and strip accents characters. If false, only apply
/// NormalizeUTF8('normalization_form' mode) operation on input text (default=false).
/// \param[in] keep_whitespace If true, the whitespace will be kept in out tokens (default=false).
/// \param[in] normalize_form Used to specify a specific normalize mode. This is only effective when 'lower_case' is
/// false. See NormalizeUTF8 for details (default=NormalizeForm::kNone).
/// \param[in] preserve_unused_token If true, do not split special tokens like '[CLS]', '[SEP]', '[UNK]', '[PAD]',
/// '[MASK]' (default=true).
/// \param[in] with_offsets If or not output offsets of tokens (default=false).
explicit BasicTokenizer(bool lower_case = false, bool keep_whitespace = false,
const NormalizeForm normalize_form = NormalizeForm::kNone, bool preserve_unused_token = true,
bool with_offsets = false);
/// \brief Destructor
~BasicTokenizer() = default;
/// \brief Function to convert TensorTransform object into a TensorOperation object.
/// \return return code
std::shared_ptr<TensorOperation> Parse() override;
private:
bool lower_case_;
bool keep_whitespace_;
NormalizeForm normalize_form_;
bool preserve_unused_token_;
bool with_offsets_;
};
/// \brief Tokenizer used for Bert text process.
/// \notes BertTokenizer is not supported on Windows platform yet.
/// \param[in] vocab A Vocab object.
/// \param[in] suffix_indicator Used to show that the subword is the last part of a word (default='##').
/// \param[in] max_bytes_per_token Tokens exceeding this length will not be further split (default=100).
/// \param[in] unknown_token When a token cannot be found, return the token directly if 'unknown_token' is an empty
/// string, else return the string specified(default='[UNK]').
/// \param[in] lower_case If true, apply CaseFold, NormalizeUTF8(NFD mode), RegexReplace operation on input text to
/// fold the text to lower case and strip accents characters. If false, only apply NormalizeUTF8('normalization_form'
/// mode) operation on input text (default=false).
/// \param[in] keep_whitespace If true, the whitespace will be kept in out tokens (default=false).
/// \param[in] normalize_form Used to specify a specific normalize mode. This is only effective when 'lower_case' is
/// false. See NormalizeUTF8 for details (default=NormalizeForm::kNone).
/// \param[in] preserve_unused_token If true, do not split special tokens like '[CLS]', '[SEP]', '[UNK]', '[PAD]',
/// '[MASK]' (default=true).
/// \param[in] with_offsets If or not output offsets of tokens (default=false).
/// \return Shared pointer to the current TensorOperation.
std::shared_ptr<BertTokenizerOperation> BertTokenizer(const std::shared_ptr<Vocab> &vocab,
const std::string &suffix_indicator = "##",
int32_t max_bytes_per_token = 100,
const std::string &unknown_token = "[UNK]",
bool lower_case = false, bool keep_whitespace = false,
const NormalizeForm normalize_form = NormalizeForm::kNone,
bool preserve_unused_token = true, bool with_offsets = false);
class BertTokenizer : public TensorTransform {
public:
/// \brief Constructor.
/// \param[in] vocab A Vocab object.
/// \param[in] suffix_indicator Used to show that the subword is the last part of a word (default='##').
/// \param[in] max_bytes_per_token Tokens exceeding this length will not be further split (default=100).
/// \param[in] unknown_token When a token cannot be found, return the token directly if 'unknown_token' is an empty
/// string, else return the string specified(default='[UNK]').
/// \param[in] lower_case If true, apply CaseFold, NormalizeUTF8(NFD mode), RegexReplace operation on input text to
/// fold the text to lower case and strip accents characters. If false, only apply
/// NormalizeUTF8('normalization_form' mode) operation on input text (default=false).
/// \param[in] keep_whitespace If true, the whitespace will be kept in out tokens (default=false).
/// \param[in] normalize_form Used to specify a specific normalize mode. This is only effective when 'lower_case' is
/// false. See NormalizeUTF8 for details (default=NormalizeForm::kNone).
/// \param[in] preserve_unused_token If true, do not split special tokens like '[CLS]', '[SEP]', '[UNK]', '[PAD]',
/// '[MASK]' (default=true).
/// \param[in] with_offsets If or not output offsets of tokens (default=false).
explicit BertTokenizer(const std::shared_ptr<Vocab> &vocab, const std::string &suffix_indicator = "##",
int32_t max_bytes_per_token = 100, const std::string &unknown_token = "[UNK]",
bool lower_case = false, bool keep_whitespace = false,
const NormalizeForm normalize_form = NormalizeForm::kNone, bool preserve_unused_token = true,
bool with_offsets = false);
/// \brief Destructor
~BertTokenizer() = default;
/// \brief Function to convert TensorTransform object into a TensorOperation object.
/// \return return code
std::shared_ptr<TensorOperation> Parse() override;
private:
std::shared_ptr<Vocab> vocab_;
std::string suffix_indicator_;
int32_t max_bytes_per_token_;
std::string unknown_token_;
bool lower_case_;
bool keep_whitespace_;
NormalizeForm normalize_form_;
bool preserve_unused_token_;
bool with_offsets_;
};
/// \brief Apply case fold operation on UTF-8 string tensor.
/// \return Shared pointer to the current TensorOperation.
std::shared_ptr<CaseFoldOperation> CaseFold();
class CaseFold : public TensorTransform {
public:
/// \brief Constructor.
CaseFold();
/// \brief Destructor
~CaseFold() = default;
/// \brief Function to convert TensorTransform object into a TensorOperation object.
/// \return return code
std::shared_ptr<TensorOperation> Parse() override;
};
#endif
/// \brief Tokenize Chinese string into words based on dictionary.
/// \notes The integrity of the HMMSEgment algorithm and MPSegment algorithm files must be confirmed.
/// \param[in] hmm_path Dictionary file is used by HMMSegment algorithm. The dictionary can be obtained on the
/// official website of cppjieba.
/// \param[in] mp_path Dictionary file is used by MPSegment algorithm. The dictionary can be obtained on the
/// official website of cppjieba.
/// \param[in] mode Valid values can be any of [JiebaMode.MP, JiebaMode.HMM, JiebaMode.MIX](default=JiebaMode.MIX).
/// - JiebaMode.kMP, tokenize with MPSegment algorithm.
/// - JiebaMode.kHMM, tokenize with Hiddel Markov Model Segment algorithm.
/// - JiebaMode.kMIX, tokenize with a mix of MPSegment and HMMSegment algorithm.
/// \param[in] with_offsets If or not output offsets of tokens (default=false).
/// \return Shared pointer to the current TensorOperation.
std::shared_ptr<JiebaTokenizerOperation> JiebaTokenizer(const std::string &hmm_path, const std::string &mp_path,
const JiebaMode &mode = JiebaMode::kMix,
bool with_offsets = false);
class JiebaTokenizer : public TensorTransform {
public:
/// \brief Constructor.
/// \param[in] hmm_path Dictionary file is used by HMMSegment algorithm. The dictionary can be obtained on the
/// official website of cppjieba.
/// \param[in] mp_path Dictionary file is used by MPSegment algorithm. The dictionary can be obtained on the
/// official website of cppjieba.
/// \param[in] mode Valid values can be any of [JiebaMode.MP, JiebaMode.HMM, JiebaMode.MIX](default=JiebaMode.MIX).
/// - JiebaMode.kMP, tokenize with MPSegment algorithm.
/// - JiebaMode.kHMM, tokenize with Hiddel Markov Model Segment algorithm.
/// - JiebaMode.kMIX, tokenize with a mix of MPSegment and HMMSegment algorithm.
/// \param[in] with_offsets If or not output offsets of tokens (default=false).
explicit JiebaTokenizer(const std::string &hmm_path, const std::string &mp_path,
const JiebaMode &mode = JiebaMode::kMix, bool with_offsets = false);
/// \brief Destructor
~JiebaTokenizer() = default;
/// \brief Function to convert TensorTransform object into a TensorOperation object.
/// \return return code
std::shared_ptr<TensorOperation> Parse() override;
Status AddWord(const std::string &word, int64_t freq = 0);
private:
std::string hmm_path_;
std::string mp_path_;
JiebaMode mode_;
bool with_offsets_;
std::vector<std::pair<std::string, int64_t>> words_list_;
};
/// \brief Look up a word into an id according to the input vocabulary table.
/// \param[in] vocab a Vocab object.
/// \param[in] unknown_token word to use for lookup if the word being looked up is out of Vocabulary (oov).
/// If unknown_token is oov, runtime error will be thrown. If unknown_token is {}, which means that not to
// specify unknown_token when word being out of Vocabulary (default={}).
/// \param[in] data_type type of the tensor after lookup, typically int32.
/// \return Shared pointer to the current TensorOperation.
class Lookup : public TensorTransform {
public:
/// \brief Constructor.
/// \param[in] vocab a Vocab object.
/// \param[in] unknown_token word to use for lookup if the word being looked up is out of Vocabulary (oov).
/// If unknown_token is oov, runtime error will be thrown. If unknown_token is {}, which means that not to
/// specify unknown_token when word being out of Vocabulary (default={}).
/// \param[in] data_type type of the tensor after lookup, typically int32.
explicit Lookup(const std::shared_ptr<Vocab> &vocab, const std::optional<std::string> &unknown_token = {},
const std::string &data_type = "int32");
std::shared_ptr<LookupOperation> Lookup(const std::shared_ptr<Vocab> &vocab,
const std::optional<std::string> &unknown_token = {},
const std::string &data_type = "int32");
/// \brief Destructor
~Lookup() = default;
/// \brief Function to convert TensorTransform object into a TensorOperation object.
/// \return return code
std::shared_ptr<TensorOperation> Parse() override;
private:
std::shared_ptr<Vocab> vocab_;
std::optional<std::string> unknown_token_;
std::string data_type_;
};
/// \brief TensorOp to generate n-gram from a 1-D string Tensor.
/// \param[in] ngrams ngrams is a vector of positive integers. For example, if ngrams={4, 3}, then the result
/// would be a 4-gram followed by a 3-gram in the same tensor. If the number of words is not enough to make up
/// for a n-gram, an empty string will be returned.
/// \param[in] left_pad {"pad_token", pad_width}. Padding performed on left side of the sequence. pad_width will
/// be capped at n-1. left_pad=("_",2) would pad left side of the sequence with "__" (default={"", 0}}).
/// \param[in] right_pad {"pad_token", pad_width}. Padding performed on right side of the sequence.pad_width will
/// be capped at n-1. right_pad=("-":2) would pad right side of the sequence with "--" (default={"", 0}}).
/// \param[in] separator Symbol used to join strings together (default=" ").
/// \return Shared pointer to the current TensorOperation.
std::shared_ptr<NgramOperation> Ngram(const std::vector<int32_t> &ngrams,
const std::pair<std::string, int32_t> &left_pad = {"", 0},
const std::pair<std::string, int32_t> &right_pad = {"", 0},
const std::string &separator = " ");
class Ngram : public TensorTransform {
public:
/// \brief Constructor.
/// \param[in] ngrams ngrams is a vector of positive integers. For example, if ngrams={4, 3}, then the result
/// would be a 4-gram followed by a 3-gram in the same tensor. If the number of words is not enough to make up
/// for a n-gram, an empty string will be returned.
/// \param[in] left_pad {"pad_token", pad_width}. Padding performed on left side of the sequence. pad_width will
/// be capped at n-1. left_pad=("_",2) would pad left side of the sequence with "__" (default={"", 0}}).
/// \param[in] right_pad {"pad_token", pad_width}. Padding performed on right side of the sequence.pad_width will
/// be capped at n-1. right_pad=("-":2) would pad right side of the sequence with "--" (default={"", 0}}).
/// \param[in] separator Symbol used to join strings together (default=" ").
explicit Ngram(const std::vector<int32_t> &ngrams, const std::pair<std::string, int32_t> &left_pad = {"", 0},
const std::pair<std::string, int32_t> &right_pad = {"", 0}, const std::string &separator = " ");
/// \brief Destructor
~Ngram() = default;
/// \brief Function to convert TensorTransform object into a TensorOperation object.
/// \return return code
std::shared_ptr<TensorOperation> Parse() override;
private:
std::vector<int32_t> ngrams_;
std::pair<std::string, int32_t> left_pad_;
std::pair<std::string, int32_t> right_pad_;
std::string separator_;
};
#ifndef _WIN32
/// \brief Apply normalize operation on UTF-8 string tensor.
/// \param[in] normalize_form Valid values can be any of [NormalizeForm::kNone,NormalizeForm::kNfc,
/// NormalizeForm::kNfkc,
/// NormalizeForm::kNfd, NormalizeForm::kNfkd](default=NormalizeForm::kNfkc).
/// See http://unicode.org/reports/tr15/ for details.
/// - NormalizeForm.NONE, do nothing for input string tensor.
/// - NormalizeForm.NFC, normalize with Normalization Form C.
/// - NormalizeForm.NFKC, normalize with Normalization Form KC.
/// - NormalizeForm.NFD, normalize with Normalization Form D.
/// - NormalizeForm.NFKD, normalize with Normalization Form KD.
/// \return Shared pointer to the current TensorOperation.
std::shared_ptr<NormalizeUTF8Operation> NormalizeUTF8(NormalizeForm normalize_form = NormalizeForm::kNfkc);
class NormalizeUTF8 : public TensorTransform {
public:
/// \brief Constructor.
/// \param[in] normalize_form Valid values can be any of [NormalizeForm::kNone,NormalizeForm::kNfc,
/// NormalizeForm::kNfkc,
/// NormalizeForm::kNfd, NormalizeForm::kNfkd](default=NormalizeForm::kNfkc).
/// See http://unicode.org/reports/tr15/ for details.
/// - NormalizeForm.NONE, do nothing for input string tensor.
/// - NormalizeForm.NFC, normalize with Normalization Form C.
/// - NormalizeForm.NFKC, normalize with Normalization Form KC.
/// - NormalizeForm.NFD, normalize with Normalization Form D.
/// - NormalizeForm.NFKD, normalize with Normalization Form KD.
explicit NormalizeUTF8(NormalizeForm normalize_form = NormalizeForm::kNfkc);
/// \brief Destructor
~NormalizeUTF8() = default;
/// \brief Function to convert TensorTransform object into a TensorOperation object.
/// \return return code
std::shared_ptr<TensorOperation> Parse() override;
private:
NormalizeForm normalize_form_;
};
/// \brief Replace UTF-8 string tensor with 'replace' according to regular expression 'pattern'.
/// \param[in] pattern The regex expression patterns.
/// \param[in] replace The string to replace matched element.
/// \param[in] replace_all Confirm whether to replace all. If false, only replace first matched element;
/// if true, replace all matched elements (default=true).
/// \return Shared pointer to the current TensorOperation.
std::shared_ptr<RegexReplaceOperation> RegexReplace(std::string pattern, std::string replace, bool replace_all = true);
class RegexReplace : public TensorTransform {
public:
/// \brief Constructor.
/// \param[in] pattern The regex expression patterns.
/// \param[in] replace The string to replace matched element.
/// \param[in] replace_all Confirm whether to replace all. If false, only replace first matched element;
/// if true, replace all matched elements (default=true).
explicit RegexReplace(std::string pattern, std::string replace, bool replace_all = true);
/// \brief Destructor
~RegexReplace() = default;
/// \brief Function to convert TensorTransform object into a TensorOperation object.
/// \return return code
std::shared_ptr<TensorOperation> Parse() override;
private:
std::string pattern_;
std::string replace_;
bool replace_all_;
};
/// \brief Tokenize a scalar tensor of UTF-8 string by regex expression pattern.
/// \param[in] delim_pattern The pattern of regex delimiters.
/// \param[in] keep_delim_pattern The string matched by 'delim_pattern' can be kept as a token if it can be
/// matched by 'keep_delim_pattern'. The default value is an empty string ("")
/// which means that delimiters will not be kept as an output token (default="").
/// \param[in] with_offsets If or not output offsets of tokens (default=false).
/// \return Shared pointer to the current TensorOperation.
std::shared_ptr<RegexTokenizerOperation> RegexTokenizer(std::string delim_pattern, std::string keep_delim_pattern = "",
bool with_offsets = false);
class RegexTokenizer : public TensorTransform {
public:
/// \brief Constructor.
/// \param[in] delim_pattern The pattern of regex delimiters.
/// \param[in] keep_delim_pattern The string matched by 'delim_pattern' can be kept as a token if it can be
/// matched by 'keep_delim_pattern'. The default value is an empty string ("")
/// which means that delimiters will not be kept as an output token (default="").
/// \param[in] with_offsets If or not output offsets of tokens (default=false).
explicit RegexTokenizer(std::string delim_pattern, std::string keep_delim_pattern = "", bool with_offsets = false);
/// \brief Destructor
~RegexTokenizer() = default;
/// \brief Function to convert TensorTransform object into a TensorOperation object.
/// \return return code
std::shared_ptr<TensorOperation> Parse() override;
private:
std::string delim_pattern_;
std::string keep_delim_pattern_;
bool with_offsets_;
};
#endif
/// \brief Tokenize scalar token or 1-D tokens to tokens by sentencepiece.
/// \param[in] vocab a SentencePieceVocab object.
/// \param[in] out_type The type of output.
/// \return Shared pointer to the current TensorOperation.
std::shared_ptr<SentencePieceTokenizerOperation> SentencePieceTokenizer(
const std::shared_ptr<SentencePieceVocab> &vocab, mindspore::dataset::SPieceTokenizerOutType out_type);
class SentencePieceTokenizer : public TensorTransform {
public:
/// \brief Constructor.
/// \param[in] vocab a SentencePieceVocab object.
/// \param[in] out_type The type of output.
SentencePieceTokenizer(const std::shared_ptr<SentencePieceVocab> &vocab,
mindspore::dataset::SPieceTokenizerOutType out_typee);
/// \brief Tokenize scalar token or 1-D tokens to tokens by sentencepiece.
/// \param[in] vocab_path vocab model file path.
/// \param[in] out_type The type of output.
/// \return Shared pointer to the current TensorOperation.
std::shared_ptr<SentencePieceTokenizerOperation> SentencePieceTokenizer(
const std::string &vocab_path, mindspore::dataset::SPieceTokenizerOutType out_type);
/// \brief Constructor.
/// \param[in] vocab_path vocab model file path.
/// \param[in] out_type The type of output.
SentencePieceTokenizer(const std::string &vocab_path, mindspore::dataset::SPieceTokenizerOutType out_type);
/// \brief Destructor
~SentencePieceTokenizer() = default;
/// \brief Function to convert TensorTransform object into a TensorOperation object.
/// \return return code
std::shared_ptr<TensorOperation> Parse() override;
private:
std::shared_ptr<SentencePieceVocab> vocab_;
std::string vocab_path_;
SPieceTokenizerLoadType load_type_;
SPieceTokenizerOutType out_type_;
};
/// \brief TensorOp to construct a tensor from data (only 1-D for now), where each element in the dimension
/// axis is a slice of data starting at the corresponding position, with a specified width.
/// \param[in] width The width of the window. It must be an integer and greater than zero.
/// \param[in] axis The axis along which the sliding window is computed (default=0), axis support 0 or -1 only
/// for now.
/// \return Shared pointer to the current TensorOperation.
std::shared_ptr<SlidingWindowOperation> SlidingWindow(const int32_t width, const int32_t axis = 0);
class SlidingWindow : public TensorTransform {
public:
/// \brief Constructor.
/// \param[in] width The width of the window. It must be an integer and greater than zero.
/// \param[in] axis The axis along which the sliding window is computed (default=0), axis support 0 or -1 only
/// for now.
explicit SlidingWindow(const int32_t width, const int32_t axis = 0);
/// \brief Destructor
~SlidingWindow() = default;
/// \brief Function to convert TensorTransform object into a TensorOperation object.
/// \return return code
std::shared_ptr<TensorOperation> Parse() override;
private:
int32_t width_;
int32_t axis_;
};
/// \brief Tensor operation to convert every element of a string tensor to a number.
/// Strings are casted according to the rules specified in the following links:
/// https://en.cppreference.com/w/cpp/string/basic_string/stof,
/// https://en.cppreference.com/w/cpp/string/basic_string/stoul,
/// except that any strings which represent negative numbers cannot be cast to an unsigned integer type.
/// \param[in] data_type of the tensor to be casted to. Must be a numeric type.
/// \return Shared pointer to the current TensorOperation.
std::shared_ptr<ToNumberOperation> ToNumber(const std::string &data_type);
class ToNumber : public TensorTransform {
public:
/// \brief Constructor.
/// \param[in] data_type of the tensor to be casted to. Must be a numeric type.
explicit ToNumber(const std::string &data_type);
/// \brief Destructor
~ToNumber() = default;
/// \brief Function to convert TensorTransform object into a TensorOperation object.
/// \return return code
std::shared_ptr<TensorOperation> Parse() override;
private:
std::string data_type_;
};
/// \brief Truncate a pair of rank-1 tensors such that the total length is less than max_length.
/// \param[in] max_length Maximum length required.
/// \return Shared pointer to the current TensorOperation.
std::shared_ptr<TruncateSequencePairOperation> TruncateSequencePair(int32_t max_length);
class TruncateSequencePair : public TensorTransform {
public:
/// \brief Constructor.
/// \param[in] max_length Maximum length required.
explicit TruncateSequencePair(int32_t max_length);
/// \brief Destructor
~TruncateSequencePair() = default;
/// \brief Function to convert TensorTransform object into a TensorOperation object.
/// \return return code
std::shared_ptr<TensorOperation> Parse() override;
private:
int32_t max_length_;
};
/// \brief Tokenize a scalar tensor of UTF-8 string to Unicode characters.
/// \param[in] with_offsets If or not output offsets of tokens (default=false).
/// \return Shared pointer to the current TensorOperation.
std::shared_ptr<UnicodeCharTokenizerOperation> UnicodeCharTokenizer(bool with_offsets = false);
class UnicodeCharTokenizer : public TensorTransform {
public:
/// \brief Constructor.
/// \param[in] with_offsets If or not output offsets of tokens (default=false).
explicit UnicodeCharTokenizer(bool with_offsets = false);
/// \brief Destructor
~UnicodeCharTokenizer() = default;
/// \brief Function to convert TensorTransform object into a TensorOperation object.
/// \return return code
std::shared_ptr<TensorOperation> Parse() override;
private:
bool with_offsets_;
};
#ifndef _WIN32
/// \brief Tokenize a scalar tensor of UTF-8 string on Unicode script boundaries.
/// \param[in] keep_whitespace If or not emit whitespace tokens (default=false).
/// \param[in] with_offsets If or not output offsets of tokens (default=false).
/// \return Shared pointer to the current TensorOperation.
std::shared_ptr<UnicodeScriptTokenizerOperation> UnicodeScriptTokenizer(bool keep_whitespace = false,
bool with_offsets = false);
class UnicodeScriptTokenizer : public TensorTransform {
public:
/// \brief Constructor.
/// \param[in] keep_whitespace If or not emit whitespace tokens (default=false).
/// \param[in] with_offsets If or not output offsets of tokens (default=false).
explicit UnicodeScriptTokenizer(bool keep_whitespace = false, bool with_offsets = false);
/// \brief Destructor
~UnicodeScriptTokenizer() = default;
/// \brief Function to convert TensorTransform object into a TensorOperation object.
/// \return return code
std::shared_ptr<TensorOperation> Parse() override;
private:
bool keep_whitespace_;
bool with_offsets_;
};
/// \brief Tokenize a scalar tensor of UTF-8 string on ICU4C defined whitespaces.
/// \param[in] with_offsets If or not output offsets of tokens (default=false).
/// \return Shared pointer to the current TensorOperation.
std::shared_ptr<WhitespaceTokenizerOperation> WhitespaceTokenizer(bool with_offsets = false);
class WhitespaceTokenizer : public TensorTransform {
public:
/// \brief Constructor.
/// \param[in] with_offsets If or not output offsets of tokens (default=false).
explicit WhitespaceTokenizer(bool with_offsets = false);
/// \brief Destructor
~WhitespaceTokenizer() = default;
/// \brief Function to convert TensorTransform object into a TensorOperation object.
/// \return return code
std::shared_ptr<TensorOperation> Parse() override;
private:
bool with_offsets_;
};
#endif
} // namespace text
} // namespace dataset

View File

@ -30,21 +30,27 @@
namespace mindspore {
namespace dataset {
// Abstract class to represent a tensor transform operation in the data pipeline.
class TensorTransform : public std::enable_shared_from_this<TensorTransform> {
public:
/// \brief Constructor
TensorTransform() {}
/// \brief Destructor
~TensorTransform() = default;
/// \brief Pure virtual function to convert a TensorTransform class into a IR TensorOperation object.
/// \return shared pointer to the newly created TensorOperation.
virtual std::shared_ptr<TensorOperation> Parse() = 0;
};
// Transform operations for performing data transformation.
namespace transforms {
// Transform Op classes (in alphabetical order)
class ComposeOperation;
class DuplicateOperation;
class OneHotOperation;
class PreBuiltOperation;
class RandomApplyOperation;
class RandomChoiceOperation;
class TypeCastOperation;
#ifndef ENABLE_ANDROID
class UniqueOperation;
#endif
/// \brief Function to create a Compose TensorOperation.
/// \notes Compose a list of transforms into a single transform.
@ -52,17 +58,40 @@ class UniqueOperation;
/// \return Shared pointer to the current TensorOperation.
std::shared_ptr<ComposeOperation> Compose(const std::vector<std::shared_ptr<TensorOperation>> &transforms);
/// \brief Function to create a Duplicate TensorOperation.
/// \brief Duplicate Op.
/// \notes Duplicate the input tensor to a new output tensor.
/// The input tensor is carried over to the output list.
/// \return Shared pointer to the current TensorOperation.
std::shared_ptr<DuplicateOperation> Duplicate();
class Duplicate : public TensorTransform {
public:
/// \brief Constructor.
Duplicate();
/// \brief Function to create a OneHot TensorOperation.
/// \brief Destructor
~Duplicate() = default;
/// \brief Function to convert TensorTransform object into a TensorOperation object.
/// \return return code
std::shared_ptr<TensorOperation> Parse() override;
};
/// \brief OneHot Op.
/// \notes Convert the labels into OneHot format.
/// \param[in] num_classes number of classes.
/// \return Shared pointer to the current TensorOperation.
std::shared_ptr<OneHotOperation> OneHot(int32_t num_classes);
class OneHot : public TensorTransform {
public:
/// \brief Constructor.
/// \param[in] num_classes number of classes.
explicit OneHot(int32_t num_classes);
/// \brief Destructor
~OneHot() = default;
/// \brief Function to convert TensorTransform object into a TensorOperation object.
/// \return return code
std::shared_ptr<TensorOperation> Parse() override;
private:
float num_classes_;
};
/// \brief Function to create a RandomApply TensorOperation.
/// \notes Randomly perform a series of transforms with a given probability.
@ -78,18 +107,41 @@ std::shared_ptr<RandomApplyOperation> RandomApply(const std::vector<std::shared_
/// \return Shared pointer to the current TensorOperation.
std::shared_ptr<RandomChoiceOperation> RandomChoice(const std::vector<std::shared_ptr<TensorOperation>> &transforms);
/// \brief Function to create a TypeCast TensorOperation.
/// \brief TypeCast Op.
/// \notes Tensor operation to cast to a given MindSpore data type.
/// \param[in] data_type mindspore.dtype to be cast to.
/// \return Shared pointer to the current TensorOperation.
std::shared_ptr<TypeCastOperation> TypeCast(std::string data_type);
class TypeCast : public TensorTransform {
public:
/// \brief Constructor.
/// \param[in] data_type mindspore.dtype to be cast to.
explicit TypeCast(std::string data_type);
/// \brief Destructor
~TypeCast() = default;
/// \brief Function to convert TensorTransform object into a TensorOperation object.
/// \return return code
std::shared_ptr<TensorOperation> Parse() override;
private:
std::string data_type_;
};
#ifndef ENABLE_ANDROID
/// \brief Function to create a Unique TensorOperation.
/// \brief Unique Op.
/// \notes Return an output tensor containing all the unique elements of the input tensor in
/// the same order that they occur in the input tensor.
/// \return Shared pointer to the current TensorOperation.
std::shared_ptr<UniqueOperation> Unique();
class Unique : public TensorTransform {
public:
/// \brief Constructor.
Unique();
/// \brief Destructor
~Unique() = default;
/// \brief Function to convert TensorTransform object into a TensorOperation object.
/// \return return code
std::shared_ptr<TensorOperation> Parse() override;
};
#endif
} // namespace transforms
} // namespace dataset

View File

@ -72,7 +72,7 @@ class DuplicateOperation : public TensorOperation {
class OneHotOperation : public TensorOperation {
public:
explicit OneHotOperation(int32_t num_classes_);
explicit OneHotOperation(int32_t num_classes);
~OneHotOperation() = default;

View File

@ -42,7 +42,7 @@ class TensorOperation : public std::enable_shared_from_this<TensorOperation> {
/// \return shared pointer to the newly created TensorOp.
virtual std::shared_ptr<TensorOp> Build() = 0;
virtual Status ValidateParams() = 0;
virtual Status ValidateParams() { return Status::OK(); }
virtual std::string Name() const = 0;

View File

@ -162,16 +162,6 @@ std::shared_ptr<TensorOp> JiebaTokenizerOperation::Build() {
}
Status JiebaTokenizerOperation::AddWord(const std::string &word, int64_t freq) {
if (word.empty()) {
std::string err_msg = "JiebaTokenizer : The parameter word is empty or not provided.";
MS_LOG(ERROR) << err_msg;
RETURN_STATUS_SYNTAX_ERROR(err_msg);
}
if (freq < 0) {
std::string err_msg = "JiebaTokenizer : The parameter freq must be greater than or equal to 0.";
MS_LOG(ERROR) << err_msg;
RETURN_STATUS_SYNTAX_ERROR(err_msg);
}
words_list_.emplace_back(word, freq);
return Status::OK();
}
@ -379,6 +369,7 @@ std::shared_ptr<TensorOp> ToNumberOperation::Build() {
return tensor_op;
}
// TruncateSequencePairOperation
TruncateSequencePairOperation::TruncateSequencePairOperation(int32_t max_length) : max_length_(max_length) {}
Status TruncateSequencePairOperation::ValidateParams() {

View File

@ -74,7 +74,7 @@ TEST_F(MindDataTestPipeline, TestSaveCifar10AndLoad) {
// Create objects for the tensor ops
// uint32 will be casted to int64 implicitly in mindrecord file, so we have to cast it back to uint32
std::shared_ptr<TensorOperation> type_cast = transforms::TypeCast("uint32");
std::shared_ptr<TensorTransform> type_cast = std::make_shared<transforms::TypeCast>("uint32");
EXPECT_NE(type_cast, nullptr);
// Create a Map operation on ds

View File

@ -53,8 +53,8 @@ TEST_F(MindDataTestPipeline, TestSentencePieceVocabSuccess1) {
std::shared_ptr<Dataset> ds = TextFile({data_file}, 0, ShuffleMode::kFalse);
// Create SentencePieceTokenizer operation from vocab object
std::shared_ptr<TensorOperation> sentencepiece_tokenizer =
text::SentencePieceTokenizer(vocab, mindspore::dataset::SPieceTokenizerOutType::kString);
std::shared_ptr<TensorTransform> sentencepiece_tokenizer =
std::make_shared<text::SentencePieceTokenizer>(vocab, mindspore::dataset::SPieceTokenizerOutType::kString);
EXPECT_NE(sentencepiece_tokenizer, nullptr);
// Create Map operation on ds
@ -109,8 +109,8 @@ TEST_F(MindDataTestPipeline, TestSentencePieceVocabSuccess2) {
// Create SentencePieceTokenizer operation from local vocab model
std::string vocab_model = datasets_root_path_ + "/test_sentencepiece/m.model";
std::shared_ptr<TensorOperation> sentencepiece_tokenizer =
text::SentencePieceTokenizer(vocab_model, mindspore::dataset::SPieceTokenizerOutType::kString);
std::shared_ptr<TensorTransform> sentencepiece_tokenizer =
std::make_shared<text::SentencePieceTokenizer>(vocab_model, mindspore::dataset::SPieceTokenizerOutType::kString);
EXPECT_NE(sentencepiece_tokenizer, nullptr);
// Create Map operation on ds
@ -175,26 +175,76 @@ TEST_F(MindDataTestPipeline, TestSentencePieceVocabFail) {
TEST_F(MindDataTestPipeline, TestSentencePieceTokenizerFail1) {
MS_LOG(INFO) << "Doing MindDataTestPipeline-TestSentencePieceTokenizerFail with incorrect parameter.";
// Create SentencePieceTokenizer operation from local vocab model
std::string vocab_model1 = "";
std::shared_ptr<TensorOperation> sentencepiece_tokenizer1 =
text::SentencePieceTokenizer(vocab_model1, mindspore::dataset::SPieceTokenizerOutType::kString);
EXPECT_EQ(sentencepiece_tokenizer1, nullptr);
// Create a TextFile dataset
std::string data_file = datasets_root_path_ + "/testTokenizerData/sentencepiece_tokenizer.txt";
std::shared_ptr<Dataset> ds = TextFile({data_file}, 0, ShuffleMode::kFalse);
// Create SentencePieceTokenizer operation from local vocab model
std::string vocab_model2 = "m.model";
std::shared_ptr<TensorOperation> sentencepiece_tokenizer2 =
text::SentencePieceTokenizer(vocab_model2, mindspore::dataset::SPieceTokenizerOutType::kString);
EXPECT_EQ(sentencepiece_tokenizer2, nullptr);
std::string vocab_model = "";
std::shared_ptr<TensorTransform> sentencepiece_tokenizer =
std::make_shared<text::SentencePieceTokenizer>(vocab_model, mindspore::dataset::SPieceTokenizerOutType::kString);
EXPECT_NE(sentencepiece_tokenizer, nullptr);
// Create SentencePieceTokenizer operation from vocab object
std::shared_ptr<SentencePieceVocab> vocab_model3 = nullptr;
std::shared_ptr<TensorOperation> sentencepiece_tokenizer3 =
text::SentencePieceTokenizer(vocab_model3, mindspore::dataset::SPieceTokenizerOutType::kString);
EXPECT_EQ(sentencepiece_tokenizer3, nullptr);
// Create Map operation on ds
ds = ds->Map({sentencepiece_tokenizer}, {"text"});
EXPECT_NE(ds, nullptr);
// Create an iterator over the result of the above dataset
// This will trigger the creation of the Execution Tree and launch it.
std::shared_ptr<Iterator> iter = ds->CreateIterator();
// Expect failure: Invalid SentencePieceTokenizer input
EXPECT_EQ(iter, nullptr);
}
TEST_F(MindDataTestPipeline, TestSentencePieceTokenizerFail2) {
MS_LOG(INFO) << "Doing MindDataTestPipeline-TestSentencePieceTokenizerFail2 with incorrect parameter.";
// Create a TextFile dataset
std::string data_file = datasets_root_path_ + "/testTokenizerData/sentencepiece_tokenizer.txt";
std::shared_ptr<Dataset> ds = TextFile({data_file}, 0, ShuffleMode::kFalse);
// Create SentencePieceTokenizer operation from local vocab model
std::string vocab_model = "m.model";
std::shared_ptr<TensorTransform> sentencepiece_tokenizer =
std::make_shared<text::SentencePieceTokenizer>(vocab_model, mindspore::dataset::SPieceTokenizerOutType::kString);
EXPECT_NE(sentencepiece_tokenizer, nullptr);
// Create Map operation on ds
ds = ds->Map({sentencepiece_tokenizer}, {"text"});
EXPECT_NE(ds, nullptr);
// Create an iterator over the result of the above dataset
// This will trigger the creation of the Execution Tree and launch it.
std::shared_ptr<Iterator> iter = ds->CreateIterator();
// Expect failure: Invalid SentencePieceTokenizer input
EXPECT_EQ(iter, nullptr);
}
TEST_F(MindDataTestPipeline, TestSentencePieceTokenizerFail3) {
MS_LOG(INFO) << "Doing MindDataTestPipeline-TestSentencePieceTokenizerFail3 with incorrect parameter.";
// Create a TextFile dataset
std::string data_file = datasets_root_path_ + "/testTokenizerData/sentencepiece_tokenizer.txt";
std::shared_ptr<Dataset> ds = TextFile({data_file}, 0, ShuffleMode::kFalse);
// Create SentencePieceTokenizer operation from vocab object
std::shared_ptr<SentencePieceVocab> vocab_model = nullptr;
std::shared_ptr<TensorTransform> sentencepiece_tokenizer =
std::make_shared<text::SentencePieceTokenizer>(vocab_model, mindspore::dataset::SPieceTokenizerOutType::kString);
EXPECT_NE(sentencepiece_tokenizer, nullptr);
// Create Map operation on ds
ds = ds->Map({sentencepiece_tokenizer}, {"text"});
EXPECT_NE(ds, nullptr);
// Create an iterator over the result of the above dataset
// This will trigger the creation of the Execution Tree and launch it.
std::shared_ptr<Iterator> iter = ds->CreateIterator();
// Expect failure: Invalid SentencePieceTokenizer input
EXPECT_EQ(iter, nullptr);
}
TEST_F(MindDataTestPipeline, TestSentencePieceTokenizerFail4) {
MS_LOG(INFO) << "Doing MindDataTestPipeline-TestSentencePieceTokenizerFail with invalid SentencePieceVocab object.";
// Create a TextFile dataset
@ -203,8 +253,8 @@ TEST_F(MindDataTestPipeline, TestSentencePieceTokenizerFail2) {
// Create SentencePieceTokenizer operation from vocab object
std::shared_ptr<SentencePieceVocab> vocab_model4 = std::make_shared<SentencePieceVocab>();
std::shared_ptr<TensorOperation> sentencepiece_tokenizer4 =
text::SentencePieceTokenizer(vocab_model4, mindspore::dataset::SPieceTokenizerOutType::kString);
std::shared_ptr<TensorTransform> sentencepiece_tokenizer4 =
std::make_shared<text::SentencePieceTokenizer>(vocab_model4, mindspore::dataset::SPieceTokenizerOutType::kString);
EXPECT_NE(sentencepiece_tokenizer4, nullptr);
// Create Map operation on ds
@ -215,8 +265,4 @@ TEST_F(MindDataTestPipeline, TestSentencePieceTokenizerFail2) {
// This will trigger the creation of the Execution Tree and launch it.
std::shared_ptr<Iterator> iter = ds->CreateIterator();
EXPECT_NE(iter, nullptr);
// Iterate the dataset and get each row
// std::unordered_map<std::string, mindspore::MSTensor> row;
// EXPECT_EQ(iter->GetNextRow(&row), false);
}

View File

@ -49,7 +49,7 @@ TEST_F(MindDataTestPipeline, TestBasicTokenizerSuccess1) {
EXPECT_NE(ds, nullptr);
// Create BasicTokenizer operation on ds
std::shared_ptr<TensorOperation> basic_tokenizer = text::BasicTokenizer();
std::shared_ptr<TensorTransform> basic_tokenizer = std::make_shared<text::BasicTokenizer>();
EXPECT_NE(basic_tokenizer, nullptr);
// Create Map operation on ds
@ -107,7 +107,7 @@ TEST_F(MindDataTestPipeline, TestBasicTokenizerSuccess2) {
EXPECT_NE(ds, nullptr);
// Create BasicTokenizer operation on ds
std::shared_ptr<TensorOperation> basic_tokenizer = text::BasicTokenizer(true);
std::shared_ptr<TensorTransform> basic_tokenizer = std::make_shared<text::BasicTokenizer>(true);
EXPECT_NE(basic_tokenizer, nullptr);
// Create Map operation on ds
@ -155,8 +155,8 @@ TEST_F(MindDataTestPipeline, TestBasicTokenizerSuccess3) {
EXPECT_NE(ds, nullptr);
// Create BasicTokenizer operation on ds
std::shared_ptr<TensorOperation> basic_tokenizer =
text::BasicTokenizer(true, false, NormalizeForm::kNone, true, true);
std::shared_ptr<TensorTransform> basic_tokenizer =
std::make_shared<text::BasicTokenizer>(true, false, NormalizeForm::kNone, true, true);
EXPECT_NE(basic_tokenizer, nullptr);
// Create Map operation on ds
@ -226,7 +226,7 @@ TEST_F(MindDataTestPipeline, TestBertTokenizerSuccess1) {
EXPECT_EQ(s, Status::OK());
// Create BertTokenizer operation on ds
std::shared_ptr<TensorOperation> bert_tokenizer = text::BertTokenizer(vocab);
std::shared_ptr<TensorTransform> bert_tokenizer = std::make_shared<text::BertTokenizer>(vocab);
EXPECT_NE(bert_tokenizer, nullptr);
// Create Map operation on ds
@ -286,7 +286,8 @@ TEST_F(MindDataTestPipeline, TestBertTokenizerSuccess2) {
EXPECT_EQ(s, Status::OK());
// Create BertTokenizer operation on ds
std::shared_ptr<TensorOperation> bert_tokenizer = text::BertTokenizer(vocab, "##", 100, "[UNK]", true);
std::shared_ptr<TensorTransform> bert_tokenizer =
std::make_shared<text::BertTokenizer>(vocab, "##", 100, "[UNK]", true);
EXPECT_NE(bert_tokenizer, nullptr);
// Create Map operation on ds
@ -344,8 +345,8 @@ TEST_F(MindDataTestPipeline, TestBertTokenizerSuccess3) {
EXPECT_EQ(s, Status::OK());
// Create BertTokenizer operation on ds
std::shared_ptr<TensorOperation> bert_tokenizer =
text::BertTokenizer(vocab, "##", 100, "[UNK]", false, false, NormalizeForm::kNfc);
std::shared_ptr<TensorTransform> bert_tokenizer =
std::make_shared<text::BertTokenizer>(vocab, "##", 100, "[UNK]", false, false, NormalizeForm::kNfc);
EXPECT_NE(bert_tokenizer, nullptr);
// Create Map operation on ds
@ -403,7 +404,8 @@ TEST_F(MindDataTestPipeline, TestBertTokenizerSuccess4) {
EXPECT_EQ(s, Status::OK());
// Create BertTokenizer operation on ds
std::shared_ptr<TensorOperation> bert_tokenizer = text::BertTokenizer(vocab, "##", 100, "[UNK]", false, true);
std::shared_ptr<TensorTransform> bert_tokenizer =
std::make_shared<text::BertTokenizer>(vocab, "##", 100, "[UNK]", false, true);
EXPECT_NE(bert_tokenizer, nullptr);
// Create Map operation on ds
@ -460,7 +462,8 @@ TEST_F(MindDataTestPipeline, TestBertTokenizerSuccess5) {
EXPECT_EQ(s, Status::OK());
// Create BertTokenizer operation on ds
std::shared_ptr<TensorOperation> bert_tokenizer = text::BertTokenizer(vocab, "##", 100, "", false, true);
std::shared_ptr<TensorTransform> bert_tokenizer =
std::make_shared<text::BertTokenizer>(vocab, "##", 100, "", false, true);
EXPECT_NE(bert_tokenizer, nullptr);
// Create Map operation on ds
@ -517,8 +520,8 @@ TEST_F(MindDataTestPipeline, TestBertTokenizerSuccess6) {
EXPECT_EQ(s, Status::OK());
// Create BertTokenizer operation on ds
std::shared_ptr<TensorOperation> bert_tokenizer =
text::BertTokenizer(vocab, "##", 100, "", false, true, NormalizeForm::kNone, false);
std::shared_ptr<TensorTransform> bert_tokenizer =
std::make_shared<text::BertTokenizer>(vocab, "##", 100, "", false, true, NormalizeForm::kNone, false);
EXPECT_NE(bert_tokenizer, nullptr);
// Create Map operation on ds
@ -575,8 +578,8 @@ TEST_F(MindDataTestPipeline, TestBertTokenizerSuccess7) {
EXPECT_EQ(s, Status::OK());
// Create BertTokenizer operation on ds
std::shared_ptr<TensorOperation> bert_tokenizer =
text::BertTokenizer(vocab, "##", 100, "[UNK]", true, false, NormalizeForm::kNone, true, true);
std::shared_ptr<TensorTransform> bert_tokenizer =
std::make_shared<text::BertTokenizer>(vocab, "##", 100, "[UNK]", true, false, NormalizeForm::kNone, true, true);
EXPECT_NE(bert_tokenizer, nullptr);
// Create Map operation on ds
@ -631,9 +634,16 @@ TEST_F(MindDataTestPipeline, TestBertTokenizerFail1) {
EXPECT_NE(ds, nullptr);
// Create BertTokenizer operation on ds
std::shared_ptr<TensorOperation> bert_tokenizer = text::BertTokenizer(nullptr);
std::shared_ptr<TensorTransform> bert_tokenizer = std::make_shared<text::BertTokenizer>(nullptr);
EXPECT_NE(bert_tokenizer, nullptr);
// Create a Map operation on ds
ds = ds->Map({bert_tokenizer});
EXPECT_NE(ds, nullptr);
std::shared_ptr<Iterator> iter = ds->CreateIterator();
// Expect failure: invalid BertTokenizer input with nullptr vocab
EXPECT_EQ(bert_tokenizer, nullptr);
EXPECT_EQ(iter, nullptr);
}
TEST_F(MindDataTestPipeline, TestBertTokenizerFail2) {
@ -651,9 +661,16 @@ TEST_F(MindDataTestPipeline, TestBertTokenizerFail2) {
EXPECT_EQ(s, Status::OK());
// Create BertTokenizer operation on ds
std::shared_ptr<TensorOperation> bert_tokenizer = text::BertTokenizer(vocab, "##", -1);
std::shared_ptr<TensorTransform> bert_tokenizer = std::make_shared<text::BertTokenizer>(vocab, "##", -1);
EXPECT_NE(bert_tokenizer, nullptr);
// Create a Map operation on ds
ds = ds->Map({bert_tokenizer});
EXPECT_NE(ds, nullptr);
std::shared_ptr<Iterator> iter = ds->CreateIterator();
// Expect failure: invalid BertTokenizer input with nullptr vocab
EXPECT_EQ(bert_tokenizer, nullptr);
EXPECT_EQ(iter, nullptr);
}
TEST_F(MindDataTestPipeline, TestCaseFoldSuccess) {
@ -665,7 +682,7 @@ TEST_F(MindDataTestPipeline, TestCaseFoldSuccess) {
EXPECT_NE(ds, nullptr);
// Create casefold operation on ds
std::shared_ptr<TensorOperation> casefold = text::CaseFold();
std::shared_ptr<TensorTransform> casefold = std::make_shared<text::CaseFold>();
EXPECT_NE(casefold, nullptr);
// Create Map operation on ds
@ -711,7 +728,8 @@ TEST_F(MindDataTestPipeline, TestJiebaTokenizerSuccess) {
EXPECT_NE(ds, nullptr);
// Create jieba_tokenizer operation on ds
std::shared_ptr<TensorOperation> jieba_tokenizer = text::JiebaTokenizer(hmm_path, mp_path, JiebaMode::kMp);
std::shared_ptr<TensorTransform> jieba_tokenizer =
std::make_shared<text::JiebaTokenizer>(hmm_path, mp_path, JiebaMode::kMp);
EXPECT_NE(jieba_tokenizer, nullptr);
// Create Map operation on ds
@ -757,7 +775,8 @@ TEST_F(MindDataTestPipeline, TestJiebaTokenizerSuccess1) {
EXPECT_NE(ds, nullptr);
// Create jieba_tokenizer operation on ds
std::shared_ptr<TensorOperation> jieba_tokenizer = text::JiebaTokenizer(hmm_path, mp_path, JiebaMode::kHmm);
std::shared_ptr<TensorTransform> jieba_tokenizer =
std::make_shared<text::JiebaTokenizer>(hmm_path, mp_path, JiebaMode::kHmm);
EXPECT_NE(jieba_tokenizer, nullptr);
// Create Map operation on ds
@ -803,7 +822,8 @@ TEST_F(MindDataTestPipeline, TestJiebaTokenizerSuccess2) {
EXPECT_NE(ds, nullptr);
// Create jieba_tokenizer operation on ds
std::shared_ptr<TensorOperation> jieba_tokenizer = text::JiebaTokenizer(hmm_path, mp_path, JiebaMode::kMp, true);
std::shared_ptr<TensorTransform> jieba_tokenizer =
std::make_shared<text::JiebaTokenizer>(hmm_path, mp_path, JiebaMode::kMp, true);
EXPECT_NE(jieba_tokenizer, nullptr);
// Create Map operation on ds
@ -849,32 +869,106 @@ TEST_F(MindDataTestPipeline, TestJiebaTokenizerSuccess2) {
iter->Stop();
}
TEST_F(MindDataTestPipeline, TestJiebaTokenizerFail) {
TEST_F(MindDataTestPipeline, TestJiebaTokenizerFail1) {
// Testing the incorrect parameter of JiebaTokenizer interface.
MS_LOG(INFO) << "Doing MindDataTestPipeline-TestJiebaTokenizerFail.";
MS_LOG(INFO) << "Doing MindDataTestPipeline-TestJiebaTokenizerFail1.";
// Create a TextFile dataset
std::string data_file = datasets_root_path_ + "/testJiebaDataset/3.txt";
std::string hmm_path = datasets_root_path_ + "/jiebadict/hmm_model.utf8";
std::string mp_path = datasets_root_path_ + "/jiebadict/jieba.dict.utf8";
std::shared_ptr<Dataset> ds = TextFile({data_file}, 0, ShuffleMode::kFalse);
EXPECT_NE(ds, nullptr);
// Create jieba_tokenizer operation on ds
// Testing the parameter hmm_path is empty
std::shared_ptr<TensorOperation> jieba_tokenizer = text::JiebaTokenizer("", mp_path, JiebaMode::kMp);
EXPECT_EQ(jieba_tokenizer, nullptr);
std::shared_ptr<TensorTransform> jieba_tokenizer =
std::make_shared<text::JiebaTokenizer>("", mp_path, JiebaMode::kMp);
EXPECT_NE(jieba_tokenizer, nullptr);
// Create a Map operation on ds
ds = ds->Map({jieba_tokenizer});
EXPECT_NE(ds, nullptr);
std::shared_ptr<Iterator> iter = ds->CreateIterator();
// Expect failure: invalid JiebaTokenizer input (parameter hmm_path is empty)
EXPECT_EQ(iter, nullptr);
}
TEST_F(MindDataTestPipeline, TestJiebaTokenizerFail2) {
// Testing the incorrect parameter of JiebaTokenizer interface.
MS_LOG(INFO) << "Doing MindDataTestPipeline-TestJiebaTokenizerFail2.";
// Create a TextFile dataset
std::string data_file = datasets_root_path_ + "/testJiebaDataset/3.txt";
std::string hmm_path = datasets_root_path_ + "/jiebadict/hmm_model.utf8";
std::shared_ptr<Dataset> ds = TextFile({data_file}, 0, ShuffleMode::kFalse);
EXPECT_NE(ds, nullptr);
// Create jieba_tokenizer operation on ds
// Testing the parameter mp_path is empty
std::shared_ptr<TensorOperation> jieba_tokenizer1 = text::JiebaTokenizer(hmm_path, "", JiebaMode::kMp);
EXPECT_EQ(jieba_tokenizer1, nullptr);
// Testing the parameter hmm_path is invalid path
std::shared_ptr<TensorTransform> jieba_tokenizer =
std::make_shared<text::JiebaTokenizer>(hmm_path, "", JiebaMode::kMp);
EXPECT_NE(jieba_tokenizer, nullptr);
// Create a Map operation on ds
ds = ds->Map({jieba_tokenizer});
EXPECT_NE(ds, nullptr);
std::shared_ptr<Iterator> iter = ds->CreateIterator();
// Expect failure: invalid JiebaTokenizer input (parameter mp_path is empty)
EXPECT_EQ(iter, nullptr);
}
TEST_F(MindDataTestPipeline, TestJiebaTokenizerFail3) {
// Testing the incorrect parameter of JiebaTokenizer interface.
MS_LOG(INFO) << "Doing MindDataTestPipeline-TestJiebaTokenizerFail3.";
// Create a TextFile dataset
std::string data_file = datasets_root_path_ + "/testJiebaDataset/3.txt";
std::string hmm_path_invalid = datasets_root_path_ + "/jiebadict/1.txt";
std::shared_ptr<TensorOperation> jieba_tokenizer2 = text::JiebaTokenizer(hmm_path_invalid, mp_path, JiebaMode::kMp);
EXPECT_EQ(jieba_tokenizer2, nullptr);
// Testing the parameter mp_path is invalid path
std::string mp_path = datasets_root_path_ + "/jiebadict/jieba.dict.utf8";
std::shared_ptr<Dataset> ds = TextFile({data_file}, 0, ShuffleMode::kFalse);
EXPECT_NE(ds, nullptr);
// Create jieba_tokenizer operation on ds
// Testing the parameter hmm_path is invalid path
std::shared_ptr<TensorTransform> jieba_tokenizer =
std::make_shared<text::JiebaTokenizer>(hmm_path_invalid, mp_path, JiebaMode::kMp);
EXPECT_NE(jieba_tokenizer, nullptr);
// Create a Map operation on ds
ds = ds->Map({jieba_tokenizer});
EXPECT_NE(ds, nullptr);
std::shared_ptr<Iterator> iter = ds->CreateIterator();
// Expect failure: invalid JiebaTokenizer input (parameter hmm_path is invalid path)
EXPECT_EQ(iter, nullptr);
}
TEST_F(MindDataTestPipeline, TestJiebaTokenizerFail4) {
// Testing the incorrect parameter of JiebaTokenizer interface.
MS_LOG(INFO) << "Doing MindDataTestPipeline-TestJiebaTokenizerFail4.";
// Create a TextFile dataset
std::string data_file = datasets_root_path_ + "/testJiebaDataset/3.txt";
std::string hmm_path = datasets_root_path_ + "/jiebadict/hmm_model.utf8";
std::string mp_path_invalid = datasets_root_path_ + "/jiebadict/1.txt";
std::shared_ptr<TensorOperation> jieba_tokenizer3 = text::JiebaTokenizer(hmm_path, mp_path_invalid, JiebaMode::kMp);
EXPECT_EQ(jieba_tokenizer3, nullptr);
std::shared_ptr<Dataset> ds = TextFile({data_file}, 0, ShuffleMode::kFalse);
EXPECT_NE(ds, nullptr);
// Create jieba_tokenizer operation on ds
// Testing the parameter mp_path is invalid path
std::shared_ptr<TensorTransform> jieba_tokenizer =
std::make_shared<text::JiebaTokenizer>(hmm_path, mp_path_invalid, JiebaMode::kMp);
EXPECT_NE(jieba_tokenizer, nullptr);
// Create a Map operation on ds
ds = ds->Map({jieba_tokenizer});
EXPECT_NE(ds, nullptr);
std::shared_ptr<Iterator> iter = ds->CreateIterator();
// Expect failure: invalid JiebaTokenizer input (parameter mp_path is invalid path)
EXPECT_EQ(iter, nullptr);
}
TEST_F(MindDataTestPipeline, TestJiebaTokenizerAddWord) {
@ -889,8 +983,8 @@ TEST_F(MindDataTestPipeline, TestJiebaTokenizerAddWord) {
EXPECT_NE(ds, nullptr);
// Create jieba_tokenizer operation on ds
std::shared_ptr<text::JiebaTokenizerOperation> jieba_tokenizer =
text::JiebaTokenizer(hmm_path, mp_path, JiebaMode::kMp);
std::shared_ptr<text::JiebaTokenizer> jieba_tokenizer =
std::make_shared<text::JiebaTokenizer>(hmm_path, mp_path, JiebaMode::kMp);
EXPECT_NE(jieba_tokenizer, nullptr);
// Add word with freq not provided (default 0)
@ -939,8 +1033,8 @@ TEST_F(MindDataTestPipeline, TestJiebaTokenizerAddWord1) {
EXPECT_NE(ds, nullptr);
// Create jieba_tokenizer operation on ds
std::shared_ptr<text::JiebaTokenizerOperation> jieba_tokenizer =
text::JiebaTokenizer(hmm_path, mp_path, JiebaMode::kMp);
std::shared_ptr<text::JiebaTokenizer> jieba_tokenizer =
std::make_shared<text::JiebaTokenizer>(hmm_path, mp_path, JiebaMode::kMp);
EXPECT_NE(jieba_tokenizer, nullptr);
// Add word with freq is set explicitly to 0
@ -989,8 +1083,8 @@ TEST_F(MindDataTestPipeline, TestJiebaTokenizerAddWord2) {
EXPECT_NE(ds, nullptr);
// Create jieba_tokenizer operation on ds
std::shared_ptr<text::JiebaTokenizerOperation> jieba_tokenizer =
text::JiebaTokenizer(hmm_path, mp_path, JiebaMode::kMp);
std::shared_ptr<text::JiebaTokenizer> jieba_tokenizer =
std::make_shared<text::JiebaTokenizer>(hmm_path, mp_path, JiebaMode::kMp);
EXPECT_NE(jieba_tokenizer, nullptr);
// Add word with freq 10
@ -1039,8 +1133,8 @@ TEST_F(MindDataTestPipeline, TestJiebaTokenizerAddWord3) {
EXPECT_NE(ds, nullptr);
// Create jieba_tokenizer operation on ds
std::shared_ptr<text::JiebaTokenizerOperation> jieba_tokenizer =
text::JiebaTokenizer(hmm_path, mp_path, JiebaMode::kMp);
std::shared_ptr<text::JiebaTokenizer> jieba_tokenizer =
std::make_shared<text::JiebaTokenizer>(hmm_path, mp_path, JiebaMode::kMp);
EXPECT_NE(jieba_tokenizer, nullptr);
// Add word with freq 20000
@ -1089,13 +1183,13 @@ TEST_F(MindDataTestPipeline, TestJiebaTokenizerAddWordFail) {
EXPECT_NE(ds, nullptr);
// Testing the parameter word of AddWord is empty
std::shared_ptr<text::JiebaTokenizerOperation> jieba_tokenizer =
text::JiebaTokenizer(hmm_path, mp_path, JiebaMode::kMp);
std::shared_ptr<text::JiebaTokenizer> jieba_tokenizer =
std::make_shared<text::JiebaTokenizer>(hmm_path, mp_path, JiebaMode::kMp);
EXPECT_NE(jieba_tokenizer, nullptr);
EXPECT_NE(jieba_tokenizer->AddWord("", 10), Status::OK());
// Testing the parameter freq of AddWord is negative
std::shared_ptr<text::JiebaTokenizerOperation> jieba_tokenizer1 =
text::JiebaTokenizer(hmm_path, mp_path, JiebaMode::kMp);
std::shared_ptr<text::JiebaTokenizer> jieba_tokenizer1 =
std::make_shared<text::JiebaTokenizer>(hmm_path, mp_path, JiebaMode::kMp);
EXPECT_NE(jieba_tokenizer1, nullptr);
EXPECT_NE(jieba_tokenizer1->AddWord("我们", -1), Status::OK());
}
@ -1110,10 +1204,10 @@ TEST_F(MindDataTestPipeline, TestSlidingWindowSuccess) {
EXPECT_NE(ds, nullptr);
// Create white_tokenizer operation on ds
std::shared_ptr<TensorOperation> white_tokenizer = text::WhitespaceTokenizer();
std::shared_ptr<TensorTransform> white_tokenizer = std::make_shared<text::WhitespaceTokenizer>();
EXPECT_NE(white_tokenizer, nullptr);
// Create sliding_window operation on ds
std::shared_ptr<TensorOperation> sliding_window = text::SlidingWindow(3, 0);
std::shared_ptr<TensorTransform> sliding_window = std::make_shared<text::SlidingWindow>(3, 0);
EXPECT_NE(sliding_window, nullptr);
// Create Map operation on ds
@ -1160,10 +1254,10 @@ TEST_F(MindDataTestPipeline, TestSlidingWindowSuccess1) {
EXPECT_NE(ds, nullptr);
// Create white_tokenizer operation on ds
std::shared_ptr<TensorOperation> white_tokenizer = text::WhitespaceTokenizer();
std::shared_ptr<TensorTransform> white_tokenizer = std::make_shared<text::WhitespaceTokenizer>();
EXPECT_NE(white_tokenizer, nullptr);
// Create sliding_window operation on ds
std::shared_ptr<TensorOperation> sliding_window = text::SlidingWindow(2, -1);
std::shared_ptr<TensorTransform> sliding_window = std::make_shared<text::SlidingWindow>(2, -1);
EXPECT_NE(sliding_window, nullptr);
// Create Map operation on ds
@ -1199,9 +1293,9 @@ TEST_F(MindDataTestPipeline, TestSlidingWindowSuccess1) {
iter->Stop();
}
TEST_F(MindDataTestPipeline, TestSlidingWindowFail) {
TEST_F(MindDataTestPipeline, TestSlidingWindowFail1) {
// Testing the incorrect parameter of SlidingWindow interface.
MS_LOG(INFO) << "Doing MindDataTestPipeline-TestSlidingWindowFail.";
MS_LOG(INFO) << "Doing MindDataTestPipeline-TestSlidingWindowFail1.";
// Create a TextFile dataset
std::string data_file = datasets_root_path_ + "/testTextFileDataset/1.txt";
@ -1211,12 +1305,40 @@ TEST_F(MindDataTestPipeline, TestSlidingWindowFail) {
// Create sliding_window operation on ds
// Testing the parameter width less than or equal to 0
// The parameter axis support 0 or -1 only for now
std::shared_ptr<TensorOperation> sliding_window = text::SlidingWindow(0, 0);
EXPECT_EQ(sliding_window, nullptr);
std::shared_ptr<TensorTransform> sliding_window = std::make_shared<text::SlidingWindow>(0, 0);
EXPECT_NE(sliding_window, nullptr);
// Create a Map operation on ds
ds = ds->Map({sliding_window});
EXPECT_NE(ds, nullptr);
std::shared_ptr<Iterator> iter = ds->CreateIterator();
// Expect failure: invalid SlidingWindow input (width less than or equal to 0)
EXPECT_EQ(iter, nullptr);
}
TEST_F(MindDataTestPipeline, TestSlidingWindowFail2) {
// Testing the incorrect parameter of SlidingWindow interface.
MS_LOG(INFO) << "Doing MindDataTestPipeline-TestSlidingWindowFail2.";
// Create a TextFile dataset
std::string data_file = datasets_root_path_ + "/testTextFileDataset/1.txt";
std::shared_ptr<Dataset> ds = TextFile({data_file}, 0, ShuffleMode::kFalse);
EXPECT_NE(ds, nullptr);
// Create sliding_window operation on ds
// Testing the parameter width less than or equal to 0
// The parameter axis support 0 or -1 only for now
std::shared_ptr<TensorOperation> sliding_window1 = text::SlidingWindow(-2, 0);
EXPECT_EQ(sliding_window1, nullptr);
std::shared_ptr<TensorTransform> sliding_window = std::make_shared<text::SlidingWindow>(-2, 0);
EXPECT_NE(sliding_window, nullptr);
// Create a Map operation on ds
ds = ds->Map({sliding_window});
EXPECT_NE(ds, nullptr);
std::shared_ptr<Iterator> iter = ds->CreateIterator();
// Expect failure: invalid SlidingWindow input (width less than or equal to 0)
EXPECT_EQ(iter, nullptr);
}
TEST_F(MindDataTestPipeline, TestToNumberSuccess1) {
@ -1234,7 +1356,7 @@ TEST_F(MindDataTestPipeline, TestToNumberSuccess1) {
EXPECT_NE(ds, nullptr);
// Create ToNumber operation on ds
std::shared_ptr<TensorOperation> to_number = text::ToNumber("int64");
std::shared_ptr<TensorTransform> to_number = std::make_shared<text::ToNumber>("int64");
EXPECT_NE(to_number, nullptr);
// Create a Map operation on ds
@ -1287,7 +1409,7 @@ TEST_F(MindDataTestPipeline, TestToNumberSuccess2) {
EXPECT_NE(ds, nullptr);
// Create ToNumber operation on ds
std::shared_ptr<TensorOperation> to_number = text::ToNumber("float64");
std::shared_ptr<TensorTransform> to_number = std::make_shared<text::ToNumber>("float64");
EXPECT_NE(to_number, nullptr);
// Create a Map operation on ds
@ -1340,7 +1462,7 @@ TEST_F(MindDataTestPipeline, TestToNumberFail1) {
EXPECT_NE(ds, nullptr);
// Create ToNumber operation on ds
std::shared_ptr<TensorOperation> to_number = text::ToNumber("int8");
std::shared_ptr<TensorTransform> to_number = std::make_shared<text::ToNumber>("int8");
EXPECT_NE(to_number, nullptr);
// Create a Map operation on ds
@ -1390,7 +1512,7 @@ TEST_F(MindDataTestPipeline, TestToNumberFail2) {
EXPECT_NE(ds, nullptr);
// Create ToNumber operation on ds
std::shared_ptr<TensorOperation> to_number = text::ToNumber("float16");
std::shared_ptr<TensorTransform> to_number = std::make_shared<text::ToNumber>("float16");
EXPECT_NE(to_number, nullptr);
// Create a Map operation on ds
@ -1436,7 +1558,7 @@ TEST_F(MindDataTestPipeline, TestToNumberFail3) {
EXPECT_NE(ds, nullptr);
// Create ToNumber operation on ds
std::shared_ptr<TensorOperation> to_number = text::ToNumber("int64");
std::shared_ptr<TensorTransform> to_number = std::make_shared<text::ToNumber>("int64");
EXPECT_NE(to_number, nullptr);
// Create a Map operation on ds
@ -1478,16 +1600,39 @@ TEST_F(MindDataTestPipeline, TestToNumberFail4) {
EXPECT_NE(ds, nullptr);
// Create ToNumber operation on ds
std::shared_ptr<TensorOperation> to_number1 = text::ToNumber("string");
std::shared_ptr<TensorTransform> to_number = std::make_shared<text::ToNumber>("string");
EXPECT_NE(to_number, nullptr);
// Create a Map operation on ds
ds = ds->Map({to_number}, {"text"});
EXPECT_NE(ds, nullptr);
std::shared_ptr<Iterator> iter = ds->CreateIterator();
// Expect failure: invalid parameter with non numerical data type
EXPECT_EQ(to_number1, nullptr);
EXPECT_EQ(iter, nullptr);
}
TEST_F(MindDataTestPipeline, TestToNumberFail5) {
MS_LOG(INFO) << "Doing MindDataTestPipeline-TestToNumberFail5.";
// Test ToNumber with non numerical data type
std::string data_file = datasets_root_path_ + "/testTokenizerData/to_number.txt";
// Create a TextFile dataset
std::shared_ptr<Dataset> ds = TextFile({data_file}, 0, ShuffleMode::kFalse);
EXPECT_NE(ds, nullptr);
// Create ToNumber operation on ds
std::shared_ptr<TensorOperation> to_number2 = text::ToNumber("bool");
std::shared_ptr<TensorTransform> to_number = std::make_shared<text::ToNumber>("bool");
EXPECT_NE(to_number, nullptr);
// Create a Map operation on ds
ds = ds->Map({to_number}, {"text"});
EXPECT_NE(ds, nullptr);
std::shared_ptr<Iterator> iter = ds->CreateIterator();
// Expect failure: invalid parameter with non numerical data type
EXPECT_EQ(to_number2, nullptr);
EXPECT_EQ(iter, nullptr);
}
TEST_F(MindDataTestPipeline, TestTruncateSequencePairSuccess1) {
@ -1512,7 +1657,7 @@ TEST_F(MindDataTestPipeline, TestTruncateSequencePairSuccess1) {
EXPECT_NE(ds, nullptr);
// Create a truncate_sequence_pair operation on ds
std::shared_ptr<TensorOperation> truncate_sequence_pair = text::TruncateSequencePair(4);
std::shared_ptr<TensorTransform> truncate_sequence_pair = std::make_shared<text::TruncateSequencePair>(4);
EXPECT_NE(truncate_sequence_pair, nullptr);
// Create Map operation on ds
@ -1580,7 +1725,7 @@ TEST_F(MindDataTestPipeline, TestTruncateSequencePairSuccess2) {
EXPECT_NE(ds, nullptr);
// Create a truncate_sequence_pair operation on ds
std::shared_ptr<TensorOperation> truncate_sequence_pair = text::TruncateSequencePair(5);
std::shared_ptr<TensorTransform> truncate_sequence_pair = std::make_shared<text::TruncateSequencePair>(5);
EXPECT_NE(truncate_sequence_pair, nullptr);
// Create Map operation on ds
@ -1641,10 +1786,16 @@ TEST_F(MindDataTestPipeline, TestTruncateSequencePairFail) {
EXPECT_NE(ds, nullptr);
// Create a truncate_sequence_pair operation on ds
std::shared_ptr<TensorOperation> truncate_sequence_pair = text::TruncateSequencePair(-1);
std::shared_ptr<TensorTransform> truncate_sequence_pair = std::make_shared<text::TruncateSequencePair>(-1);
EXPECT_NE(truncate_sequence_pair, nullptr);
// Expect failure: invalid parameter with negative max_length
EXPECT_EQ(truncate_sequence_pair, nullptr);
// Create a Map operation on ds
ds = ds->Map({truncate_sequence_pair});
EXPECT_NE(ds, nullptr);
std::shared_ptr<Iterator> iter = ds->CreateIterator();
// Expect failure: invalid TruncateSequencePair input (invalid parameter with negative max_length)
EXPECT_EQ(iter, nullptr);
}
TEST_F(MindDataTestPipeline, TestNgramSuccess) {
@ -1657,10 +1808,10 @@ TEST_F(MindDataTestPipeline, TestNgramSuccess) {
EXPECT_NE(ds, nullptr);
// Create white_tokenizer operation on ds
std::shared_ptr<TensorOperation> white_tokenizer = text::WhitespaceTokenizer();
std::shared_ptr<TensorTransform> white_tokenizer = std::make_shared<text::WhitespaceTokenizer>();
EXPECT_NE(white_tokenizer, nullptr);
// Create sliding_window operation on ds
std::shared_ptr<TensorOperation> ngram_op = text::Ngram({2}, {"_", 1}, {"_", 1}, " ");
std::shared_ptr<TensorTransform> ngram_op(new text::Ngram({2}, {"_", 1}, {"_", 1}, " "));
EXPECT_NE(ngram_op, nullptr);
// Create Map operation on ds
@ -1707,10 +1858,10 @@ TEST_F(MindDataTestPipeline, TestNgramSuccess1) {
EXPECT_NE(ds, nullptr);
// Create white_tokenizer operation on ds
std::shared_ptr<TensorOperation> white_tokenizer = text::WhitespaceTokenizer();
std::shared_ptr<TensorTransform> white_tokenizer = std::make_shared<text::WhitespaceTokenizer>();
EXPECT_NE(white_tokenizer, nullptr);
// Create sliding_window operation on ds
std::shared_ptr<TensorOperation> ngram_op = text::Ngram({2, 3}, {"&", 2}, {"&", 2}, "-");
std::shared_ptr<TensorTransform> ngram_op(new text::Ngram({2, 3}, {"&", 2}, {"&", 2}, "-"));
EXPECT_NE(ngram_op, nullptr);
// Create Map operation on ds
@ -1752,9 +1903,9 @@ TEST_F(MindDataTestPipeline, TestNgramSuccess1) {
iter->Stop();
}
TEST_F(MindDataTestPipeline, TestNgramFail) {
TEST_F(MindDataTestPipeline, TestNgramFail1) {
// Testing the incorrect parameter of Ngram interface.
MS_LOG(INFO) << "Doing MindDataTestPipeline-TestNgramFail.";
MS_LOG(INFO) << "Doing MindDataTestPipeline-TestNgramFail1.";
// Create a TextFile dataset
std::string data_file = datasets_root_path_ + "/testTextFileDataset/1.txt";
@ -1763,31 +1914,108 @@ TEST_F(MindDataTestPipeline, TestNgramFail) {
// Create sliding_window operation on ds
// Testing the vector of ngram is empty
std::shared_ptr<TensorOperation> ngram_op = text::Ngram({});
EXPECT_EQ(ngram_op, nullptr);
// Testing the value of ngrams vector less than and equal to 0
std::shared_ptr<TensorOperation> ngram_op1 = text::Ngram({0});
EXPECT_EQ(ngram_op1, nullptr);
// Testing the value of ngrams vector less than and equal to 0
std::shared_ptr<TensorOperation> ngram_op2 = text::Ngram({-2});
EXPECT_EQ(ngram_op2, nullptr);
// Testing the second parameter pad_width in left_pad vector less than 0
std::shared_ptr<TensorOperation> ngram_op3 = text::Ngram({2}, {"", -1});
EXPECT_EQ(ngram_op3, nullptr);
// Testing the second parameter pad_width in right_pad vector less than 0
std::shared_ptr<TensorOperation> ngram_op4 = text::Ngram({2}, {"", 1}, {"", -1});
EXPECT_EQ(ngram_op4, nullptr);
std::shared_ptr<TensorTransform> ngram_op(new text::Ngram({}));
EXPECT_NE(ngram_op, nullptr);
// Create a Map operation on ds
ds = ds->Map({ngram_op});
EXPECT_NE(ds, nullptr);
std::shared_ptr<Iterator> iter = ds->CreateIterator();
// Expect failure: invalid Ngram input (the vector of ngram is empty)
EXPECT_EQ(iter, nullptr);
}
TEST_F(MindDataTestPipeline, TestTextOperationName) {
MS_LOG(INFO) << "Doing MindDataTestPipeline-TestTextOperationName.";
TEST_F(MindDataTestPipeline, TestNgramFail2) {
// Testing the incorrect parameter of Ngram interface.
MS_LOG(INFO) << "Doing MindDataTestPipeline-TestNgramFail2.";
// Create object for the tensor op, and check the name
std::string data_file = datasets_root_path_ + "/testVocab/words.txt";
std::shared_ptr<TensorOperation> sentence_piece_tokenizer_op =
text::SentencePieceTokenizer(data_file, SPieceTokenizerOutType::kString);
std::string correct_name = "SentencepieceTokenizer";
EXPECT_EQ(correct_name, sentence_piece_tokenizer_op->Name());
// Create a TextFile dataset
std::string data_file = datasets_root_path_ + "/testTextFileDataset/1.txt";
std::shared_ptr<Dataset> ds = TextFile({data_file}, 0, ShuffleMode::kFalse);
EXPECT_NE(ds, nullptr);
// Create sliding_window operation on ds
// Testing the value of ngrams vector less than and equal to 0
std::shared_ptr<TensorTransform> ngram_op(new text::Ngram({0}));
EXPECT_NE(ngram_op, nullptr);
// Create a Map operation on ds
ds = ds->Map({ngram_op});
EXPECT_NE(ds, nullptr);
std::shared_ptr<Iterator> iter = ds->CreateIterator();
// Expect failure: invalid Ngram input (the value of ngrams vector less than and equal to 0)
EXPECT_EQ(iter, nullptr);
}
TEST_F(MindDataTestPipeline, TestNgramFail3) {
// Testing the incorrect parameter of Ngram interface.
MS_LOG(INFO) << "Doing MindDataTestPipeline-TestNgramFail3.";
// Create a TextFile dataset
std::string data_file = datasets_root_path_ + "/testTextFileDataset/1.txt";
std::shared_ptr<Dataset> ds = TextFile({data_file}, 0, ShuffleMode::kFalse);
EXPECT_NE(ds, nullptr);
// Create sliding_window operation on ds
// Testing the value of ngrams vector less than and equal to 0
std::shared_ptr<TensorTransform> ngram_op(new text::Ngram({-2}));
EXPECT_NE(ngram_op, nullptr);
// Create a Map operation on ds
ds = ds->Map({ngram_op});
EXPECT_NE(ds, nullptr);
std::shared_ptr<Iterator> iter = ds->CreateIterator();
// Expect failure: invalid Ngram input (the value of ngrams vector less than and equal to 0)
EXPECT_EQ(iter, nullptr);
}
TEST_F(MindDataTestPipeline, TestNgramFail4) {
// Testing the incorrect parameter of Ngram interface.
MS_LOG(INFO) << "Doing MindDataTestPipeline-TestNgramFail4.";
// Create a TextFile dataset
std::string data_file = datasets_root_path_ + "/testTextFileDataset/1.txt";
std::shared_ptr<Dataset> ds = TextFile({data_file}, 0, ShuffleMode::kFalse);
EXPECT_NE(ds, nullptr);
// Create sliding_window operation on ds
// Testing the second parameter pad_width in left_pad vector less than 0
std::shared_ptr<TensorTransform> ngram_op(new text::Ngram({2}, {"", -1}));
EXPECT_NE(ngram_op, nullptr);
// Create a Map operation on ds
ds = ds->Map({ngram_op});
EXPECT_NE(ds, nullptr);
std::shared_ptr<Iterator> iter = ds->CreateIterator();
// Expect failure: invalid Ngram input (the second parameter pad_width in left_pad vector less than 0)
EXPECT_EQ(iter, nullptr);
}
TEST_F(MindDataTestPipeline, TestNgramFail5) {
// Testing the incorrect parameter of Ngram interface.
MS_LOG(INFO) << "Doing MindDataTestPipeline-TestNgramFail5.";
// Create a TextFile dataset
std::string data_file = datasets_root_path_ + "/testTextFileDataset/1.txt";
std::shared_ptr<Dataset> ds = TextFile({data_file}, 0, ShuffleMode::kFalse);
EXPECT_NE(ds, nullptr);
// Create sliding_window operation on ds
// Testing the second parameter pad_width in right_pad vector less than 0
std::shared_ptr<TensorTransform> ngram_op(new text::Ngram({2}, {"", 1}, {"", -1}));
EXPECT_NE(ngram_op, nullptr);
// Create a Map operation on ds
ds = ds->Map({ngram_op});
EXPECT_NE(ds, nullptr);
std::shared_ptr<Iterator> iter = ds->CreateIterator();
// Expect failure: invalid Ngram input (the second parameter pad_width in left_pad vector less than 0)
EXPECT_EQ(iter, nullptr);
}
TEST_F(MindDataTestPipeline, TestNormalizeUTF8Success) {
@ -1800,7 +2028,7 @@ TEST_F(MindDataTestPipeline, TestNormalizeUTF8Success) {
EXPECT_NE(ds, nullptr);
// Create normalizeutf8 operation on ds
std::shared_ptr<TensorOperation> normalizeutf8 = text::NormalizeUTF8(NormalizeForm::kNfkc);
std::shared_ptr<TensorTransform> normalizeutf8 = std::make_shared<text::NormalizeUTF8>(NormalizeForm::kNfkc);
EXPECT_NE(normalizeutf8, nullptr);
// Create Map operation on ds
@ -1844,7 +2072,7 @@ TEST_F(MindDataTestPipeline, TestNormalizeUTF8Success1) {
EXPECT_NE(ds, nullptr);
// Create normalizeutf8 operation on ds
std::shared_ptr<TensorOperation> normalizeutf8 = text::NormalizeUTF8(NormalizeForm::kNfc);
std::shared_ptr<TensorTransform> normalizeutf8 = std::make_shared<text::NormalizeUTF8>(NormalizeForm::kNfc);
EXPECT_NE(normalizeutf8, nullptr);
// Create Map operation on ds
@ -1888,7 +2116,7 @@ TEST_F(MindDataTestPipeline, TestNormalizeUTF8Success2) {
EXPECT_NE(ds, nullptr);
// Create normalizeutf8 operation on ds
std::shared_ptr<TensorOperation> normalizeutf8 = text::NormalizeUTF8(NormalizeForm::kNfd);
std::shared_ptr<TensorTransform> normalizeutf8 = std::make_shared<text::NormalizeUTF8>(NormalizeForm::kNfd);
EXPECT_NE(normalizeutf8, nullptr);
// Create Map operation on ds
@ -1932,7 +2160,7 @@ TEST_F(MindDataTestPipeline, TestNormalizeUTF8Success3) {
EXPECT_NE(ds, nullptr);
// Create normalizeutf8 operation on ds
std::shared_ptr<TensorOperation> normalizeutf8 = text::NormalizeUTF8(NormalizeForm::kNfkd);
std::shared_ptr<TensorTransform> normalizeutf8 = std::make_shared<text::NormalizeUTF8>(NormalizeForm::kNfkd);
EXPECT_NE(normalizeutf8, nullptr);
// Create Map operation on ds
@ -1976,7 +2204,7 @@ TEST_F(MindDataTestPipeline, TestRegexReplaceSuccess) {
EXPECT_NE(ds, nullptr);
// Create regex_replace operation on ds
std::shared_ptr<TensorOperation> regex_replace = text::RegexReplace("\\s+", "_", true);
std::shared_ptr<TensorTransform> regex_replace = std::make_shared<text::RegexReplace>("\\s+", "_", true);
EXPECT_NE(regex_replace, nullptr);
// Create Map operation on ds
@ -2021,7 +2249,7 @@ TEST_F(MindDataTestPipeline, TestRegexReplaceSuccess1) {
EXPECT_NE(ds, nullptr);
// Create regex_replace operation on ds
std::shared_ptr<TensorOperation> regex_replace = text::RegexReplace("\\s+", "_", false);
std::shared_ptr<TensorTransform> regex_replace = std::make_shared<text::RegexReplace>("\\s+", "_", false);
EXPECT_NE(regex_replace, nullptr);
// Create Map operation on ds
@ -2067,7 +2295,7 @@ TEST_F(MindDataTestPipeline, TestRegexTokenizerSuccess) {
EXPECT_NE(ds, nullptr);
// Create regex_tokenizer operation on ds
std::shared_ptr<TensorOperation> regex_tokenizer = text::RegexTokenizer("\\s+", "\\s+", false);
std::shared_ptr<TensorTransform> regex_tokenizer = std::make_shared<text::RegexTokenizer>("\\s+", "\\s+", false);
EXPECT_NE(regex_tokenizer, nullptr);
// Create Map operation on ds
@ -2119,7 +2347,7 @@ TEST_F(MindDataTestPipeline, TestRegexTokenizerSuccess1) {
EXPECT_NE(ds, nullptr);
// Create regex_tokenizer operation on ds
std::shared_ptr<TensorOperation> regex_tokenizer = text::RegexTokenizer("\\s+", "\\s+", true);
std::shared_ptr<TensorTransform> regex_tokenizer = std::make_shared<text::RegexTokenizer>("\\s+", "\\s+", true);
EXPECT_NE(regex_tokenizer, nullptr);
// Create Map operation on ds
@ -2186,7 +2414,7 @@ TEST_F(MindDataTestPipeline, TestUnicodeCharTokenizerSuccess) {
EXPECT_NE(ds, nullptr);
// Create unicodechar_tokenizer operation on ds
std::shared_ptr<TensorOperation> unicodechar_tokenizer = text::UnicodeCharTokenizer();
std::shared_ptr<TensorTransform> unicodechar_tokenizer = std::make_shared<text::UnicodeCharTokenizer>();
EXPECT_NE(unicodechar_tokenizer, nullptr);
// Create Map operation on ds
@ -2235,7 +2463,7 @@ TEST_F(MindDataTestPipeline, TestUnicodeCharTokenizerSuccess1) {
EXPECT_NE(ds, nullptr);
// Create unicodechar_tokenizer operation on ds
std::shared_ptr<TensorOperation> unicodechar_tokenizer = text::UnicodeCharTokenizer(true);
std::shared_ptr<TensorTransform> unicodechar_tokenizer = std::make_shared<text::UnicodeCharTokenizer>(true);
EXPECT_NE(unicodechar_tokenizer, nullptr);
// Create Map operation on ds
@ -2305,7 +2533,7 @@ TEST_F(MindDataTestPipeline, TestUnicodeScriptTokenizerSuccess) {
EXPECT_NE(ds, nullptr);
// Create unicodescript_tokenizer operation on ds
std::shared_ptr<TensorOperation> unicodescript_tokenizer = text::UnicodeScriptTokenizer();
std::shared_ptr<TensorTransform> unicodescript_tokenizer = std::make_shared<text::UnicodeScriptTokenizer>();
EXPECT_NE(unicodescript_tokenizer, nullptr);
// Create Map operation on ds
@ -2352,7 +2580,7 @@ TEST_F(MindDataTestPipeline, TestUnicodeScriptTokenizerSuccess1) {
EXPECT_NE(ds, nullptr);
// Create unicodescript_tokenizer operation on ds
std::shared_ptr<TensorOperation> unicodescript_tokenizer = text::UnicodeScriptTokenizer(true);
std::shared_ptr<TensorTransform> unicodescript_tokenizer = std::make_shared<text::UnicodeScriptTokenizer>(true);
EXPECT_NE(unicodescript_tokenizer, nullptr);
// Create Map operation on ds
@ -2399,7 +2627,8 @@ TEST_F(MindDataTestPipeline, TestUnicodeScriptTokenizerSuccess2) {
EXPECT_NE(ds, nullptr);
// Create unicodescript_tokenizer operation on ds
std::shared_ptr<TensorOperation> unicodescript_tokenizer = text::UnicodeScriptTokenizer(false, true);
std::shared_ptr<TensorTransform> unicodescript_tokenizer =
std::make_shared<text::UnicodeScriptTokenizer>(false, true);
EXPECT_NE(unicodescript_tokenizer, nullptr);
// Create Map operation on ds
@ -2459,7 +2688,7 @@ TEST_F(MindDataTestPipeline, TestUnicodeScriptTokenizerSuccess3) {
EXPECT_NE(ds, nullptr);
// Create unicodescript_tokenizer operation on ds
std::shared_ptr<TensorOperation> unicodescript_tokenizer = text::UnicodeScriptTokenizer(true, true);
std::shared_ptr<TensorTransform> unicodescript_tokenizer = std::make_shared<text::UnicodeScriptTokenizer>(true, true);
EXPECT_NE(unicodescript_tokenizer, nullptr);
// Create Map operation on ds
@ -2518,7 +2747,7 @@ TEST_F(MindDataTestPipeline, TestWhitespaceTokenizerSuccess) {
EXPECT_NE(ds, nullptr);
// Create white_tokenizer operation on ds
std::shared_ptr<TensorOperation> white_tokenizer = text::WhitespaceTokenizer();
std::shared_ptr<TensorTransform> white_tokenizer = std::make_shared<text::WhitespaceTokenizer>();
EXPECT_NE(white_tokenizer, nullptr);
// Create Map operation on ds
@ -2564,7 +2793,7 @@ TEST_F(MindDataTestPipeline, TestWhitespaceTokenizerSuccess1) {
EXPECT_NE(ds, nullptr);
// Create white_tokenizer operation on ds
std::shared_ptr<TensorOperation> white_tokenizer = text::WhitespaceTokenizer(true);
std::shared_ptr<TensorTransform> white_tokenizer = std::make_shared<text::WhitespaceTokenizer>(true);
EXPECT_NE(white_tokenizer, nullptr);
// Create Map operation on ds

View File

@ -50,7 +50,7 @@ TEST_F(MindDataTestPipeline, TestVocabLookupOp) {
EXPECT_EQ(s, Status::OK());
// Create Lookup operation on ds
std::shared_ptr<TensorOperation> lookup = text::Lookup(vocab, "<unk>", "int32");
std::shared_ptr<TensorTransform> lookup = std::make_shared<text::Lookup>(vocab, "<unk>", "int32");
EXPECT_NE(lookup, nullptr);
// Create Map operation on ds
@ -94,7 +94,7 @@ TEST_F(MindDataTestPipeline, TestVocabLookupOpEmptyString) {
EXPECT_EQ(s, Status::OK());
// Create Lookup operation on ds
std::shared_ptr<TensorOperation> lookup = text::Lookup(vocab, "", "int32");
std::shared_ptr<TensorTransform> lookup = std::make_shared<text::Lookup>(vocab, "", "int32");
EXPECT_NE(lookup, nullptr);
// Create Map operation on ds
@ -137,20 +137,39 @@ TEST_F(MindDataTestPipeline, TestVocabLookupOpFail1) {
EXPECT_EQ(s, Status::OK());
// Create lookup op for ds
// Expected failure: "<unk>" is not a word of vocab
std::shared_ptr<TensorOperation> lookup = text::Lookup(vocab, "<unk>", "int32");
EXPECT_EQ(lookup, nullptr);
std::shared_ptr<TensorTransform> lookup = std::make_shared<text::Lookup>(vocab, "<unk>", "int32");
EXPECT_NE(lookup, nullptr);
// Create a Map operation on ds
ds = ds->Map({lookup});
EXPECT_NE(ds, nullptr);
std::shared_ptr<Iterator> iter = ds->CreateIterator();
// Expect failure: invalid Lookup input ("<unk>" is not a word of vocab)
EXPECT_EQ(iter, nullptr);
}
TEST_F(MindDataTestPipeline, TestVocabLookupOpFail2) {
MS_LOG(INFO) << "Doing MindDataTestPipeline-TestVocabLookupOpFail2.";
// Create a TextFile Dataset
std::string data_file = datasets_root_path_ + "/testVocab/words.txt";
std::shared_ptr<Dataset> ds = TextFile({data_file}, 0, ShuffleMode::kFalse);
EXPECT_NE(ds, nullptr);
// Vocab has nothing
std::shared_ptr<Vocab> vocab;
// Create lookup op
// Expected failure: vocab is null
std::shared_ptr<TensorOperation> lookup = text::Lookup(vocab, "", "int32");
EXPECT_EQ(lookup, nullptr);
std::shared_ptr<TensorTransform> lookup = std::make_shared<text::Lookup>(vocab, "", "int32");
EXPECT_NE(lookup, nullptr);
// Create a Map operation on ds
ds = ds->Map({lookup});
EXPECT_NE(ds, nullptr);
std::shared_ptr<Iterator> iter = ds->CreateIterator();
// Expect failure: invalid Lookup input (vocab is null)
EXPECT_EQ(iter, nullptr);
}
TEST_F(MindDataTestPipeline, TestVocabFromDataset) {
@ -171,7 +190,7 @@ TEST_F(MindDataTestPipeline, TestVocabFromDataset) {
EXPECT_EQ(home_index, 4);
// Create Lookup operation on ds
std::shared_ptr<TensorOperation> lookup = text::Lookup(vocab, "<unk>", "int32");
std::shared_ptr<TensorTransform> lookup = std::make_shared<text::Lookup>(vocab, "<unk>", "int32");
EXPECT_NE(lookup, nullptr);
// Create Map operation on ds
@ -217,7 +236,7 @@ TEST_F(MindDataTestPipeline, TestVocabFromDatasetDefault) {
EXPECT_EQ(home_index, 2);
// Create Lookup operation on ds
std::shared_ptr<TensorOperation> lookup = text::Lookup(vocab, "home");
std::shared_ptr<TensorTransform> lookup = std::make_shared<text::Lookup>(vocab, "home");
EXPECT_NE(lookup, nullptr);
// Create Map operation on ds
@ -325,7 +344,7 @@ TEST_F(MindDataTestPipeline, TestVocabFromDatasetInt64) {
EXPECT_EQ(home_index, 2);
// Create Lookup operation on ds
std::shared_ptr<TensorOperation> lookup = text::Lookup(vocab, "home", "int64");
std::shared_ptr<TensorTransform> lookup = std::make_shared<text::Lookup>(vocab, "home", "int64");
EXPECT_NE(lookup, nullptr);
// Create Map operation on ds

View File

@ -1,5 +1,5 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
* Copyright 2020-2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@ -97,8 +97,7 @@ TEST_F(MindDataTestPipeline, TestDuplicateSuccess) {
EXPECT_NE(ds, nullptr);
// Create objects for the tensor ops
std::shared_ptr<TensorOperation> duplicate = transforms::Duplicate();
EXPECT_NE(duplicate, nullptr);
transforms::Duplicate duplicate = transforms::Duplicate();
// Create a Map operation on ds
ds = ds->Map({duplicate}, {"image"}, {"image", "image_copy"});
@ -151,7 +150,7 @@ TEST_F(MindDataTestPipeline, TestOneHotSuccess1) {
EXPECT_NE(ds, nullptr);
// Create objects for the tensor ops
std::shared_ptr<TensorOperation> one_hot_op = transforms::OneHot(number_of_classes);
std::shared_ptr<TensorTransform> one_hot_op = std::make_shared<transforms::OneHot>(number_of_classes);
EXPECT_NE(one_hot_op, nullptr);
// Create a Map operation on ds
@ -209,7 +208,7 @@ TEST_F(MindDataTestPipeline, TestOneHotSuccess2) {
EXPECT_NE(ds, nullptr);
// Create objects for the tensor ops
std::shared_ptr<TensorOperation> one_hot_op = transforms::OneHot(10);
std::shared_ptr<TensorTransform> one_hot_op = std::make_shared<transforms::OneHot>(10);
EXPECT_NE(one_hot_op, nullptr);
// Create a Map operation on ds
@ -246,16 +245,46 @@ TEST_F(MindDataTestPipeline, TestOneHotSuccess2) {
iter->Stop();
}
TEST_F(MindDataTestPipeline, TestOneHotFail) {
MS_LOG(INFO) << "Doing MindDataTestPipeline-TestOneHotFail with invalid params.";
TEST_F(MindDataTestPipeline, TestOneHotFail1) {
MS_LOG(INFO) << "Doing MindDataTestPipeline-TestOneHotFail1 with invalid params.";
// Create a Cifar10 Dataset
std::string folder_path = datasets_root_path_ + "/testCifar10Data/";
std::shared_ptr<Dataset> ds = Cifar10(folder_path, "all", RandomSampler(false, 10));
EXPECT_NE(ds, nullptr);
// incorrect num_class
std::shared_ptr<TensorOperation> one_hot_op1 = transforms::OneHot(0);
EXPECT_EQ(one_hot_op1, nullptr);
std::shared_ptr<TensorTransform> one_hot_op = std::make_shared<transforms::OneHot>(0);
EXPECT_NE(one_hot_op, nullptr);
// Create a Map operation on ds
ds = ds->Map({one_hot_op}, {"label"});
EXPECT_NE(ds, nullptr);
std::shared_ptr<Iterator> iter = ds->CreateIterator();
// Expect failure: invalid OneHot input
EXPECT_EQ(iter, nullptr);
}
TEST_F(MindDataTestPipeline, TestOneHotFail2) {
MS_LOG(INFO) << "Doing MindDataTestPipeline-TestOneHotFail2 with invalid params.";
// Create a Cifar10 Dataset
std::string folder_path = datasets_root_path_ + "/testCifar10Data/";
std::shared_ptr<Dataset> ds = Cifar10(folder_path, "all", RandomSampler(false, 10));
EXPECT_NE(ds, nullptr);
// incorrect num_class
std::shared_ptr<TensorOperation> one_hot_op2 = transforms::OneHot(-5);
EXPECT_EQ(one_hot_op2, nullptr);
std::shared_ptr<TensorTransform> one_hot_op = std::make_shared<transforms::OneHot>(-5);
EXPECT_NE(one_hot_op, nullptr);
// Create a Map operation on ds
ds = ds->Map({one_hot_op}, {"label"});
EXPECT_NE(ds, nullptr);
std::shared_ptr<Iterator> iter = ds->CreateIterator();
// Expect failure: invalid OneHot input
EXPECT_EQ(iter, nullptr);
}
TEST_F(MindDataTestPipeline, TestRandomApplySuccess) {
@ -379,15 +408,6 @@ TEST_F(MindDataTestPipeline, TestRandomChoiceFail) {
EXPECT_EQ(random_choice3, nullptr);
}
TEST_F(MindDataTestPipeline, TestTransformOperationName) {
MS_LOG(INFO) << "Doing MindDataTestPipeline-TestTransformOperationName.";
// Create object for the tensor op, and check the name
std::shared_ptr<TensorOperation> duplicate_op = transforms::Duplicate();
std::string correct_name = "Duplicate";
EXPECT_EQ(correct_name, duplicate_op->Name());
}
TEST_F(MindDataTestPipeline, TestTypeCastSuccess) {
MS_LOG(INFO) << "Doing MindDataTestPipeline-TestTypeCastSuccess.";
@ -415,7 +435,7 @@ TEST_F(MindDataTestPipeline, TestTypeCastSuccess) {
iter->Stop();
// Create objects for the tensor ops
std::shared_ptr<TensorOperation> type_cast = transforms::TypeCast("uint16");
std::shared_ptr<TensorTransform> type_cast = std::make_shared<transforms::TypeCast>("uint16");
EXPECT_NE(type_cast, nullptr);
// Create a Map operation on ds
@ -441,7 +461,20 @@ TEST_F(MindDataTestPipeline, TestTypeCastSuccess) {
TEST_F(MindDataTestPipeline, TestTypeCastFail) {
MS_LOG(INFO) << "Doing MindDataTestPipeline-TestTypeCastFail with invalid params.";
// Create a Cifar10 Dataset
std::string folder_path = datasets_root_path_ + "/testCifar10Data/";
std::shared_ptr<Dataset> ds = Cifar10(folder_path, "all", RandomSampler(false, 10));
EXPECT_NE(ds, nullptr);
// incorrect data type
std::shared_ptr<TensorOperation> type_cast = transforms::TypeCast("char");
EXPECT_EQ(type_cast, nullptr);
std::shared_ptr<TensorTransform> type_cast = std::make_shared<transforms::TypeCast>("char");
EXPECT_NE(type_cast, nullptr);
// Create a Map operation on ds
ds = ds->Map({type_cast}, {"image", "label"});
EXPECT_NE(ds, nullptr);
std::shared_ptr<Iterator> iter = ds->CreateIterator();
// Expect failure: invalid TypeCast input
EXPECT_EQ(iter, nullptr);
}

View File

@ -294,7 +294,7 @@ TEST_F(MindDataTestPipeline, TestCutMixBatchSuccess1) {
EXPECT_NE(ds, nullptr);
// Create objects for the tensor ops
std::shared_ptr<TensorOperation> one_hot_op = transforms::OneHot(number_of_classes);
std::shared_ptr<TensorTransform> one_hot_op = std::make_shared<transforms::OneHot>(number_of_classes);
EXPECT_NE(one_hot_op, nullptr);
// Create a Map operation on ds
@ -356,7 +356,7 @@ TEST_F(MindDataTestPipeline, TestCutMixBatchSuccess2) {
EXPECT_NE(ds, nullptr);
// Create objects for the tensor ops
std::shared_ptr<TensorOperation> one_hot_op = transforms::OneHot(number_of_classes);
std::shared_ptr<TensorTransform> one_hot_op = std::make_shared<transforms::OneHot>(number_of_classes);
EXPECT_NE(one_hot_op, nullptr);
// Create a Map operation on ds
@ -415,7 +415,7 @@ TEST_F(MindDataTestPipeline, TestCutMixBatchFail1) {
EXPECT_NE(ds, nullptr);
// Create objects for the tensor ops
std::shared_ptr<TensorOperation> one_hot_op = transforms::OneHot(10);
std::shared_ptr<TensorTransform> one_hot_op = std::make_shared<transforms::OneHot>(10);
EXPECT_NE(one_hot_op, nullptr);
// Create a Map operation on ds
@ -441,7 +441,7 @@ TEST_F(MindDataTestPipeline, TestCutMixBatchFail2) {
EXPECT_NE(ds, nullptr);
// Create objects for the tensor ops
std::shared_ptr<TensorOperation> one_hot_op = transforms::OneHot(10);
std::shared_ptr<TensorTransform> one_hot_op = std::make_shared<transforms::OneHot>(10);
EXPECT_NE(one_hot_op, nullptr);
// Create a Map operation on ds
@ -467,7 +467,7 @@ TEST_F(MindDataTestPipeline, TestCutMixBatchFail3) {
EXPECT_NE(ds, nullptr);
// Create objects for the tensor ops
std::shared_ptr<TensorOperation> one_hot_op = transforms::OneHot(10);
std::shared_ptr<TensorTransform> one_hot_op = std::make_shared<transforms::OneHot>(10);
EXPECT_NE(one_hot_op, nullptr);
// Create a Map operation on ds
@ -493,7 +493,7 @@ TEST_F(MindDataTestPipeline, TestCutMixBatchFail4) {
EXPECT_NE(ds, nullptr);
// Create objects for the tensor ops
std::shared_ptr<TensorOperation> one_hot_op = transforms::OneHot(10);
std::shared_ptr<TensorTransform> one_hot_op = std::make_shared<transforms::OneHot>(10);
EXPECT_NE(one_hot_op, nullptr);
// Create a Map operation on ds
@ -733,7 +733,7 @@ TEST_F(MindDataTestPipeline, TestMixUpBatchFail1) {
EXPECT_NE(ds, nullptr);
// Create objects for the tensor ops
std::shared_ptr<TensorOperation> one_hot_op = transforms::OneHot(10);
std::shared_ptr<TensorTransform> one_hot_op = std::make_shared<transforms::OneHot>(10);
EXPECT_NE(one_hot_op, nullptr);
// Create a Map operation on ds
@ -758,7 +758,7 @@ TEST_F(MindDataTestPipeline, TestMixUpBatchFail2) {
EXPECT_NE(ds, nullptr);
// Create objects for the tensor ops
std::shared_ptr<TensorOperation> one_hot_op = transforms::OneHot(10);
std::shared_ptr<TensorTransform> one_hot_op = std::make_shared<transforms::OneHot>(10);
EXPECT_NE(one_hot_op, nullptr);
// Create a Map operation on ds
@ -783,7 +783,7 @@ TEST_F(MindDataTestPipeline, TestMixUpBatchSuccess1) {
EXPECT_NE(ds, nullptr);
// Create objects for the tensor ops
std::shared_ptr<TensorOperation> one_hot_op = transforms::OneHot(10);
std::shared_ptr<TensorTransform> one_hot_op = std::make_shared<transforms::OneHot>(10);
EXPECT_NE(one_hot_op, nullptr);
// Create a Map operation on ds
@ -834,7 +834,7 @@ TEST_F(MindDataTestPipeline, TestMixUpBatchSuccess2) {
EXPECT_NE(ds, nullptr);
// Create objects for the tensor ops
std::shared_ptr<TensorOperation> one_hot_op = transforms::OneHot(10);
std::shared_ptr<TensorTransform> one_hot_op = std::make_shared<transforms::OneHot>(10);
EXPECT_NE(one_hot_op, nullptr);
// Create a Map operation on ds
@ -2710,51 +2710,51 @@ TEST_F(MindDataTestPipeline, TestResize1) {
iter->Stop();
}
TEST_F(MindDataTestPipeline, TestRescaleSucess1) {
MS_LOG(INFO) << "Doing MindDataTestPipeline-TestRescaleSucess1.";
// Create an ImageFolder Dataset
std::string folder_path = datasets_root_path_ + "/testPK/data/";
std::shared_ptr<Dataset> ds = ImageFolder(folder_path, true, SequentialSampler(0, 1));
EXPECT_NE(ds, nullptr);
// Create an iterator over the result of the above dataset
// This will trigger the creation of the Execution Tree and launch it.
std::shared_ptr<Iterator> iter = ds->CreateIterator();
EXPECT_NE(iter, nullptr);
// Iterate the dataset and get each row
std::unordered_map<std::string, mindspore::MSTensor> row;
iter->GetNextRow(&row);
auto image = row["image"];
// Create objects for the tensor ops
std::shared_ptr<TensorOperation> rescale = mindspore::dataset::vision::Rescale(1.0, 0.0);
EXPECT_NE(rescale, nullptr);
// Convert to the same type
std::shared_ptr<TensorOperation> type_cast = transforms::TypeCast("uint8");
EXPECT_NE(type_cast, nullptr);
ds = ds->Map({rescale, type_cast}, {"image"});
EXPECT_NE(ds, nullptr);
// Create an iterator over the result of the above dataset
// This will trigger the creation of the Execution Tree and launch it.
std::shared_ptr<Iterator> iter1 = ds->CreateIterator();
EXPECT_NE(iter1, nullptr);
// Iterate the dataset and get each row1
std::unordered_map<std::string, mindspore::MSTensor> row1;
iter1->GetNextRow(&row1);
auto image1 = row1["image"];
// EXPECT_EQ(*image, *image1);
// Manually terminate the pipeline
iter1->Stop();
}
// TEST_F(MindDataTestPipeline, TestRescaleSucess1) {
// MS_LOG(INFO) << "Doing MindDataTestPipeline-TestRescaleSucess1.";
// // Create an ImageFolder Dataset
// std::string folder_path = datasets_root_path_ + "/testPK/data/";
// std::shared_ptr<Dataset> ds = ImageFolder(folder_path, true, SequentialSampler(0, 1));
// EXPECT_NE(ds, nullptr);
//
// // Create an iterator over the result of the above dataset
// // This will trigger the creation of the Execution Tree and launch it.
// std::shared_ptr<Iterator> iter = ds->CreateIterator();
// EXPECT_NE(iter, nullptr);
//
// // Iterate the dataset and get each row
// std::unordered_map<std::string, mindspore::MSTensor> row;
// iter->GetNextRow(&row);
//
// auto image = row["image"];
//
// // Create objects for the tensor ops
// std::shared_ptr<TensorOperation> rescale = mindspore::dataset::vision::Rescale(1.0, 0.0);
// EXPECT_NE(rescale, nullptr);
//
// // Convert to the same type
// std::shared_ptr<TensorTransform> type_cast = std::make_shared<transforms::TypeCast>("uint8");
// EXPECT_NE(type_cast, nullptr);
//
// ds = ds->Map({rescale, type_cast}, {"image"});
// EXPECT_NE(ds, nullptr);
//
// // Create an iterator over the result of the above dataset
// // This will trigger the creation of the Execution Tree and launch it.
// std::shared_ptr<Iterator> iter1 = ds->CreateIterator();
// EXPECT_NE(iter1, nullptr);
//
// // Iterate the dataset and get each row1
// std::unordered_map<std::string, mindspore::MSTensor> row1;
// iter1->GetNextRow(&row1);
//
// auto image1 = row1["image"];
//
// // EXPECT_EQ(*image, *image1);
//
// // Manually terminate the pipeline
// iter1->Stop();
//}
TEST_F(MindDataTestPipeline, TestRescaleSucess2) {
MS_LOG(INFO) << "Doing MindDataTestPipeline-TestRescaleSucess2 with different params.";

View File

@ -332,7 +332,7 @@ TEST_F(MindDataTestCallback, TestCAPICallback) {
ASSERT_OK(schema->add_column("label", mindspore::TypeId::kNumberTypeUInt32, {}));
std::shared_ptr<Dataset> ds = RandomData(44, schema);
ASSERT_NE(ds, nullptr);
ds = ds->Map({transforms::TypeCast("uint64")}, {"label"}, {}, {}, nullptr, {cb1});
ds = ds->Map({std::make_shared<transforms::TypeCast>("uint64")}, {"label"}, {}, {}, nullptr, {cb1});
ASSERT_NE(ds, nullptr);
ds = ds->Repeat(2);
ASSERT_NE(ds, nullptr);

View File

@ -119,7 +119,7 @@ TEST_F(MindDataTestTreeAdapter, TestProjectMapTreeAdapter) {
EXPECT_NE(ds, nullptr);
// Create objects for the tensor ops
std::shared_ptr<TensorOperation> one_hot = transforms::OneHot(10);
std::shared_ptr<TensorTransform> one_hot = std::make_shared<transforms::OneHot>(10);
EXPECT_NE(one_hot, nullptr);
// Create a Map operation, this will automatically add a project after map

View File

@ -34,37 +34,37 @@ using mindspore::MsLogLevel::INFO;
class MindDataTestOptimizationPass : public UT::DatasetOpTesting {};
TEST_F(MindDataTestOptimizationPass, MindDataTestAutoWorkerPass) {
MS_LOG(INFO) << "Doing MindDataTestOptimizationPass-MindDataTestAutoWorkerPass.";
std::shared_ptr<SchemaObj> schema = std::make_shared<SchemaObj>();
ASSERT_TRUE(schema->add_column("label", "uint32", {}));
std::shared_ptr<Dataset> map_leaf = ImageFolder("dir")->SetNumWorkers(0);
std::shared_ptr<Dataset> nonmap_leaf = RandomData(44, schema)->SetNumWorkers(0);
std::shared_ptr<Dataset> batch = Zip({map_leaf, nonmap_leaf})->Batch(1)->SetNumWorkers(0);
std::shared_ptr<Dataset> map = batch->Map({})->SetNumWorkers(0);
// {ImageFolder, RandomData} -> zip -> batch
EXPECT_EQ(map_leaf->IRNode()->num_workers(), 0);
EXPECT_EQ(nonmap_leaf->IRNode()->num_workers(), 0);
EXPECT_EQ(batch->IRNode()->num_workers(), 0);
EXPECT_EQ(map->IRNode()->num_workers(), 0);
std::unique_ptr<IRPass> pass = std::make_unique<AutoWorkerPass>();
bool m = false;
ASSERT_OK(pass->Run(map->IRNode(), &m));
// checking that after this pass, num_workers are set correctly (aka a positive number)
// It is hard to test a exact value because num_threads are different for different machine
// however, this will for sure succeed bc regardless of the total threads on cpu, this would always be >= 1
EXPECT_NE(map_leaf->IRNode()->num_workers(), 0);
EXPECT_NE(nonmap_leaf->IRNode()->num_workers(), 0);
EXPECT_NE(batch->IRNode()->num_workers(), 0);
EXPECT_NE(map->IRNode()->num_workers(), 0);
MS_LOG(DEBUG) << map_leaf->IRNode()->Name() << ": num_worker=" << map_leaf->IRNode()->num_workers();
MS_LOG(DEBUG) << nonmap_leaf->IRNode()->Name() << ": num_worker=" << nonmap_leaf->IRNode()->num_workers();
MS_LOG(DEBUG) << batch->IRNode()->Name() << ": num_worker=" << batch->IRNode()->num_workers();
MS_LOG(DEBUG) << map->IRNode()->Name() << ": num_worker=" << map->IRNode()->num_workers();
}
// TEST_F(MindDataTestOptimizationPass, MindDataTestAutoWorkerPass) {
// MS_LOG(INFO) << "Doing MindDataTestOptimizationPass-MindDataTestAutoWorkerPass.";
//
// std::shared_ptr<SchemaObj> schema = std::make_shared<SchemaObj>();
// ASSERT_TRUE(schema->add_column("label", "uint32", {}));
// std::shared_ptr<Dataset> map_leaf = ImageFolder("dir")->SetNumWorkers(0);
// std::shared_ptr<Dataset> nonmap_leaf = RandomData(44, schema)->SetNumWorkers(0);
// std::shared_ptr<Dataset> batch = Zip({map_leaf, nonmap_leaf})->Batch(1)->SetNumWorkers(0);
// std::shared_ptr<Dataset> map = batch->Map({})->SetNumWorkers(0);
// // {ImageFolder, RandomData} -> zip -> batch
// EXPECT_EQ(map_leaf->IRNode()->num_workers(), 0);
// EXPECT_EQ(nonmap_leaf->IRNode()->num_workers(), 0);
// EXPECT_EQ(batch->IRNode()->num_workers(), 0);
// EXPECT_EQ(map->IRNode()->num_workers(), 0);
//
// std::unique_ptr<IRPass> pass = std::make_unique<AutoWorkerPass>();
// bool m = false;
// ASSERT_OK(pass->Run(map->IRNode(), &m));
//
// // checking that after this pass, num_workers are set correctly (aka a positive number)
// // It is hard to test a exact value because num_threads are different for different machine
// // however, this will for sure succeed bc regardless of the total threads on cpu, this would always be >= 1
// EXPECT_NE(map_leaf->IRNode()->num_workers(), 0);
// EXPECT_NE(nonmap_leaf->IRNode()->num_workers(), 0);
// EXPECT_NE(batch->IRNode()->num_workers(), 0);
// EXPECT_NE(map->IRNode()->num_workers(), 0);
// MS_LOG(DEBUG) << map_leaf->IRNode()->Name() << ": num_worker=" << map_leaf->IRNode()->num_workers();
// MS_LOG(DEBUG) << nonmap_leaf->IRNode()->Name() << ": num_worker=" << nonmap_leaf->IRNode()->num_workers();
// MS_LOG(DEBUG) << batch->IRNode()->Name() << ": num_worker=" << batch->IRNode()->num_workers();
// MS_LOG(DEBUG) << map->IRNode()->Name() << ": num_worker=" << map->IRNode()->num_workers();
//}
TEST_F(MindDataTestOptimizationPass, MindDataTestTensorFusionPass) {
MS_LOG(INFO) << "Doing MindDataTestOptimizationPass-MindDataTestTensorFusionPass.";