!20227 MD CI pclint warning fixes

Merge pull request !20227 from cathwong/ckw_ci_q3_lint3_pclint
i-robot 2021-07-14 16:26:25 +00:00 committed by Gitee
commit 7d61be91cc
10 changed files with 45 additions and 43 deletions
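Most of the hunks below apply one of three recurring pclint fixes: passing non-trivial parameters (std::string, std::shared_ptr, py::list, MSTensor, DataType) by const reference instead of by value, casting deliberately ignored return values to void, and checking the result of std::regex_match before reading the match groups. As a rough, self-contained sketch of the first two patterns (hypothetical Record type and AddRecord helper, not code from the changed files):

#include <cstdint>
#include <string>
#include <vector>

struct Record {
  std::string name;
  int64_t freq;
};

// Pass 'name' by const reference rather than by value, the change pclint
// asks for on non-trivial parameter types.
void AddRecord(std::vector<Record> *list, const std::string &name, int64_t freq) {
  // emplace_back returns a reference (since C++17); the (void) cast records
  // that the return value is discarded on purpose.
  (void)list->emplace_back(Record{name, freq});
}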


@@ -250,7 +250,7 @@ PYBIND_REGISTER(TFRecordNode, 2, ([](const py::module *m) {
THROW_IF_ERROR(tfrecord->ValidateParams());
return tfrecord;
}))
.def(py::init([](const py::list dataset_files, std::string schema, py::list columns_list,
.def(py::init([](const py::list dataset_files, std::string schema, const py::list columns_list,
int64_t num_samples, int32_t shuffle, int32_t num_shards, int32_t shard_id,
bool shard_equal_rows) {
std::shared_ptr<TFRecordNode> tfrecord = std::make_shared<TFRecordNode>(


@@ -76,15 +76,17 @@ PYBIND_REGISTER(
}));
}));
PYBIND_REGISTER(ConcatenateOperation, 1, ([](const py::module *m) {
(void)py::class_<transforms::ConcatenateOperation, TensorOperation,
std::shared_ptr<transforms::ConcatenateOperation>>(*m, "ConcatenateOperation")
.def(py::init([](int8_t axis, std::shared_ptr<Tensor> prepend, std::shared_ptr<Tensor> append) {
auto concatenate = std::make_shared<transforms::ConcatenateOperation>(axis, prepend, append);
THROW_IF_ERROR(concatenate->ValidateParams());
return concatenate;
}));
}));
PYBIND_REGISTER(
ConcatenateOperation, 1, ([](const py::module *m) {
(void)
py::class_<transforms::ConcatenateOperation, TensorOperation, std::shared_ptr<transforms::ConcatenateOperation>>(
*m, "ConcatenateOperation")
.def(py::init([](int8_t axis, const std::shared_ptr<Tensor> &prepend, const std::shared_ptr<Tensor> &append) {
auto concatenate = std::make_shared<transforms::ConcatenateOperation>(axis, prepend, append);
THROW_IF_ERROR(concatenate->ValidateParams());
return concatenate;
}));
}));
PYBIND_REGISTER(
DuplicateOperation, 1, ([](const py::module *m) {
@@ -101,7 +103,7 @@ PYBIND_REGISTER(FillOperation, 1, ([](const py::module *m) {
(void)
py::class_<transforms::FillOperation, TensorOperation, std::shared_ptr<transforms::FillOperation>>(
*m, "FillOperation")
.def(py::init([](std::shared_ptr<Tensor> fill_value) {
.def(py::init([](const std::shared_ptr<Tensor> &fill_value) {
auto fill = std::make_shared<transforms::FillOperation>(fill_value);
THROW_IF_ERROR(fill->ValidateParams());
return fill;
@@ -112,7 +114,7 @@ PYBIND_REGISTER(MaskOperation, 1, ([](const py::module *m) {
(void)
py::class_<transforms::MaskOperation, TensorOperation, std::shared_ptr<transforms::MaskOperation>>(
*m, "MaskOperation")
.def(py::init([](RelationalOp op, std::shared_ptr<Tensor> constant, DataType dtype) {
.def(py::init([](RelationalOp op, const std::shared_ptr<Tensor> &constant, DataType dtype) {
auto mask = std::make_shared<transforms::MaskOperation>(op, constant, dtype);
THROW_IF_ERROR(mask->ValidateParams());
return mask;
@@ -134,7 +136,7 @@ PYBIND_REGISTER(
PadEndOperation, 1, ([](const py::module *m) {
(void)py::class_<transforms::PadEndOperation, TensorOperation, std::shared_ptr<transforms::PadEndOperation>>(
*m, "PadEndOperation")
.def(py::init([](TensorShape pad_shape, std::shared_ptr<Tensor> pad_value) {
.def(py::init([](TensorShape pad_shape, const std::shared_ptr<Tensor> &pad_value) {
auto pad_end = std::make_shared<transforms::PadEndOperation>(pad_shape, pad_value);
THROW_IF_ERROR(pad_end->ValidateParams());
return pad_end;
@@ -213,7 +215,7 @@ PYBIND_REGISTER(
TypeCastOperation, 1, ([](const py::module *m) {
(void)py::class_<transforms::TypeCastOperation, TensorOperation, std::shared_ptr<transforms::TypeCastOperation>>(
*m, "TypeCastOperation")
.def(py::init([](std::string data_type) {
.def(py::init([](const std::string &data_type) {
auto type_cast = std::make_shared<transforms::TypeCastOperation>(data_type);
THROW_IF_ERROR(type_cast->ValidateParams());
return type_cast;


@@ -201,7 +201,7 @@ PYBIND_REGISTER(
PYBIND_REGISTER(ToNumberOperation, 1, ([](const py::module *m) {
(void)py::class_<text::ToNumberOperation, TensorOperation, std::shared_ptr<text::ToNumberOperation>>(
*m, "ToNumberOperation")
.def(py::init([](std::string data_type) {
.def(py::init([](const std::string &data_type) {
auto to_number = std::make_shared<text::ToNumberOperation>(data_type);
THROW_IF_ERROR(to_number->ValidateParams());
return to_number;


@@ -152,7 +152,7 @@ Status JiebaTokenizer::AddWordChar(const std::vector<char> &word, int64_t freq)
MS_LOG(ERROR) << err_msg;
RETURN_STATUS_SYNTAX_ERROR(err_msg);
}
data_->words_list_.emplace_back(CharToString(word), freq);
(void)data_->words_list_.emplace_back(CharToString(word), freq);
return Status::OK();
}
@@ -192,12 +192,11 @@ Status JiebaTokenizer::ParserFile(const std::string &file_path,
}
std::regex regex("^\\s*([^\\s*]+?)\\s*([0-9]+)?\\s*$");
std::smatch tokens;
std::regex_match(line, tokens, regex);
if (std::regex_match(line, tokens, regex)) {
if (tokens.size() == size_two) {
user_dict->emplace_back(tokens.str(value_one), 0);
(void)user_dict->emplace_back(tokens.str(value_one), 0);
} else if (tokens.size() == size_three) {
user_dict->emplace_back(tokens.str(value_one), strtoll(tokens.str(value_two).c_str(), NULL, 0));
(void)user_dict->emplace_back(tokens.str(value_one), strtoll(tokens.str(value_two).c_str(), NULL, 0));
} else {
continue;
}
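The ParserFile hunk above only reads the std::smatch when std::regex_match reports a successful match. A standalone sketch of that pattern, using a hypothetical ParseDict helper rather than the MindSpore parser itself:

#include <cstdint>
#include <cstdlib>
#include <regex>
#include <string>
#include <utility>
#include <vector>

// Parse lines of the form "<word> [frequency]" into (word, frequency) pairs,
// skipping lines that do not match instead of reading an unchecked smatch.
std::vector<std::pair<std::string, int64_t>> ParseDict(const std::vector<std::string> &lines) {
  std::vector<std::pair<std::string, int64_t>> user_dict;
  const std::regex regex("^\\s*([^\\s*]+?)\\s*([0-9]+)?\\s*$");
  for (const std::string &line : lines) {
    std::smatch tokens;
    if (std::regex_match(line, tokens, regex)) {  // 'tokens' is only valid on success
      int64_t freq = tokens[2].matched ? std::strtoll(tokens.str(2).c_str(), nullptr, 0) : 0;
      (void)user_dict.emplace_back(tokens.str(1), freq);
    }
  }
  return user_dict;
}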


@@ -62,13 +62,14 @@ std::shared_ptr<TensorOperation> Compose::Parse() { return std::make_shared<Comp
#ifndef ENABLE_ANDROID
// Constructor to Concatenate
struct Concatenate::Data {
explicit Data(int8_t axis, MSTensor prepend, MSTensor append) : axis_(axis), prepend_(prepend), append_(append) {}
explicit Data(int8_t axis, const MSTensor &prepend, const MSTensor &append)
: axis_(axis), prepend_(prepend), append_(append) {}
int8_t axis_;
MSTensor prepend_;
MSTensor append_;
};
Concatenate::Concatenate(int8_t axis, MSTensor prepend, MSTensor append)
Concatenate::Concatenate(int8_t axis, const MSTensor &prepend, const MSTensor &append)
: data_(std::make_shared<Data>(axis, prepend, append)) {}
std::shared_ptr<TensorOperation> Concatenate::Parse() {
@@ -95,11 +96,11 @@ std::shared_ptr<TensorOperation> Duplicate::Parse() { return std::make_shared<Du
#ifndef ENABLE_ANDROID
// Constructor to Fill
struct Fill::Data {
explicit Data(MSTensor fill_value) : fill_value_(fill_value) {}
explicit Data(const MSTensor &fill_value) : fill_value_(fill_value) {}
MSTensor fill_value_;
};
Fill::Fill(MSTensor fill_value) : data_(std::make_shared<Data>(fill_value)) {}
Fill::Fill(const MSTensor &fill_value) : data_(std::make_shared<Data>(fill_value)) {}
std::shared_ptr<TensorOperation> Fill::Parse() {
std::shared_ptr<Tensor> out_fill_value;
@@ -113,14 +114,14 @@ std::shared_ptr<TensorOperation> Fill::Parse() {
// Constructor to Mask
struct Mask::Data {
explicit Data(RelationalOp op, MSTensor constant, mindspore::DataType ms_type)
explicit Data(RelationalOp op, const MSTensor &constant, mindspore::DataType ms_type)
: op_(op), constant_(constant), ms_type_(ms_type) {}
RelationalOp op_;
MSTensor constant_;
mindspore::DataType ms_type_;
};
Mask::Mask(RelationalOp op, MSTensor constant, mindspore::DataType ms_type)
Mask::Mask(RelationalOp op, const MSTensor &constant, mindspore::DataType ms_type)
: data_(std::make_shared<Data>(op, constant, ms_type)) {}
std::shared_ptr<TensorOperation> Mask::Parse() {
@@ -149,13 +150,13 @@ std::shared_ptr<TensorOperation> OneHot::Parse() { return std::make_shared<OneHo
#ifndef ENABLE_ANDROID
// Constructor to PadEnd
struct PadEnd::Data {
explicit Data(const std::vector<dsize_t> &pad_shape, MSTensor pad_value)
explicit Data(const std::vector<dsize_t> &pad_shape, const MSTensor &pad_value)
: pad_shape_(pad_shape), pad_value_(pad_value) {}
std::vector<dsize_t> pad_shape_;
MSTensor pad_value_;
};
PadEnd::PadEnd(const std::vector<dsize_t> &pad_shape, MSTensor pad_value)
PadEnd::PadEnd(const std::vector<dsize_t> &pad_shape, const MSTensor &pad_value)
: data_(std::make_shared<Data>(pad_shape, pad_value)) {}
std::shared_ptr<TensorOperation> PadEnd::Parse() {
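In the Data-struct changes above the constructors now take their arguments by const reference while the struct keeps storing copies, so only the calling convention changes. A small sketch of that shape, with a hypothetical Payload type standing in for MSTensor and PadEndLike standing in for the real transform:

#include <cstdint>
#include <memory>
#include <vector>

struct Payload {  // stand-in for MSTensor
  std::vector<float> values;
};

class PadEndLike {
  struct Data {  // private implementation data, owned through a shared_ptr
    Data(const std::vector<int64_t> &pad_shape, const Payload &pad_value)
        : pad_shape_(pad_shape), pad_value_(pad_value) {}
    std::vector<int64_t> pad_shape_;
    Payload pad_value_;
  };

 public:
  // Arguments are taken by const reference (the pclint fix); the stored
  // members are still independent copies, so behaviour is unchanged.
  PadEndLike(const std::vector<int64_t> &pad_shape, const Payload &pad_value)
      : data_(std::make_shared<Data>(pad_shape, pad_value)) {}

 private:
  std::shared_ptr<Data> data_;
};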


@@ -160,7 +160,7 @@ class Concatenate final : public TensorTransform {
/// \param[in] axis Concatenate the tensors along given axis, only support 0 or -1 so far (default=0).
/// \param[in] prepend MSTensor to be prepended to the concatenated tensors (default={}).
/// \param[in] append MSTensor to be appended to the concatenated tensors (default={}).
explicit Concatenate(int8_t axis = 0, MSTensor prepend = {}, MSTensor append = {});
explicit Concatenate(int8_t axis = 0, const MSTensor &prepend = {}, const MSTensor &append = {});
/// \brief Destructor
~Concatenate() = default;
@@ -199,7 +199,7 @@ class Fill final : public TensorTransform {
/// \param[in] fill_value Scalar value to fill the tensor with.
/// It can only be MSTensor of the following types from mindspore::DataType:
/// String, Bool, Int8/16/32/64, UInt8/16/32/64, Float16/32/64.
explicit Fill(MSTensor fill_value);
explicit Fill(const MSTensor &fill_value);
/// \brief Destructor
~Fill() = default;
@@ -224,7 +224,7 @@ class Mask final : public TensorTransform {
/// from mindspore::DataType: String, Int, Float, Bool.
/// \param[in] de_type Type of the generated mask. It can only be numeric or boolean datatype.
/// (default=mindspore::DataType::kNumberTypeBool)
explicit Mask(RelationalOp op, MSTensor constant,
explicit Mask(RelationalOp op, const MSTensor &constant,
mindspore::DataType ms_type = mindspore::DataType(mindspore::DataType::kNumberTypeBool));
/// \brief Destructor
@@ -268,7 +268,7 @@ class PadEnd final : public TensorTransform {
/// Dimensions that set to `-1` will not be padded (i.e., original dim will be used).
/// Shorter dimensions will truncate the values.
/// \param[in] pad_value Value used to pad (default={}).
explicit PadEnd(const std::vector<dsize_t> &pad_shape, MSTensor pad_value = {});
explicit PadEnd(const std::vector<dsize_t> &pad_shape, const MSTensor &pad_value = {});
/// \brief Destructor
~PadEnd() = default;


@@ -116,7 +116,7 @@ std::shared_ptr<TensorOp> DuplicateOperation::Build() { return std::make_shared<
#ifndef ENABLE_ANDROID
// FillOperation
FillOperation::FillOperation(std::shared_ptr<Tensor> fill_value) : fill_value_(fill_value) {}
FillOperation::FillOperation(const std::shared_ptr<Tensor> &fill_value) : fill_value_(fill_value) {}
Status FillOperation::ValidateParams() {
if (fill_value_->shape() != TensorShape::CreateScalar()) {
@@ -246,10 +246,10 @@ std::shared_ptr<TensorOp> SliceOperation::Build() { return std::make_shared<Slic
// TypeCastOperation
// DataType data_type - required for C++ API
TypeCastOperation::TypeCastOperation(DataType data_type) : data_type_(data_type) {}
TypeCastOperation::TypeCastOperation(const DataType &data_type) : data_type_(data_type) {}
// std::string data_type - required for Pybind
TypeCastOperation::TypeCastOperation(std::string data_type) {
TypeCastOperation::TypeCastOperation(const std::string &data_type) {
// Convert from string to DEType
DataType temp_data_type(data_type);
data_type_ = temp_data_type;
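TypeCastOperation (and ToNumberOperation further down) keep two constructor overloads, a strongly typed one for the C++ API and a std::string one for the Python bindings that converts to the type object internally; both now take const references. A sketch of that overload pattern with a hypothetical DTypeLike stand-in for DataType:

#include <string>

class DTypeLike {  // hypothetical stand-in for the DataType wrapper
 public:
  DTypeLike() = default;
  explicit DTypeLike(const std::string &name) : name_(name) {}  // string -> type conversion

 private:
  std::string name_ = "unknown";
};

class TypeCastLike {
 public:
  // Used from C++: the caller already holds a DTypeLike.
  explicit TypeCastLike(const DTypeLike &data_type) : data_type_(data_type) {}
  // Used from the Python bindings: accept the type name and convert once.
  explicit TypeCastLike(const std::string &data_type) : data_type_(DTypeLike(data_type)) {}

 private:
  DTypeLike data_type_;
};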


@@ -97,7 +97,7 @@ class DuplicateOperation : public TensorOperation {
class FillOperation : public TensorOperation {
public:
explicit FillOperation(std::shared_ptr<Tensor> fill_value);
explicit FillOperation(const std::shared_ptr<Tensor> &fill_value);
~FillOperation() = default;
@@ -235,8 +235,8 @@ class SliceOperation : public TensorOperation {
class TypeCastOperation : public TensorOperation {
public:
explicit TypeCastOperation(DataType data_type); // Used for C++ API
explicit TypeCastOperation(std::string data_type); // Used for Pybind
explicit TypeCastOperation(const DataType &data_type); // Used for C++ API
explicit TypeCastOperation(const std::string &data_type); // Used for Pybind
~TypeCastOperation() = default;


@@ -169,7 +169,7 @@ Status JiebaTokenizerOperation::AddWord(const std::string &word, int64_t freq) {
// LookupOperation
// DataType data_type - required for C++ API
LookupOperation::LookupOperation(const std::shared_ptr<Vocab> &vocab, const std::optional<std::string> &unknown_token,
DataType data_type)
const DataType &data_type)
: vocab_(vocab), unknown_token_(unknown_token), default_id_(Vocab::kNoTokenExists), data_type_(data_type) {}
// std::string data_type - required for Pybind
@@ -364,10 +364,10 @@ std::shared_ptr<TensorOp> SlidingWindowOperation::Build() {
// ToNumberOperation
// DataType data_type - required for C++ API
ToNumberOperation::ToNumberOperation(DataType data_type) : data_type_(data_type) {}
ToNumberOperation::ToNumberOperation(const DataType &data_type) : data_type_(data_type) {}
// std::string data_type - required for Pybind
ToNumberOperation::ToNumberOperation(std::string data_type) {
ToNumberOperation::ToNumberOperation(const std::string &data_type) {
// Convert from string to DEType
DataType temp_data_type(data_type);
data_type_ = temp_data_type;


@@ -142,7 +142,7 @@ class JiebaTokenizerOperation : public TensorOperation {
class LookupOperation : public TensorOperation {
public:
explicit LookupOperation(const std::shared_ptr<Vocab> &vocab, const std::optional<std::string> &unknown_token,
DataType data_type); // Used for C++ API
const DataType &data_type); // Used for C++ API
explicit LookupOperation(const std::shared_ptr<Vocab> &vocab, const std::optional<std::string> &unknown_token,
const std::string &data_type); // Used for Pybind
@@ -275,8 +275,8 @@ class SlidingWindowOperation : public TensorOperation {
class ToNumberOperation : public TensorOperation {
public:
explicit ToNumberOperation(DataType data_type); // Used for C++ API
explicit ToNumberOperation(std::string data_type); // Used for Pybind
explicit ToNumberOperation(const DataType &data_type); // Used for C++ API
explicit ToNumberOperation(const std::string &data_type); // Used for Pybind
~ToNumberOperation() = default;