fix CI alarms

add 2021 to the copyright year of the affected files

fix CI
Peilin Wang 2021-04-26 17:11:37 -04:00
parent 231e30890d
commit 8bc5098d99
5 changed files with 31 additions and 29 deletions
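The code changes below all apply the same three static-analysis fixes that the CI alarms flag: large parameters are passed by const reference instead of by value, member functions that do not modify state are marked const, and return values that are deliberately ignored are cast to void. A minimal standalone sketch of these patterns, using an illustrative BucketConfig class and member names that are hypothetical rather than actual MindSpore code:

#include <cstdint>
#include <string>
#include <vector>

// Illustrative example only; the class and member names are hypothetical.
class BucketConfig {
 public:
  // Containers are taken by const reference to avoid needless copies.
  BucketConfig(const std::vector<std::string> &columns, const std::vector<int32_t> &boundaries)
      : columns_(columns), boundaries_(boundaries) {}

  // A non-mutating member function is declared const.
  bool IsValid() const { return !columns_.empty() && !boundaries_.empty(); }

  void PrependZeroBoundary() {
    // The iterator returned by insert() is intentionally unused, so it is
    // cast to void to keep the "return value ignored" checker quiet.
    (void)boundaries_.insert(boundaries_.begin(), 0);
  }

 private:
  std::vector<std::string> columns_;
  std::vector<int32_t> boundaries_;
};

The (void) cast does not change behavior; it only records that discarding the return value is intentional.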

View File

@@ -29,8 +29,9 @@
 namespace py = pybind11;
 namespace mindspore {
 namespace dataset {
-BucketBatchByLengthOp::Builder::Builder(std::vector<std::string> length_dependent_columns,
-                                        std::vector<int32_t> bucket_boundaries, std::vector<int32_t> bucket_batch_sizes)
+BucketBatchByLengthOp::Builder::Builder(const std::vector<std::string> &length_dependent_columns,
+                                        const std::vector<int32_t> &bucket_boundaries,
+                                        const std::vector<int32_t> &bucket_batch_sizes)
     : builder_length_dependent_columns_(length_dependent_columns),
       builder_bucket_boundaries_(bucket_boundaries),
       builder_bucket_batch_sizes_(bucket_batch_sizes),
@@ -41,7 +42,7 @@ BucketBatchByLengthOp::Builder::Builder(std::vector<std::string> length_dependen
   builder_op_connector_size_ = config_manager->op_connector_size();
 }
-Status BucketBatchByLengthOp::Builder::SanityCheck() {
+Status BucketBatchByLengthOp::Builder::SanityCheck() const {
   std::string error_message;
   if (builder_length_dependent_columns_.empty()) {
@@ -66,7 +67,7 @@ Status BucketBatchByLengthOp::Builder::Build(std::shared_ptr<BucketBatchByLength
   RETURN_IF_NOT_OK(SanityCheck());
   // insert 0 for the first bucket
-  builder_bucket_boundaries_.insert(builder_bucket_boundaries_.begin(), 0);
+  (void)builder_bucket_boundaries_.insert(builder_bucket_boundaries_.begin(), 0);
   *new_bucket_batch_by_length_op = std::make_shared<BucketBatchByLengthOp>(
     builder_length_dependent_columns_, builder_bucket_boundaries_, builder_bucket_batch_sizes_,
@@ -76,10 +77,10 @@ Status BucketBatchByLengthOp::Builder::Build(std::shared_ptr<BucketBatchByLength
   return Status::OK();
 }
-BucketBatchByLengthOp::BucketBatchByLengthOp(std::vector<std::string> length_dependent_columns,
-                                             std::vector<int32_t> bucket_boundaries,
-                                             std::vector<int32_t> bucket_batch_sizes,
-                                             std::shared_ptr<TensorOp> element_length_function, PadInfo pad_info,
+BucketBatchByLengthOp::BucketBatchByLengthOp(const std::vector<std::string> &length_dependent_columns,
+                                             const std::vector<int32_t> &bucket_boundaries,
+                                             const std::vector<int32_t> &bucket_batch_sizes,
+                                             std::shared_ptr<TensorOp> element_length_function, const PadInfo &pad_info,
                                              bool pad_to_bucket_boundary, bool drop_remainder,
                                              int32_t op_connector_size)
     : PipelineOp(op_connector_size),

View File

@@ -1,5 +1,5 @@
 /**
- * Copyright 2020 Huawei Technologies Co., Ltd
+ * Copyright 2020-2021 Huawei Technologies Co., Ltd
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -37,8 +37,8 @@ class BucketBatchByLengthOp : public PipelineOp {
  public:
   class Builder {
    public:
-    Builder(std::vector<std::string> length_dependent_columns, std::vector<int32_t> bucket_boundaries,
-            std::vector<int32_t> bucket_batch_sizes);
+    Builder(const std::vector<std::string> &length_dependent_columns, const std::vector<int32_t> &bucket_boundaries,
+            const std::vector<int32_t> &bucket_batch_sizes);
     ~Builder() = default;
@@ -85,7 +85,7 @@ class BucketBatchByLengthOp : public PipelineOp {
     Status Build(std::shared_ptr<BucketBatchByLengthOp> *new_bucket_batch_by_length_op);
    private:
-    Status SanityCheck();
+    Status SanityCheck() const;
     std::vector<std::string> builder_length_dependent_columns_;
     std::vector<int32_t> builder_bucket_boundaries_;
@@ -97,9 +97,10 @@ class BucketBatchByLengthOp : public PipelineOp {
     int32_t builder_op_connector_size_;
   };
-  BucketBatchByLengthOp(std::vector<std::string> length_dependent_columns, std::vector<int32_t> bucket_boundaries,
-                        std::vector<int32_t> bucket_batch_sizes, std::shared_ptr<TensorOp> element_length_function,
-                        PadInfo pad_info, bool pad_to_bucket_boundary, bool drop_remainder, int32_t op_connector_size);
+  BucketBatchByLengthOp(const std::vector<std::string> &length_dependent_columns,
+                        const std::vector<int32_t> &bucket_boundaries, const std::vector<int32_t> &bucket_batch_sizes,
+                        std::shared_ptr<TensorOp> element_length_function, const PadInfo &pad_info,
+                        bool pad_to_bucket_boundary, bool drop_remainder, int32_t op_connector_size);
   // Destructor
   ~BucketBatchByLengthOp() = default;

View File

@@ -1,5 +1,5 @@
 /**
- * Copyright 2020 Huawei Technologies Co., Ltd
+ * Copyright 2020-2021 Huawei Technologies Co., Ltd
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -91,7 +91,7 @@ Status ToNumberOp::OutputShape(const std::vector<TensorShape> &input_shapes, std
 }
 template <typename T>
-Status ToNumberOp::ToSignedIntegral(const std::shared_ptr<Tensor> &input, std::shared_ptr<Tensor> *output) {
+Status ToNumberOp::ToSignedIntegral(const std::shared_ptr<Tensor> &input, std::shared_ptr<Tensor> *output) const {
   std::vector<T> casted;
   for (auto it = input->begin<std::string_view>(); it != input->end<std::string_view>(); ++it) {
@@ -128,7 +128,7 @@ Status ToNumberOp::ToSignedIntegral(const std::shared_ptr<Tensor> &input, std::s
 }
 template <typename T>
-Status ToNumberOp::ToUnsignedIntegral(const std::shared_ptr<Tensor> &input, std::shared_ptr<Tensor> *output) {
+Status ToNumberOp::ToUnsignedIntegral(const std::shared_ptr<Tensor> &input, std::shared_ptr<Tensor> *output) const {
   std::vector<T> casted;
   for (auto it = input->begin<std::string_view>(); it != input->end<std::string_view>(); ++it) {
@@ -174,7 +174,7 @@ Status ToNumberOp::ToUnsignedIntegral(const std::shared_ptr<Tensor> &input, std:
   return Status::OK();
 }
-Status ToNumberOp::ToFloat16(const std::shared_ptr<Tensor> &input, std::shared_ptr<Tensor> *output) {
+Status ToNumberOp::ToFloat16(const std::shared_ptr<Tensor> &input, std::shared_ptr<Tensor> *output) const {
   // special case, float16 does not exist in c++, no native support for
   // casting, so cast to float first then use this method, which use Eigen.
   std::shared_ptr<Tensor> temp;
@@ -184,7 +184,7 @@ Status ToNumberOp::ToFloat16(const std::shared_ptr<Tensor> &input, std::shared_p
   return Status::OK();
 }
-Status ToNumberOp::ToFloat(const std::shared_ptr<Tensor> &input, std::shared_ptr<Tensor> *output) {
+Status ToNumberOp::ToFloat(const std::shared_ptr<Tensor> &input, std::shared_ptr<Tensor> *output) const {
   std::vector<float> casted;
   for (auto it = input->begin<std::string_view>(); it != input->end<std::string_view>(); ++it) {
@@ -222,7 +222,7 @@ Status ToNumberOp::ToFloat(const std::shared_ptr<Tensor> &input, std::shared_ptr
   return Status::OK();
 }
-Status ToNumberOp::ToDouble(const std::shared_ptr<Tensor> &input, std::shared_ptr<Tensor> *output) {
+Status ToNumberOp::ToDouble(const std::shared_ptr<Tensor> &input, std::shared_ptr<Tensor> *output) const {
   std::vector<double> casted;
   for (auto it = input->begin<std::string_view>(); it != input->end<std::string_view>(); ++it) {

View File

@@ -1,5 +1,5 @@
 /**
- * Copyright 2020 Huawei Technologies Co., Ltd
+ * Copyright 2020-2021 Huawei Technologies Co., Ltd
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -61,16 +61,16 @@ class ToNumberOp : public TensorOp {
  private:
   template <typename T>
-  Status ToSignedIntegral(const std::shared_ptr<Tensor> &input, std::shared_ptr<Tensor> *output);
+  Status ToSignedIntegral(const std::shared_ptr<Tensor> &input, std::shared_ptr<Tensor> *output) const;
   template <typename T>
-  Status ToUnsignedIntegral(const std::shared_ptr<Tensor> &input, std::shared_ptr<Tensor> *output);
+  Status ToUnsignedIntegral(const std::shared_ptr<Tensor> &input, std::shared_ptr<Tensor> *output) const;
-  Status ToFloat16(const std::shared_ptr<Tensor> &input, std::shared_ptr<Tensor> *output);
+  Status ToFloat16(const std::shared_ptr<Tensor> &input, std::shared_ptr<Tensor> *output) const;
-  Status ToFloat(const std::shared_ptr<Tensor> &input, std::shared_ptr<Tensor> *output);
+  Status ToFloat(const std::shared_ptr<Tensor> &input, std::shared_ptr<Tensor> *output) const;
-  Status ToDouble(const std::shared_ptr<Tensor> &input, std::shared_ptr<Tensor> *output);
+  Status ToDouble(const std::shared_ptr<Tensor> &input, std::shared_ptr<Tensor> *output) const;
   DataType cast_to_type_;
 };

View File

@@ -513,7 +513,7 @@ AbstractBasePtr InferImplExpandDims(const AnalysisEnginePtr &, const PrimitivePt
   std::vector<int64_t> shape;
   std::vector<int64_t> x_shape = x->shape()->shape();
-  shape.insert(shape.end(), x_shape.begin(), x_shape.end());
+  (void)shape.insert(shape.end(), x_shape.begin(), x_shape.end());
   auto axis = primitive->GetAttr("axis");
   auto value = GetValue<int64_t>(axis);
   if (value < -(SizeToInt(x_shape.size()) + 1) || value > SizeToInt(x_shape.size())) {
@@ -523,7 +523,7 @@ AbstractBasePtr InferImplExpandDims(const AnalysisEnginePtr &, const PrimitivePt
   if (value < 0) {
     value = value + SizeToInt(x_shape.size()) + 1;
   }
-  shape.insert(shape.begin() + value, 1);
+  (void)shape.insert(shape.begin() + value, 1);
   auto ret = std::make_shared<AbstractTensor>(x->element(), std::make_shared<Shape>(shape));
   return ret;