From fd58f1e18db32a36832853d23b40f9ae38644087 Mon Sep 17 00:00:00 2001 From: liubuyu Date: Thu, 15 Oct 2020 15:32:29 +0800 Subject: [PATCH] add some ops implemented in C++ --- mindspore/core/base/core_ops.h | 1 + mindspore/core/c_ops/add.cc | 53 ++++++ mindspore/core/c_ops/add.h | 42 +++++ mindspore/core/c_ops/avg_pool.cc | 104 ++++++++++++ mindspore/core/c_ops/avg_pool.h | 50 ++++++ mindspore/core/c_ops/conv2d.cc | 91 +++++------ mindspore/core/c_ops/conv2d.h | 34 ++-- mindspore/core/c_ops/depthwise_conv2d.cc | 199 +++++++++++++++++++++++ mindspore/core/c_ops/depthwise_conv2d.h | 60 +++++++ mindspore/core/c_ops/op_utils.h | 46 ++++++ mindspore/core/c_ops/op_utils.h.cc | 57 +++++++ mindspore/core/c_ops/relu6.cc | 52 ++++++ mindspore/core/c_ops/relu6.h | 40 +++++ mindspore/core/c_ops/reshape.cc | 44 +++++ mindspore/core/c_ops/reshape.h | 42 +++++ mindspore/core/c_ops/softmax.cc | 68 ++++++++ mindspore/core/c_ops/softmax.h | 44 +++++ mindspore/core/c_ops/squeeze.cc | 79 +++++++++ mindspore/core/c_ops/squeeze.h | 46 ++++++ 19 files changed, 1087 insertions(+), 65 deletions(-) create mode 100644 mindspore/core/c_ops/add.cc create mode 100644 mindspore/core/c_ops/add.h create mode 100644 mindspore/core/c_ops/avg_pool.cc create mode 100644 mindspore/core/c_ops/avg_pool.h create mode 100644 mindspore/core/c_ops/depthwise_conv2d.cc create mode 100644 mindspore/core/c_ops/depthwise_conv2d.h create mode 100644 mindspore/core/c_ops/op_utils.h create mode 100644 mindspore/core/c_ops/op_utils.h.cc create mode 100644 mindspore/core/c_ops/relu6.cc create mode 100644 mindspore/core/c_ops/relu6.h create mode 100644 mindspore/core/c_ops/reshape.cc create mode 100644 mindspore/core/c_ops/reshape.h create mode 100644 mindspore/core/c_ops/softmax.cc create mode 100644 mindspore/core/c_ops/softmax.h create mode 100644 mindspore/core/c_ops/squeeze.cc create mode 100644 mindspore/core/c_ops/squeeze.h diff --git a/mindspore/core/base/core_ops.h b/mindspore/core/base/core_ops.h index 
35799bcdd22..d02996c1b2b 100644 --- a/mindspore/core/base/core_ops.h +++ b/mindspore/core/base/core_ops.h @@ -124,6 +124,7 @@ inline const PrimitivePtr kPrimPoolingGrad = std::make_shared("Poolin inline const PrimitivePtr kPrimMaxPool = std::make_shared("MaxPool"); inline const PrimitivePtr kPrimMaxPoolGrad = std::make_shared("MaxPoolGrad"); inline const PrimitivePtr kPrimApplyCenteredRMSProp = std::make_shared("ApplyCenteredRMSProp"); +inline const PrimitivePtr kPrimAvgPool = std::make_shared("AvgPool"); inline const PrimitivePtr kPrimAvgPoolGrad = std::make_shared("AvgPoolGrad"); inline const PrimitivePtr kPrimAvgPoolGradVm = std::make_shared("AvgPoolGradVm"); inline const PrimitivePtr kPrimFusedBatchNorm = std::make_shared("FusedBatchNorm"); diff --git a/mindspore/core/c_ops/add.cc b/mindspore/core/c_ops/add.cc new file mode 100644 index 00000000000..7646f1e47a8 --- /dev/null +++ b/mindspore/core/c_ops/add.cc @@ -0,0 +1,53 @@ +/** + * Copyright 2019-2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "c_ops/add.h" +#include +#include +#include +#include +#include +#include "c_ops/op_utils.h" +#include "utils/check_convert_utils.h" +#include "abstract/primitive_infer_map.h" + +namespace mindspore { +abstract::ShapePtr InferShape(const PrimitivePtr &primitive, const std::vector &input_args) { + MS_EXCEPTION_IF_NULL(primitive); + auto add_prim = primitive->cast(); + MS_EXCEPTION_IF_NULL(add_prim); + auto op_name = add_prim->name(); + return BroadCastInferShape(op_name, input_args); +} + +TypePtr InferType(const PrimitivePtr &prim, const std::vector &input_args) { + if (std::any_of(input_args.begin(), input_args.end(), [](const AbstractBasePtr &a) { return a == nullptr; })) { + MS_LOG(EXCEPTION) << "nullptr"; + } + std::map types; + types.emplace("x", input_args[0]->BuildType()); + types.emplace("y", input_args[1]->BuildType()); + auto infer_type = CheckAndConvertUtils::CheckTensorTypeSame(types, common_valid_types, prim->name()); + return TypeIdToType(infer_type); +} + +AbstractBasePtr TensorAddInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args) { + return std::make_shared(InferType(primitive, input_args), + InferShape(primitive, input_args)->shape()); +} +REGISTER_PRIMITIVE_EVAL_IMPL(TensorAdd, prim::kPrimTensorAdd, TensorAddInfer); +} // namespace mindspore diff --git a/mindspore/core/c_ops/add.h b/mindspore/core/c_ops/add.h new file mode 100644 index 00000000000..3921d33d721 --- /dev/null +++ b/mindspore/core/c_ops/add.h @@ -0,0 +1,42 @@ +/** + * Copyright 2019-2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_CORE_C_OPS_ADD_H_ +#define MINDSPORE_CORE_C_OPS_ADD_H_ +#include +#include +#include +#include +#include "c_ops/primitive_c.h" +#include "abstract/abstract_value.h" +#include "utils/check_convert_utils.h" + +namespace mindspore { +constexpr auto kNameTensorAdd = "TensorAdd"; +class TensorAdd : public PrimitiveC { + public: + TensorAdd() : PrimitiveC(kNameTensorAdd) { InitIOName({"x", "y"}, {"output"}); } + ~TensorAdd() = default; + MS_DECLARE_PARENT(TensorAdd, PrimitiveC); + void Init() {} +}; + +AbstractBasePtr TensorAddInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args); +using PrimTensorAddPtr = std::shared_ptr; +} // namespace mindspore + +#endif // MINDSPORE_CORE_C_OPS_ADD_H_ diff --git a/mindspore/core/c_ops/avg_pool.cc b/mindspore/core/c_ops/avg_pool.cc new file mode 100644 index 00000000000..183b2619c4b --- /dev/null +++ b/mindspore/core/c_ops/avg_pool.cc @@ -0,0 +1,104 @@ +/** + * Copyright 2019-2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +#include "c_ops/avg_pool.h" +#include +#include +#include +#include +#include +#include "c_ops/op_utils.h" +#include "utils/check_convert_utils.h" +#include "abstract/primitive_infer_map.h" + +namespace mindspore { +void AvgPool::set_padding(const std::string &pad) { this->AddAttr("padding", MakeValue(pad)); } + +void AvgPool::set_kernel_size(const std::vector &kernel_size) { this->AddAttr("ksize", MakeValue(kernel_size)); } + +void AvgPool::set_strides(const std::vector &strides) { this->AddAttr("strides", MakeValue(strides)); } + +std::vector AvgPool::get_strides() const { + auto value_ptr = GetAttr("strides"); + return GetValue>(value_ptr); +} + +std::vector AvgPool::get_kernel_size() const { + auto value_ptr = GetAttr("ksize"); + return GetValue>(value_ptr); +} + +std::string AvgPool::get_padding() const { + auto value_ptr = GetAttr("padding"); + return GetValue(value_ptr); +} + +void AvgPool::Init(const std::vector &kernel_size, const std::vector &stride, const std::string &padding) { + auto prim_name = this->name(); + this->AddAttr("data_format", MakeValue("NCHW")); + this->set_padding(CheckAndConvertUtils::CheckString("padding", padding, {"valid", "same"}, prim_name)); + this->set_kernel_size(CheckAndConvertUtils::CheckPositiveVector("ksize", kernel_size, prim_name, false, true)); + this->set_strides(CheckAndConvertUtils::CheckPositiveVector("strides", stride, this->name(), false, true)); +} + +abstract::ShapePtr InferShape(const PrimitivePtr &primitive, const std::vector &input_args) { + MS_EXCEPTION_IF_NULL(primitive); + auto pool_prim = primitive->cast(); + MS_EXCEPTION_IF_NULL(pool_prim); + auto op_name = pool_prim->name(); + auto in_shape = CheckAndConvertUtils::ConvertShapePtrToShape("x_shape", input_args[0]->GetShapeTrack(), op_name); + CheckAndConvertUtils::CheckInteger("x_rank", in_shape.size(), kEqual, 4, op_name); + auto kernel_size = 
pool_prim->get_kernel_size(); + auto pad_mode = pool_prim->get_padding(); + auto batch = in_shape[0]; + auto channel = in_shape[1]; + auto in_h = in_shape[2]; + auto in_w = in_shape[3]; + + auto strides = pool_prim->get_strides(); + auto kernel_h = kernel_size[2]; + auto kernel_w = kernel_size[3]; + auto stride_h = strides[2]; + auto stride_w = strides[3]; + int out_h = -1; + int out_w = -1; + if (pad_mode == "valid") { + out_h = ceil((in_h - (kernel_h - 1)) / stride_h); + out_w = ceil((in_w - (kernel_w - 1)) / stride_w); + } else if (pad_mode == "same") { + out_h = ceil(in_h / stride_h); + out_w = ceil(in_w / stride_w); + } + std::vector out_shape = {batch, channel, out_h, out_w}; + if (std::any_of(out_shape.begin(), out_shape.end(), [](int a) { return a <= 0; })) { + MS_LOG(EXCEPTION) << "Kernel size is not valid."; + } + return std::make_shared(out_shape); +} + +TypePtr InferType(const PrimitivePtr &prim, const std::vector &input_args) { + if (std::any_of(input_args.begin(), input_args.end(), [](AbstractBasePtr a) { return a == nullptr; })) { + MS_LOG(EXCEPTION) << "nullptr"; + } + return input_args[0]->BuildType(); +} + +AbstractBasePtr AvgPoolInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args) { + return std::make_shared(InferType(primitive, input_args), + InferShape(primitive, input_args)->shape()); +} +REGISTER_PRIMITIVE_EVAL_IMPL(AvgPool, prim::kPrimAvgPool, AvgPoolInfer); +} // namespace mindspore diff --git a/mindspore/core/c_ops/avg_pool.h b/mindspore/core/c_ops/avg_pool.h new file mode 100644 index 00000000000..6ae01b239af --- /dev/null +++ b/mindspore/core/c_ops/avg_pool.h @@ -0,0 +1,50 @@ +/** + * Copyright 2019-2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_CORE_C_OPS_AVG_POOL_H_ +#define MINDSPORE_CORE_C_OPS_AVG_POOL_H_ + +#include +#include +#include +#include +#include "c_ops/primitive_c.h" +#include "abstract/abstract_value.h" +#include "utils/check_convert_utils.h" + +namespace mindspore { +constexpr auto kNameAvgPool = "AvgPool"; +class AvgPool : public PrimitiveC { + public: + AvgPool() : PrimitiveC(kNameAvgPool) { InitIOName({"x"}, {"output"}); } + ~AvgPool() = default; + MS_DECLARE_PARENT(AvgPool, PrimitiveC); + void Init(const std::vector &kernel_size = {1}, const std::vector &stride = {1}, + const std::string &padding = "valid"); + void set_padding(const std::string &pad); + void set_kernel_size(const std::vector &kernel_size); + void set_strides(const std::vector &strides); + std::vector get_kernel_size() const; + std::vector get_strides() const; + std::string get_padding() const; +}; + +AbstractBasePtr AvgPoolInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args); +using PrimAvgPoolPtr = std::shared_ptr; +} // namespace mindspore + +#endif // MINDSPORE_CORE_C_OPS_AVG_POOL_H_ diff --git a/mindspore/core/c_ops/conv2d.cc b/mindspore/core/c_ops/conv2d.cc index 00bce06467c..a25241401ae 100644 --- a/mindspore/core/c_ops/conv2d.cc +++ b/mindspore/core/c_ops/conv2d.cc @@ -25,22 +25,12 @@ namespace mindspore { namespace { -constexpr auto kKernelSize = "kernel_size"; -constexpr auto kStride = "stride"; -constexpr auto kDilation = "dilation"; -constexpr auto kPadMode = "pad_mode"; -constexpr auto kPad = "pad"; -constexpr 
auto kMode = "mode"; -constexpr auto kGroup = "group"; -constexpr auto kOutputChannel = "output channel"; -constexpr auto kPadList = "pad_list"; -constexpr auto kConv2DName = "Conv2D"; abstract::ShapePtr Conv2dInferShape(const PrimitivePtr &primitive, const std::vector &input_args) { MS_EXCEPTION_IF_NULL(primitive); auto conv_prim = primitive->cast(); MS_EXCEPTION_IF_NULL(conv_prim); auto prim_name = conv_prim->name(); - CheckAndConvertUtils::CheckInRange("Conv2d Infer", input_args.size(), kIncludeBoth, {2, 3}, prim_name); + CheckAndConvertUtils::CheckInRange("conv2d_infer", input_args.size(), kIncludeBoth, {2, 3}, prim_name); auto x_shape = CheckAndConvertUtils::ConvertShapePtrToShape("x_shape", input_args[0]->GetShapeTrack(), prim_name); auto w_shape = CheckAndConvertUtils::ConvertShapePtrToShape("w_shape", input_args[1]->GetShapeTrack(), prim_name); @@ -48,17 +38,17 @@ abstract::ShapePtr Conv2dInferShape(const PrimitivePtr &primitive, const std::ve CheckAndConvertUtils::CheckInteger("x rank", x_shape.size(), kEqual, 4, prim_name); CheckAndConvertUtils::Check("x_shape[1] / group", x_shape[1] / conv_prim->get_group(), kEqual, "w_shape[1]", w_shape[1], conv_prim->name()); - auto out_channel = conv_prim->GetOutputChannel(); + auto out_channel = conv_prim->get_output_channel(); CheckAndConvertUtils::Check("out_channel", out_channel, kEqual, "w_shape[0]", w_shape[0], conv_prim->name()); std::vector temp_w; std::copy(w_shape.begin() + 2, w_shape.end(), std::back_inserter(temp_w)); - CheckAndConvertUtils::Check("kernel_size", conv_prim->GetKernelSize(), kEqual, "w_shape[2:4]", temp_w, + CheckAndConvertUtils::Check("kernel_size", conv_prim->get_kernel_size(), kEqual, "w_shape[2:4]", temp_w, conv_prim->name()); auto kernel_size_h = w_shape[2]; auto kernel_size_w = w_shape[3]; - auto stride = conv_prim->GetStride(); - auto dilation = conv_prim->GetDilation(); + auto stride = conv_prim->get_stride(); + auto dilation = conv_prim->get_dilation(); auto stride_h = stride[2]; 
auto stride_w = stride[3]; auto dilation_h = dilation[2]; @@ -66,7 +56,7 @@ abstract::ShapePtr Conv2dInferShape(const PrimitivePtr &primitive, const std::ve int h_out = -1; int w_out = -1; std::vector pad_list(4, 0); - auto pad_mode = conv_prim->GetPadMode(); + auto pad_mode = conv_prim->get_pad_mode(); if (pad_mode == "valid") { h_out = ceil((x_shape[2] - dilation_h * (kernel_size_h - 1)) / stride_h); w_out = ceil((x_shape[3] - dilation_w * (kernel_size_w - 1)) / stride_w); @@ -82,18 +72,18 @@ abstract::ShapePtr Conv2dInferShape(const PrimitivePtr &primitive, const std::ve pad_list.emplace_back(pad_left); pad_list.emplace_back(pad_needed_h - pad_left); } else if (pad_mode == "pad") { - std::copy(conv_prim->GetPad().begin(), conv_prim->GetPad().end(), std::back_inserter(pad_list)); - auto pad_top = conv_prim->GetPad()[0]; - auto pad_bottom = conv_prim->GetPad()[1]; - auto pad_right = conv_prim->GetPad()[2]; - auto pad_left = conv_prim->GetPad()[3]; + std::copy(conv_prim->get_pad().begin(), conv_prim->get_pad().end(), std::back_inserter(pad_list)); + auto pad_top = conv_prim->get_pad()[0]; + auto pad_bottom = conv_prim->get_pad()[1]; + auto pad_right = conv_prim->get_pad()[2]; + auto pad_left = conv_prim->get_pad()[3]; h_out = 1 + (x_shape[2] + pad_top + pad_bottom - kernel_size_h - (kernel_size_h - 1) * (dilation_h - 1)) / stride_h; w_out = 1 + (x_shape[3] + pad_left + pad_right - kernel_size_w - (kernel_size_w - 1) * (dilation_w - 1)) / stride_w; h_out = floor(h_out); w_out = floor(w_out); } - conv_prim->SetPadList(pad_list); + conv_prim->set_pad_list(pad_list); std::vector out_shape = {x_shape[0], out_channel, h_out, w_out}; return std::make_shared(out_shape); } @@ -122,44 +112,45 @@ void Conv2D::Init(int out_channel, const std::vector &kernel_size, int mode auto prim_name = this->name(); this->AddAttr("data_format", MakeValue("NCHW")); this->AddAttr("offset_a", MakeValue(0)); - this->SetKernelSize(CheckAndConvertUtils::CheckPositiveVector(kKernelSize, 
kernel_size, prim_name)); - this->SetStride(CheckAndConvertUtils::CheckPositiveVector(kStride, stride, this->name(), true, true)); - this->SetDilation(CheckAndConvertUtils::CheckPositiveVector(kDilation, dilation, this->name(), true, true)); - this->SetPadMode(CheckAndConvertUtils::CheckString(kPadMode, pad_mode, {"valid", "same", "pad"}, prim_name)); - CheckAndConvertUtils::CheckInteger("pad size", pad.size(), kEqual, 4, prim_name); + this->set_kernel_size(CheckAndConvertUtils::CheckPositiveVector(kKernelSize, kernel_size, prim_name)); + this->set_stride(CheckAndConvertUtils::CheckPositiveVector(kStride, stride, this->name(), true, true)); + this->set_dilation(CheckAndConvertUtils::CheckPositiveVector(kDilation, dilation, this->name(), true, true)); + this->set_pad_mode(CheckAndConvertUtils::CheckString(kPadMode, pad_mode, {"valid", "same", "pad"}, prim_name)); + CheckAndConvertUtils::CheckInteger("pad_size", pad.size(), kEqual, 4, prim_name); if (pad_mode == "pad") { for (auto item : pad) { - CheckAndConvertUtils::Check("pad item", item, kGreaterEqual, "zeros list", 0, prim_name); + CheckAndConvertUtils::Check("pad_item", item, kGreaterEqual, "zeros_list", 0, prim_name); } } else { - CheckAndConvertUtils::Check(kPad, pad, kEqual, "zeros list", {0, 0, 0, 0}, prim_name); + CheckAndConvertUtils::Check(kPad, pad, kEqual, "zeros_list", {0, 0, 0, 0}, prim_name); } - this->SetPad(CheckAndConvertUtils::CheckPositiveVector(kPad, pad, this->name(), true, true)); - this->SetMode(CheckAndConvertUtils::CheckInteger("mode", mode, kEqual, 1, prim_name)); - this->SetOutChannel(CheckAndConvertUtils::CheckInteger("out_channel", out_channel, kGreaterThan, 0, prim_name)); - this->SetGroup(CheckAndConvertUtils::CheckInteger("group", group, kGreaterThan, 0, prim_name)); + this->set_pad(CheckAndConvertUtils::CheckPositiveVector(kPad, pad, this->name(), true, true)); + this->set_mode(CheckAndConvertUtils::CheckInteger("mode", mode, kEqual, 1, prim_name)); + 
this->set_out_channel(CheckAndConvertUtils::CheckInteger("out_channel", out_channel, kGreaterThan, 0, prim_name)); + this->set_group(CheckAndConvertUtils::CheckInteger("group", group, kGreaterThan, 0, prim_name)); } -std::vector Conv2D::GetKernelSize() const { + +std::vector Conv2D::get_kernel_size() const { auto value_ptr = GetAttr(kKernelSize); return GetValue>(value_ptr); } -std::vector Conv2D::GetStride() const { +std::vector Conv2D::get_stride() const { auto value_ptr = GetAttr(kStride); return GetValue>(value_ptr); } -std::vector Conv2D::GetDilation() const { +std::vector Conv2D::get_dilation() const { auto value_ptr = GetAttr(kDilation); return GetValue>(value_ptr); } -std::string Conv2D::GetPadMode() const { +std::string Conv2D::get_pad_mode() const { auto value_ptr = this->GetAttr(kPadMode); return GetValue(value_ptr); } -std::vector Conv2D::GetPad() const { +std::vector Conv2D::get_pad() const { auto value_ptr = this->GetAttr(kPad); return GetValue>(value_ptr); } -int Conv2D::GetMode() const { +int Conv2D::get_mode() const { auto value_ptr = this->GetAttr(kMode); return GetValue(value_ptr); } @@ -168,20 +159,22 @@ int Conv2D::get_group() const { auto value_ptr = this->GetAttr(kGroup); return GetValue(value_ptr); } -int Conv2D::GetOutputChannel() const { +int Conv2D::get_output_channel() const { auto value_ptr = this->GetAttr(kOutputChannel); return GetValue(value_ptr); } -void Conv2D::SetKernelSize(const std::vector &kernel_size) { this->AddAttr(kKernelSize, MakeValue(kernel_size)); } -void Conv2D::SetStride(const std::vector &stride) { this->AddAttr(kStride, MakeValue(stride)); } -void Conv2D::SetDilation(const std::vector &dilation) { this->AddAttr(kDilation, MakeValue(dilation)); } -void Conv2D::SetPadMode(const std::string &pad_mode) { this->AddAttr(kPadMode, MakeValue(pad_mode)); } -void Conv2D::SetPad(const std::vector &pad) { this->AddAttr(kPad, MakeValue(pad)); } -void Conv2D::SetMode(int mode) { this->AddAttr(kMode, MakeValue(mode)); } -void 
Conv2D::SetGroup(int group) { this->AddAttr(kGroup, MakeValue(group)); } -void Conv2D::SetOutChannel(int output_channel) { this->AddAttr(kOutputChannel, MakeValue(output_channel)); } -void Conv2D::SetPadList(const std::vector &pad_list) { this->AddAttr(kPadList, MakeValue(pad_list)); } +void Conv2D::set_kernel_size(const std::vector &kernel_size) { + this->AddAttr(kKernelSize, MakeValue(kernel_size)); +} +void Conv2D::set_stride(const std::vector &stride) { this->AddAttr(kStride, MakeValue(stride)); } +void Conv2D::set_dilation(const std::vector &dilation) { this->AddAttr(kDilation, MakeValue(dilation)); } +void Conv2D::set_pad_mode(const std::string &pad_mode) { this->AddAttr(kPadMode, MakeValue(pad_mode)); } +void Conv2D::set_pad(const std::vector &pad) { this->AddAttr(kPad, MakeValue(pad)); } +void Conv2D::set_mode(int mode) { this->AddAttr(kMode, MakeValue(mode)); } +void Conv2D::set_group(int group) { this->AddAttr(kGroup, MakeValue(group)); } +void Conv2D::set_out_channel(int output_channel) { this->AddAttr(kOutputChannel, MakeValue(output_channel)); } +void Conv2D::set_pad_list(const std::vector &pad_list) { this->AddAttr(kPadList, MakeValue(pad_list)); } AbstractBasePtr Conv2dInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, const std::vector &input_args) { diff --git a/mindspore/core/c_ops/conv2d.h b/mindspore/core/c_ops/conv2d.h index b00ada6db41..670106a1abc 100644 --- a/mindspore/core/c_ops/conv2d.h +++ b/mindspore/core/c_ops/conv2d.h @@ -21,10 +21,12 @@ #include #include +#include "c_ops/op_utils.h" #include "c_ops/primitive_c.h" #include "abstract/abstract_value.h" #include "utils/check_convert_utils.h" namespace mindspore { +constexpr auto kConv2DName = "Conv2D"; class Conv2D : public PrimitiveC { public: Conv2D(); @@ -33,23 +35,23 @@ class Conv2D : public PrimitiveC { void Init(int out_channel, const std::vector &kernel_size, int mode = 1, const std::string &pad_mode = "valid", const std::vector &pad = {0, 0, 0, 0}, const 
std::vector &stride = {1, 1, 1, 1}, const std::vector &dilation = {1, 1, 1, 1}, int group = 1); - std::vector GetKernelSize() const; - std::vector GetStride() const; - std::vector GetDilation() const; - std::string GetPadMode() const; - std::vector GetPad() const; - int GetMode() const; + std::vector get_kernel_size() const; + std::vector get_stride() const; + std::vector get_dilation() const; + std::string get_pad_mode() const; + std::vector get_pad() const; + int get_mode() const; int get_group() const; - int GetOutputChannel() const; - void SetKernelSize(const std::vector &kernel_size); - void SetStride(const std::vector &stride); - void SetDilation(const std::vector &dilation); - void SetPadMode(const std::string &pad_mode); - void SetPad(const std::vector &pad); - void SetMode(int mode); - void SetGroup(int group); - void SetOutChannel(int output_channel); - void SetPadList(const std::vector &pad_list); + int get_output_channel() const; + void set_kernel_size(const std::vector &kernel_size); + void set_stride(const std::vector &stride); + void set_dilation(const std::vector &dilation); + void set_pad_mode(const std::string &pad_mode); + void set_pad(const std::vector &pad); + void set_mode(int mode); + void set_group(int group); + void set_out_channel(int output_channel); + void set_pad_list(const std::vector &pad_list); }; AbstractBasePtr Conv2dInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, const std::vector &input_args); diff --git a/mindspore/core/c_ops/depthwise_conv2d.cc b/mindspore/core/c_ops/depthwise_conv2d.cc new file mode 100644 index 00000000000..dd57a1cc0f3 --- /dev/null +++ b/mindspore/core/c_ops/depthwise_conv2d.cc @@ -0,0 +1,199 @@ +/** + * Copyright 2019-2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "c_ops/depthwise_conv2d.h" +#include +#include +#include +#include +#include "c_ops/op_utils.h" +#include "utils/check_convert_utils.h" +#include "abstract/primitive_infer_map.h" + +namespace mindspore { +void DepthWiseConv2D::Init(int channel_multiplier, const std::vector &kernel_size, int mode, + const std::string &pad_mode, const std::vector &pad, const std::vector &stride, + const std::vector &dilation, int group) { + auto prim_name = this->name(); + this->AddAttr("data_format", MakeValue("NCHW")); + this->AddAttr("offset_a", MakeValue(0)); + this->set_mode(CheckAndConvertUtils::CheckInteger("mode", mode, kEqual, 3, prim_name)); + + this->set_kernel_size(CheckAndConvertUtils::CheckPositiveVector(kKernelSize, kernel_size, prim_name)); + auto strides = CheckAndConvertUtils::CheckPositiveVector(kStride, stride, this->name(), false, false); + if (strides[0] != strides[1]) { + MS_EXCEPTION(ValueError) << "The height and width of stride should be equal, but got height " << strides[0] + << ", width " << strides[1]; + } + this->set_stride(strides); + auto dilations = CheckAndConvertUtils::CheckPositiveVector(kDilation, dilation, this->name(), false, false); + if (dilations[0] != dilations[1]) { + MS_EXCEPTION(ValueError) << "The height and width of dilation should be equal, but got height " << dilations[0] + << ", width " << dilations[1]; + } + this->set_dilation(dilations); + this->set_pad_mode(CheckAndConvertUtils::CheckString(kPadMode, pad_mode, {"valid", "same", "pad"}, prim_name)); + + 
CheckAndConvertUtils::CheckInteger("pad_size", pad.size(), kEqual, 4, prim_name); + if (pad_mode == "pad") { + for (auto item : pad) { + CheckAndConvertUtils::Check("pad_item", item, kGreaterEqual, "zeros_list", 0, prim_name); + } + } else { + CheckAndConvertUtils::Check(kPad, pad, kEqual, "zeros_list", {0, 0, 0, 0}, prim_name); + } + this->set_pad(CheckAndConvertUtils::CheckPositiveVector(kPad, pad, this->name(), true, true)); + + this->set_out_channel( + CheckAndConvertUtils::CheckInteger("channel_multiplier", channel_multiplier, kGreaterThan, 0, prim_name)); + this->set_group(CheckAndConvertUtils::CheckInteger("group", group, kGreaterThan, 0, prim_name)); +} + +std::vector DepthWiseConv2D::get_kernel_size() const { + auto value_ptr = GetAttr(kKernelSize); + return GetValue>(value_ptr); +} +std::vector DepthWiseConv2D::get_stride() const { + auto value_ptr = GetAttr(kStride); + return GetValue>(value_ptr); +} +std::vector DepthWiseConv2D::get_dilation() const { + auto value_ptr = GetAttr(kDilation); + return GetValue>(value_ptr); +} +std::string DepthWiseConv2D::get_pad_mode() const { + auto value_ptr = this->GetAttr(kPadMode); + return GetValue(value_ptr); +} +std::vector DepthWiseConv2D::get_pad() const { + auto value_ptr = this->GetAttr(kPad); + return GetValue>(value_ptr); +} +int DepthWiseConv2D::get_mode() const { + auto value_ptr = this->GetAttr(kMode); + return GetValue(value_ptr); +} + +int DepthWiseConv2D::get_group() const { + auto value_ptr = this->GetAttr(kGroup); + return GetValue(value_ptr); +} +int DepthWiseConv2D::get_output_channel() const { + auto value_ptr = this->GetAttr(kOutputChannel); + return GetValue(value_ptr); +} + +void DepthWiseConv2D::set_kernel_size(const std::vector &kernel_size) { + this->AddAttr(kKernelSize, MakeValue(kernel_size)); +} + +void DepthWiseConv2D::set_stride(const std::vector &stride) { this->AddAttr(kStride, MakeValue(stride)); } +void DepthWiseConv2D::set_dilation(const std::vector &dilation) { 
this->AddAttr(kDilation, MakeValue(dilation)); } +void DepthWiseConv2D::set_pad_mode(const std::string &pad_mode) { this->AddAttr(kPadMode, MakeValue(pad_mode)); } +void DepthWiseConv2D::set_pad(const std::vector &pad) { this->AddAttr(kPad, MakeValue(pad)); } +void DepthWiseConv2D::set_mode(int mode) { this->AddAttr(kMode, MakeValue(mode)); } +void DepthWiseConv2D::set_group(int group) { this->AddAttr(kGroup, MakeValue(group)); } +void DepthWiseConv2D::set_out_channel(int output_channel) { this->AddAttr(kOutputChannel, MakeValue(output_channel)); } +void DepthWiseConv2D::set_pads(const std::vector &pad_list) { this->AddAttr(kPads, MakeValue(pad_list)); } + +abstract::ShapePtr InferShape(const PrimitivePtr &primitive, const std::vector &input_args) { + MS_EXCEPTION_IF_NULL(primitive); + auto conv_prim = primitive->cast(); + MS_EXCEPTION_IF_NULL(conv_prim); + auto prim_name = conv_prim->name(); + CheckAndConvertUtils::CheckInRange("conv2d_Infer", input_args.size(), kIncludeBoth, {2, 3}, prim_name); + auto x_shape = CheckAndConvertUtils::ConvertShapePtrToShape("x_shape", input_args[0]->GetShapeTrack(), prim_name); + auto w_shape = CheckAndConvertUtils::ConvertShapePtrToShape("w_shape", input_args[1]->GetShapeTrack(), prim_name); + + CheckAndConvertUtils::CheckInteger("weight_rank", w_shape.size(), kEqual, 4, prim_name); + CheckAndConvertUtils::CheckInteger("x_rank", x_shape.size(), kEqual, 4, prim_name); + CheckAndConvertUtils::Check("x_shape[1]", x_shape[1], kEqual, "w_shape[1]", w_shape[1], conv_prim->name()); + auto out_channel = conv_prim->get_output_channel(); + + std::vector temp_w; + std::copy(w_shape.begin() + 2, w_shape.end(), std::back_inserter(temp_w)); + CheckAndConvertUtils::Check("kernel_size", conv_prim->get_kernel_size(), kEqual, "w_shape[2:4]", temp_w, + conv_prim->name()); + + auto kernel_size_n = w_shape[0]; + if (kernel_size_n != 1) { + MS_EXCEPTION(ValueError) << "The batch of input weeight should be 1, but got " << kernel_size_n; + } + auto 
kernel_size_h = w_shape[2]; + auto kernel_size_w = w_shape[3]; + auto stride = conv_prim->get_stride(); + auto dilation = conv_prim->get_dilation(); + auto stride_h = stride[2]; + auto stride_w = stride[3]; + auto dilation_h = dilation[2]; + auto dilation_w = dilation[3]; + int h_out = -1; + int w_out = -1; + std::vector pad_list(4, 0); + auto pad_mode = conv_prim->get_pad_mode(); + if (pad_mode == "valid") { + h_out = ceil((x_shape[2] - dilation_h * (kernel_size_h - 1)) / stride_h); + w_out = ceil((x_shape[3] - dilation_w * (kernel_size_w - 1)) / stride_w); + } else if (pad_mode == "same") { + h_out = ceil(x_shape[2] / stride_h); + w_out = ceil(x_shape[3] / stride_w); + + auto pad_needed_h = std::max(0, (h_out - 1) * stride_h + dilation_h * (kernel_size_h - 1) + 1 - x_shape[2]); + pad_list.emplace_back(floor(pad_needed_h / 2)); + pad_list.emplace_back(pad_needed_h / 2); + auto pad_needed_w = std::max(0, (w_out - 1) * stride_w + dilation_w * (kernel_size_w - 1) + 1 - x_shape[3]); + auto pad_left = floor(pad_needed_w / 2); + pad_list.emplace_back(pad_left); + pad_list.emplace_back(pad_needed_h - pad_left); + } else if (pad_mode == "pad") { + std::copy(conv_prim->get_pad().begin(), conv_prim->get_pad().end(), std::back_inserter(pad_list)); + auto pad_top = conv_prim->get_pad()[0]; + auto pad_bottom = conv_prim->get_pad()[1]; + auto pad_right = conv_prim->get_pad()[2]; + auto pad_left = conv_prim->get_pad()[3]; + + h_out = 1 + (x_shape[2] + pad_top + pad_bottom - kernel_size_h - (kernel_size_h - 1) * (dilation_h - 1)) / stride_h; + w_out = 1 + (x_shape[3] + pad_left + pad_right - kernel_size_w - (kernel_size_w - 1) * (dilation_w - 1)) / stride_w; + h_out = floor(h_out); + w_out = floor(w_out); + } + conv_prim->set_pads(pad_list); + std::vector out_shape = {x_shape[0], out_channel * x_shape[1], h_out, w_out}; + return std::make_shared(out_shape); +} + +TypePtr InferType(const PrimitivePtr &prim, const std::vector &input_args) { + CheckAndConvertUtils::CheckInRange("", 
input_args.size(), kIncludeBoth, {2, 3}, prim->name()); + for (const auto &item : input_args) { + MS_EXCEPTION_IF_NULL(item); + } + + std::map types; + types.emplace("x", input_args[0]->BuildType()); + types.emplace("w", input_args[1]->BuildType()); + auto infer_type = CheckAndConvertUtils::CheckTensorTypeSame(types, common_valid_types, prim->name()); + if (infer_type == kNumberTypeInt8) { + return std::make_shared(TypeIdToType(kNumberTypeInt32)); + } + return TypeIdToType(infer_type); +} + +AbstractBasePtr DepthWiseConv2DInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args) { + return std::make_shared(InferType(primitive, input_args), + InferShape(primitive, input_args)->shape()); +} +} // namespace mindspore diff --git a/mindspore/core/c_ops/depthwise_conv2d.h b/mindspore/core/c_ops/depthwise_conv2d.h new file mode 100644 index 00000000000..f7773e8e88e --- /dev/null +++ b/mindspore/core/c_ops/depthwise_conv2d.h @@ -0,0 +1,60 @@ +/** + * Copyright 2019-2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef MINDSPORE_CORE_C_OPS_DEPTHWISE_CONV2D_H +#define MINDSPORE_CORE_C_OPS_DEPTHWISE_CONV2D_H +#include +#include +#include +#include +#include "c_ops/primitive_c.h" +#include "abstract/abstract_value.h" +#include "utils/check_convert_utils.h" + +namespace mindspore { +constexpr auto kNameDepthWiseConv2D = "DepthwiseConv2dNative"; +class DepthWiseConv2D : public PrimitiveC { + public: + DepthWiseConv2D() : PrimitiveC(kNameDepthWiseConv2D) { InitIOName({"x", "w"}, {"output"}); } + ~DepthWiseConv2D() = default; + MS_DECLARE_PARENT(DepthWiseConv2D, PrimitiveC); + void Init(int out_channel, const std::vector &kernel_size, int mode = 1, const std::string &pad_mode = "valid", + const std::vector &pad = {0, 0, 0, 0}, const std::vector &stride = {1, 1, 1, 1}, + const std::vector &dilation = {1, 1, 1, 1}, int group = 1); + std::vector get_kernel_size() const; + std::vector get_stride() const; + std::vector get_dilation() const; + std::string get_pad_mode() const; + std::vector get_pad() const; + int get_mode() const; + int get_group() const; + int get_output_channel() const; + void set_kernel_size(const std::vector &kernel_size); + void set_stride(const std::vector &stride); + void set_dilation(const std::vector &dilation); + void set_pad_mode(const std::string &pad_mode); + void set_pad(const std::vector &pad); + void set_mode(int mode); + void set_group(int group); + void set_out_channel(int output_channel); + void set_pads(const std::vector &pad_list); +}; +AbstractBasePtr DepthWiseConv2DInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args); +using PrimDepthWiseConv2DPtr = std::shared_ptr; +} // namespace mindspore + +#endif // MINDSPORE_CORE_C_OPS_DEPTHWISE_CONV2D_H diff --git a/mindspore/core/c_ops/op_utils.h b/mindspore/core/c_ops/op_utils.h new file mode 100644 index 00000000000..8955f2a61ca --- /dev/null +++ b/mindspore/core/c_ops/op_utils.h @@ -0,0 +1,46 @@ +/** + * Copyright 2019-2020 Huawei 
Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_CORE_C_OPS_OP_UTILS_H +#define MINDSPORE_CORE_C_OPS_OP_UTILS_H +#include +#include +#include +#include +#include +#include "abstract/primitive_infer_map.h" +#include "utils/check_convert_utils.h" + +namespace mindspore { +constexpr auto kKernelSize = "kernel_size"; +constexpr auto kStride = "stride"; +constexpr auto kDilation = "dilation"; +constexpr auto kPadMode = "pad_mode"; +constexpr auto kPad = "pad"; +constexpr auto kPads = "pads"; +constexpr auto kMode = "mode"; +constexpr auto kGroup = "group"; +constexpr auto kOutputChannel = "output_channel"; +constexpr auto kPadList = "pad_list"; +constexpr auto kAxis = "axis"; + +const std::set common_valid_types = { + kNumberTypeInt8, kNumberTypeInt16, kNumberTypeInt32, kNumberTypeInt64, kNumberTypeUInt8, kNumberTypeUInt16, + kNumberTypeUInt32, kNumberTypeUInt64, kNumberTypeFloat16, kNumberTypeFloat32, kNumberTypeFloat64}; + +abstract::ShapePtr BroadCastInferShape(const std::string &op_name, const std::vector &input_args); +} // namespace mindspore +#endif // MINDSPORE_CORE_C_OPS_OP_UTILS_H diff --git a/mindspore/core/c_ops/op_utils.cc b/mindspore/core/c_ops/op_utils.cc new file mode 100644 index 00000000000..e8d40048c54 --- /dev/null +++ b/mindspore/core/c_ops/op_utils.cc @@ -0,0 +1,57 @@ +/** + * Copyright 2019-2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0
(the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include +#include +#include +#include +#include +#include "c_ops/op_utils.h" +#include "abstract/primitive_infer_map.h" +#include "utils/check_convert_utils.h" + +namespace mindspore { +abstract::ShapePtr BroadCastInferShape(const std::string &op_name, const std::vector &input_args) { + MS_LOG(INFO) << "Do infer shape for op " << op_name; + auto x_shape = CheckAndConvertUtils::ConvertShapePtrToShape("x_shape", input_args[0]->GetShapeTrack(), op_name); + auto y_shape = CheckAndConvertUtils::ConvertShapePtrToShape("y_shape", input_args[1]->GetShapeTrack(), op_name); + if (x_shape == y_shape) { + return std::make_shared(x_shape); + } + + auto x_length = x_shape.size(); + auto y_length = y_shape.size(); + auto length = x_length < y_length ? 
x_length : y_length; + std::vector broadcast_shape; + if (x_length == length) { + std::copy(y_shape.begin(), y_shape.end() - length, std::back_inserter(broadcast_shape)); + } else { + std::copy(x_shape.begin(), x_shape.end() - length, std::back_inserter(broadcast_shape)); + } + for (int i = -length; i < 0; i++) { + if (x_shape[x_length + i] == 1) { + broadcast_shape.push_back(y_shape[y_length + i]); + } else if (y_shape[y_length + i] == 1) { + broadcast_shape.push_back(x_shape[x_length + i]); + } else if (x_shape[x_length + i] == y_shape[y_length + i]) { + broadcast_shape.push_back(x_shape[x_length + i]); + } else { + MS_EXCEPTION(ValueError) << "For op " << op_name << ", the two input can not broadcast"; + } + } + return std::make_shared(broadcast_shape); +} +} // namespace mindspore diff --git a/mindspore/core/c_ops/relu6.cc b/mindspore/core/c_ops/relu6.cc new file mode 100644 index 00000000000..90eb49f3e22 --- /dev/null +++ b/mindspore/core/c_ops/relu6.cc @@ -0,0 +1,52 @@ +/** + * Copyright 2019-2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "c_ops/relu6.h" +#include +#include +#include +#include +#include +#include "utils/check_convert_utils.h" +#include "abstract/primitive_infer_map.h" + +namespace mindspore { +abstract::ShapePtr Relu6InferShape(const PrimitivePtr &primitive, const std::vector &input_args) { + MS_EXCEPTION_IF_NULL(primitive); + auto x = input_args[0]->GetShapeTrack(); + auto shape_element = x->cast(); + MS_EXCEPTION_IF_NULL(shape_element); + return shape_element; +} + +TypePtr Relu6InferType(const PrimitivePtr &prim, const std::vector &input_args) { + const std::set valid_types = {kNumberTypeFloat16, kNumberTypeFloat32}; + if (std::any_of(input_args.begin(), input_args.end(), [](AbstractBasePtr a) { return a == nullptr; })) { + MS_LOG(EXCEPTION) << "nullptr"; + } + std::map types; + types.emplace("x", input_args[0]->BuildType()); + auto infer_type = CheckAndConvertUtils::CheckTensorTypeSame(types, valid_types, prim->name()); + return TypeIdToType(infer_type); +} + +AbstractBasePtr Relu6Infer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args) { + return std::make_shared(Relu6InferType(primitive, input_args), + Relu6InferShape(primitive, input_args)->shape()); +} +REGISTER_PRIMITIVE_EVAL_IMPL(Relu6, prim::kPrimRelu6, Relu6Infer); +} // namespace mindspore diff --git a/mindspore/core/c_ops/relu6.h b/mindspore/core/c_ops/relu6.h new file mode 100644 index 00000000000..31c8f178a85 --- /dev/null +++ b/mindspore/core/c_ops/relu6.h @@ -0,0 +1,40 @@ +/** + * Copyright 2019-2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef MINDSPORE_CORE_C_OPS_RELU6_H_ +#define MINDSPORE_CORE_C_OPS_RELU6_H_ +#include +#include +#include +#include +#include "c_ops/primitive_c.h" +#include "abstract/abstract_value.h" +#include "utils/check_convert_utils.h" + +namespace mindspore { +constexpr auto kNameRelu6 = "Relu6"; +class Relu6 : public PrimitiveC { + public: + Relu6() : PrimitiveC(kNameRelu6) { InitIOName({"x"}, {"output"}); } + ~Relu6() = default; + MS_DECLARE_PARENT(Relu6, PrimitiveC); + void Init() {} +}; + +AbstractBasePtr Relu6Infer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args); +using PrimRelu6Ptr = std::shared_ptr; +} // namespace mindspore +#endif // MINDSPORE_CORE_C_OPS_RELU6_H_ diff --git a/mindspore/core/c_ops/reshape.cc b/mindspore/core/c_ops/reshape.cc new file mode 100644 index 00000000000..3646d46c2b5 --- /dev/null +++ b/mindspore/core/c_ops/reshape.cc @@ -0,0 +1,44 @@ +/** + * Copyright 2019-2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "c_ops/reshape.h" +#include +#include +#include +#include +#include +#include "utils/check_convert_utils.h" +#include "abstract/primitive_infer_map.h" + +namespace mindspore { +abstract::ShapePtr InferShape(const PrimitivePtr &primitive, const std::vector &input_args) { + // to do + return nullptr; +} + +TypePtr InferType(const PrimitivePtr &prim, const std::vector &input_args) { + // to do + return nullptr; +} + +AbstractBasePtr ReshapeInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args) { + return std::make_shared(InferType(primitive, input_args), + InferShape(primitive, input_args)->shape()); +} + +REGISTER_PRIMITIVE_EVAL_IMPL(Reshape, prim::kPrimReshape, ReshapeInfer); +} // namespace mindspore diff --git a/mindspore/core/c_ops/reshape.h b/mindspore/core/c_ops/reshape.h new file mode 100644 index 00000000000..5850e5b3136 --- /dev/null +++ b/mindspore/core/c_ops/reshape.h @@ -0,0 +1,42 @@ +/** + * Copyright 2019-2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_CORE_C_OPS_RESHAPE_H_ +#define MINDSPORE_CORE_C_OPS_RESHAPE_H_ + +#include +#include +#include +#include +#include "c_ops/primitive_c.h" +#include "abstract/abstract_value.h" +#include "utils/check_convert_utils.h" + +namespace mindspore { +constexpr auto kNameReshape = "Reshape"; +class Reshape : public PrimitiveC { + public: + Reshape() : PrimitiveC(kNameReshape) { InitIOName({"tensor", "shape"}, {"output"}); } + ~Reshape() = default; + MS_DECLARE_PARENT(Reshape, PrimitiveC); + void Init() {} +}; + +AbstractBasePtr ReshapeInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args); +using PrimTensorAddPtr = std::shared_ptr; +} // namespace mindspore + +#endif // MINDSPORE_CORE_C_OPS_RESHAPE_H_ diff --git a/mindspore/core/c_ops/softmax.cc b/mindspore/core/c_ops/softmax.cc new file mode 100644 index 00000000000..7ee49646f97 --- /dev/null +++ b/mindspore/core/c_ops/softmax.cc @@ -0,0 +1,68 @@ +/** + * Copyright 2019-2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "c_ops/softmax.h" +#include +#include +#include +#include +#include +#include "c_ops/op_utils.h" +#include "utils/check_convert_utils.h" +#include "abstract/primitive_infer_map.h" + +namespace mindspore { +void Softmax::set_axis(const std::vector &axis) { this->set_attr(kAxis, MakeValue(axis)); } + +void Softmax::Init(int axis) { + auto op_name = this->name(); + std::vector axis_vec = {axis}; + CheckAndConvertUtils::CheckInteger("axis_len", axis_vec.size(), kEqual, 1, op_name); + auto rank = axis_vec.size(); + for (auto &item : axis_vec) { + CheckAndConvertUtils::CheckInRange("axis", item, kIncludeLeft, {-rank, rank}, op_name); + } + this->set_axis(axis_vec); +} + +abstract::ShapePtr InferShape(const PrimitivePtr &primitive, const std::vector &input_args) { + MS_EXCEPTION_IF_NULL(primitive); + auto softmax_prim = primitive->cast(); + MS_EXCEPTION_IF_NULL(softmax_prim); + auto op_name = softmax_prim->name(); + auto in_shape = CheckAndConvertUtils::ConvertShapePtrToShape("input_shape", input_args[0]->GetShapeTrack(), op_name); + return std::make_shared(in_shape); +} + +TypePtr InferType(const PrimitivePtr &prim, const std::vector &input_args) { + if (std::any_of(input_args.begin(), input_args.end(), [](const AbstractBasePtr &a) { return a == nullptr; })) { + MS_LOG(EXCEPTION) << "nullptr"; + } + std::map types; + const std::set valid_types = {kNumberTypeFloat16, kNumberTypeFloat32, kNumberTypeFloat64}; + types.emplace("x", input_args[0]->BuildType()); + auto infer_type = CheckAndConvertUtils::CheckTensorTypeSame(types, valid_types, prim->name()); + return TypeIdToType(infer_type); +} + +AbstractBasePtr SoftmaxInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args) { + return std::make_shared(InferType(primitive, input_args), + InferShape(primitive, input_args)->shape()); +} + +REGISTER_PRIMITIVE_EVAL_IMPL(Softmax, prim::kPrimSoftmax, SoftmaxInfer); +} // namespace mindspore diff --git 
a/mindspore/core/c_ops/softmax.h b/mindspore/core/c_ops/softmax.h new file mode 100644 index 00000000000..529750bd810 --- /dev/null +++ b/mindspore/core/c_ops/softmax.h @@ -0,0 +1,44 @@ +/** + * Copyright 2019-2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_CORE_C_OPS_SOFTMAX_H_ +#define MINDSPORE_CORE_C_OPS_SOFTMAX_H_ + +#include +#include +#include +#include +#include "c_ops/primitive_c.h" +#include "abstract/abstract_value.h" +#include "utils/check_convert_utils.h" + +namespace mindspore { +constexpr auto kNameSoftmax = "Softmax"; +class Softmax : public PrimitiveC { + public: + Softmax() : PrimitiveC(kNameSoftmax) { InitIOName({"x"}, {"output"}); } + ~Softmax() = default; + MS_DECLARE_PARENT(Softmax, PrimitiveC); + void Init(int axis = 1); + void set_axis(const std::vector &axis); +}; + +AbstractBasePtr SoftmaxInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args); +using PrimSoftmaxPtr = std::shared_ptr; +} // namespace mindspore + +#endif // MINDSPORE_CORE_C_OPS_SOFTMAX_H_ diff --git a/mindspore/core/c_ops/squeeze.cc b/mindspore/core/c_ops/squeeze.cc new file mode 100644 index 00000000000..117352baa4a --- /dev/null +++ b/mindspore/core/c_ops/squeeze.cc @@ -0,0 +1,79 @@ +/** + * Copyright 2019-2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in 
compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "c_ops/squeeze.h" +#include +#include +#include +#include "c_ops/op_utils.h" +#include "utils/check_convert_utils.h" +#include "abstract/primitive_infer_map.h" + +namespace mindspore { +void Squeeze::set_axis(const std::vector &axis) { this->set_attr(kAxis, MakeValue(axis)); } +void Squeeze::Init(const std::vector &axis) { this->set_axis(axis); } +std::vector Squeeze::get_axis() const { + auto value_ptr = this->GetAttr(kAxis); + return GetValue>(value_ptr); +} + +abstract::ShapePtr InferShape(const PrimitivePtr &primitive, const std::vector &input_args) { + MS_EXCEPTION_IF_NULL(primitive); + auto squeeze_prim = primitive->cast(); + MS_EXCEPTION_IF_NULL(squeeze_prim); + auto op_name = squeeze_prim->name(); + auto axis = squeeze_prim->get_axis(); + std::vector infer_shape; + + auto in_shape = CheckAndConvertUtils::ConvertShapePtrToShape("input_shape", input_args[0]->GetShapeTrack(), op_name); + auto len = in_shape.size(); + if (axis.empty()) { + std::copy_if(in_shape.begin(), in_shape.end(), std::back_inserter(infer_shape), + [](int value) { return value != 1; }); + } else { + for (auto &item : axis) { + CheckAndConvertUtils::CheckInRange("axis_or_elememt", item, kIncludeBoth, {-len, len + 1}, op_name); + auto idx = item >= 0 ? 
item : len + item; + if (in_shape[idx] != 1) { + MS_EXCEPTION(ValueError) << "Cannot select an axis to squeeze out which has size not equal to one."; + } + } + for (size_t i = 0; i < len; i++) { + auto it = std::find(axis.begin(), axis.end(), i); + auto it2 = std::find(axis.begin(), axis.end(), i - len); + if (!(it != axis.end() || it2 != axis.end())) { + infer_shape.push_back(in_shape[i]); + } + } + } + return std::make_shared(infer_shape); +} + +TypePtr InferType(const PrimitivePtr &prim, const std::vector &input_args) { + if (std::any_of(input_args.begin(), input_args.end(), [](AbstractBasePtr a) { return a == nullptr; })) { + MS_LOG(EXCEPTION) << "nullptr"; + } + return input_args[0]->BuildType(); +} + +AbstractBasePtr SqueezeInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args) { + return std::make_shared(InferType(primitive, input_args), + InferShape(primitive, input_args)->shape()); +} + +REGISTER_PRIMITIVE_EVAL_IMPL(Squeeze, prim::kPrimSqueeze, SqueezeInfer); +} // namespace mindspore diff --git a/mindspore/core/c_ops/squeeze.h b/mindspore/core/c_ops/squeeze.h new file mode 100644 index 00000000000..e08ba5d4ca7 --- /dev/null +++ b/mindspore/core/c_ops/squeeze.h @@ -0,0 +1,46 @@ +/** + * Copyright 2019-2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef MINDSPORE_CORE_C_OPS_SQUEEZE_H_ +#define MINDSPORE_CORE_C_OPS_SQUEEZE_H_ + +#include +#include +#include +#include +#include "c_ops/primitive_c.h" +#include "abstract/abstract_value.h" +#include "utils/check_convert_utils.h" + +namespace mindspore { +constexpr auto kNameSqueeze = "Squeeze"; +class Squeeze : public PrimitiveC { + public: + Squeeze() : PrimitiveC(kNameSqueeze) { InitIOName({"x"}, {"output"}); } + ~Squeeze() = default; + MS_DECLARE_PARENT(Squeeze, PrimitiveC); + void Init(const std::vector &axis = {}); + void set_axis(const std::vector &axis); + std::vector get_axis() const; +}; + +AbstractBasePtr SqueezeInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args); +using PrimSqueezePtr = std::shared_ptr; + +} // namespace mindspore + +#endif // MINDSPORE_CORE_C_OPS_SQUEEZE_H_