remove duplicate core/ops

mengyuanli 2022-08-11 09:23:51 +08:00
parent cd39fe0d66
commit 0298c826a0
7 changed files with 78 additions and 179 deletions

@@ -17,7 +17,7 @@
#include "plugin/device/cpu/kernel/relu_grad_v2_cpu_kernel.h"
#include <algorithm>
#include <functional>
#include "mindspore/core/ops/relu_grad_v2.h"
#include "mindspore/core/ops/grad/relu_grad_v2.h"
#include "plugin/device/cpu/hal/device/cpu_device_address.h"
#include "utils/ms_utils.h"
@@ -106,13 +106,14 @@ int ReluGradV2CpuKernelMod::Resize(const BaseOperatorPtr &base_operator, const s
return ret;
}
auto input_shape = inputs[kIndex0]->GetShapeVector();
if (input_shape.size() != kDim4) {
MS_LOG(ERROR) << "For '" << kernel_name_ << "', the dims of input shape must be 4, but got " << input_shape.size();
if (input_shape.size() < kDim4) {
MS_LOG(ERROR) << "For '" << kernel_name_ << "', the dims of input shape must be greater than 4, but got "
<< input_shape.size();
return KRET_RESIZE_FAILED;
}
auto mask_shape = inputs[kIndex1]->GetShapeVector();
if (mask_shape.size() < kDim4) {
MS_LOG(ERROR) << "For '" << kernel_name_ << "', the dims of mask shape should greater than 4, but got "
MS_LOG(ERROR) << "For '" << kernel_name_ << "', the dims of mask shape should be greater than 4, but got "
<< mask_shape.size();
return KRET_RESIZE_FAILED;
}
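The practical effect of the relaxed guard: inputs of rank 4 or higher now pass Resize, where previously only rank exactly 4 was accepted; the ReLUV2 kernel in the next file gets the same treatment. Below is a minimal standalone sketch of the new check over plain shape vectors (illustrative names only, not the MindSpore kernel API):

#include <cstddef>
#include <cstdint>
#include <iostream>
#include <vector>

constexpr size_t kDim4 = 4;

// Mirrors the updated guard: reject only inputs with fewer than 4 dims.
bool CheckInputRank(const std::vector<int64_t> &shape) {
  if (shape.size() < kDim4) {
    std::cout << "the dims of input shape must be at least 4, but got " << shape.size() << '\n';
    return false;
  }
  return true;
}

int main() {
  CheckInputRank({2, 3, 16, 16});     // rank 4: accepted (the only rank accepted before this commit)
  CheckInputRank({2, 3, 4, 16, 16});  // rank 5: now also accepted
  CheckInputRank({2, 3});             // rank 2: still rejected
  return 0;
}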

@@ -100,8 +100,9 @@ int ReLUV2CpuKernelMod::Resize(const BaseOperatorPtr &base_operator, const std::
return ret;
}
auto input_shape = inputs[kIndex0]->GetShapeVector();
if (input_shape.size() != kDim4) {
MS_LOG(ERROR) << "For '" << kernel_name_ << "', the dims of input shape must be 4, but got " << input_shape.size();
if (input_shape.size() < kDim4) {
MS_LOG(ERROR) << "For '" << kernel_name_ << "', the dims of input shape must be greater than 4, but got "
<< input_shape.size();
return KRET_RESIZE_FAILED;
}
return KRET_OK;

@@ -1,5 +1,5 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
* Copyright 2022 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -28,38 +28,60 @@
namespace mindspore {
namespace ops {
namespace {
abstract::ShapePtr ReLUGradV2InferShape(const std::vector<AbstractBasePtr> &input_args) {
auto x = input_args[0]->BuildShape();
MS_EXCEPTION_IF_NULL(x);
auto shape_element = x->cast<abstract::ShapePtr>();
MS_EXCEPTION_IF_NULL(shape_element);
return shape_element;
}
TypePtr ReLUGradV2InferType(const PrimitivePtr &prim, const std::vector<AbstractBasePtr> &input_args) {
auto prim_name = prim->name();
MS_EXCEPTION_IF_NULL(input_args[0]);
auto x_type_map = input_args[0]->BuildType();
MS_EXCEPTION_IF_NULL(x_type_map);
auto x_type = x_type_map->cast<TensorTypePtr>();
MS_EXCEPTION_IF_NULL(x_type);
std::set<TypePtr> valid_x_type = {kTensorType};
return CheckAndConvertUtils::CheckTensorTypeValid("input_x", x_type, valid_x_type, prim_name);
}
} // namespace
MIND_API_OPERATOR_IMPL(ReLUGradV2, BaseOperator);
AbstractBasePtr ReLUGradV2Infer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive,
const std::vector<AbstractBasePtr> &input_args) {
constexpr const size_t kReluGradV2InputNum = 2;
constexpr const size_t kGradientIndex = 0;
constexpr const size_t kMaskIndex = 1;
constexpr const size_t kReluGradV2GradientDims = 4;
abstract::ShapePtr ReluGradV2InferShape(const PrimitivePtr &primitive, const std::vector<AbstractBasePtr> &input_args) {
MS_EXCEPTION_IF_NULL(primitive);
auto prim_name = primitive->name();
const int64_t input_num = 2;
(void)CheckAndConvertUtils::CheckInteger("input numbers", SizeToLong(input_args.size()), kEqual, input_num,
prim_name);
for (const auto &item : input_args) {
MS_EXCEPTION_IF_NULL(item);
}
return abstract::MakeAbstract(ReLUGradV2InferShape(input_args), ReLUGradV2InferType(primitive, input_args));
auto gradient_shape_map = CheckAndConvertUtils::ConvertShapePtrToShapeMap(input_args[kGradientIndex]->BuildShape());
auto gradient_input_shape = gradient_shape_map[kShape];
if (gradient_input_shape.size() < kReluGradV2GradientDims) {
MS_EXCEPTION(ValueError) << "For '" << primitive->name()
<< "', The dims of 'gradient' must be greater than 4,but got a " +
std::to_string(gradient_input_shape.size()) + "-D tensor";
}
auto mask_shape_map = CheckAndConvertUtils::ConvertShapePtrToShapeMap(input_args[kMaskIndex]->BuildShape());
auto mask_input_shape = mask_shape_map[kShape];
if (mask_input_shape.size() < kReluGradV2GradientDims) {
MS_EXCEPTION(ValueError) << "For '" << primitive->name()
<< "', The 'mask' dims must be greater than 4,but got " +
std::to_string(mask_input_shape.size()) + "-D tensor";
}
auto gradient_build_shape = input_args[kGradientIndex]->BuildShape();
MS_EXCEPTION_IF_NULL(gradient_build_shape);
auto gradient_shape = gradient_build_shape->cast<abstract::ShapePtr>();
MS_EXCEPTION_IF_NULL(gradient_shape);
return gradient_shape;
}
REGISTER_PRIMITIVE_EVAL_IMPL(ReLUGradV2, prim::kPrimReluGradV2, ReLUGradV2Infer, nullptr, true);
TypePtr ReluGradV2InferType(const PrimitivePtr &prim, const std::vector<AbstractBasePtr> &input_args) {
MS_EXCEPTION_IF_NULL(prim);
auto prim_name = prim->name();
MS_EXCEPTION_IF_NULL(input_args[kGradientIndex]);
auto gradient_type = input_args[kGradientIndex]->BuildType();
MS_EXCEPTION_IF_NULL(gradient_type);
if (!gradient_type->isa<TensorType>()) {
MS_EXCEPTION(TypeError) << "The " << prim_name << "'s "
<< " input must be tensor type but got " << gradient_type->ToString();
}
return gradient_type;
}
} // namespace
MIND_API_OPERATOR_IMPL(ReluGradV2, BaseOperator);
AbstractBasePtr ReluGradV2Infer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive,
const std::vector<AbstractBasePtr> &input_args) {
MS_EXCEPTION_IF_NULL(primitive);
(void)CheckAndConvertUtils::CheckInteger("ReluGradV2 infer", input_args.size(), kEqual, kReluGradV2InputNum,
primitive->name());
auto type = ReluGradV2InferType(primitive, input_args);
auto shape = ReluGradV2InferShape(primitive, input_args);
return abstract::MakeAbstract(shape, type);
}
REGISTER_PRIMITIVE_EVAL_IMPL(ReluGradV2, prim::kPrimReluGradV2, ReluGradV2Infer, nullptr, true);
} // namespace ops
} // namespace mindspore
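Read as a whole, the consolidated infer path first checks the argument count, then validates that both 'gradient' and 'mask' have at least 4 dims, and finally propagates the gradient's shape (and type) to the output. A self-contained analog of that flow over plain shape vectors (a sketch for illustration, not the MindSpore abstract API):

#include <cstddef>
#include <cstdint>
#include <stdexcept>
#include <string>
#include <vector>

// Standalone analog of the consolidated infer flow: validate arity and
// ranks, then return the gradient's shape as the output shape.
std::vector<int64_t> ReluGradV2InferShapeSketch(const std::vector<std::vector<int64_t>> &input_shapes) {
  constexpr size_t kInputNum = 2;  // gradient and mask
  constexpr size_t kGradientIndex = 0;
  constexpr size_t kMaskIndex = 1;
  constexpr size_t kMinDims = 4;
  if (input_shapes.size() != kInputNum) {
    throw std::invalid_argument("ReluGradV2 expects exactly 2 inputs (gradient, mask)");
  }
  if (input_shapes[kGradientIndex].size() < kMinDims) {
    throw std::invalid_argument("the dims of 'gradient' must be at least 4, but got " +
                                std::to_string(input_shapes[kGradientIndex].size()));
  }
  if (input_shapes[kMaskIndex].size() < kMinDims) {
    throw std::invalid_argument("the dims of 'mask' must be at least 4, but got " +
                                std::to_string(input_shapes[kMaskIndex].size()));
  }
  // The output has the same shape (and, in the real op, dtype) as 'gradient'.
  return input_shapes[kGradientIndex];
}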

@@ -1,5 +1,5 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
* Copyright 2022 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -13,8 +13,8 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_CORE_OPS_RELUGRADV2_H_
#define MINDSPORE_CORE_OPS_RELUGRADV2_H_
#ifndef MINDSPORE_CORE_OPS_GRAD_RELU_GRAD_V2_H_
#define MINDSPORE_CORE_OPS_GRAD_RELU_GRAD_V2_H_
#include <map>
#include <vector>
#include <string>
@@ -24,14 +24,23 @@
namespace mindspore {
namespace ops {
constexpr auto kNameReLUGradV2 = "ReLUGradV2";
class MIND_API ReLUGradV2 : public BaseOperator {
constexpr auto kReluGradV2 = "ReluGradV2";
/// \brief Grad op of ReLUV2.
class MIND_API ReluGradV2 : public BaseOperator {
public:
MIND_API_BASE_MEMBER(ReLUGradV2);
ReLUGradV2() : BaseOperator(kNameReLUGradV2) { InitIOName({"x"}, {"output"}); }
MIND_API_BASE_MEMBER(ReluGradV2);
/// \brief Constructor.
ReluGradV2() : BaseOperator(kReluGradV2) { InitIOName({"gradients", "mask"}, {"output"}); }
/// \brief Constructor.
explicit ReluGradV2(const std::string k_name) : BaseOperator(k_name) {
InitIOName({"gradients", "mask"}, {"output"});
}
/// \brief Init.
void Init() const {}
};
abstract::AbstractBasePtr ReluGradV2Infer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive,
const std::vector<abstract::AbstractBasePtr> &input_args);
} // namespace ops
} // namespace mindspore
#endif // MINDSPORE_CORE_OPS_RELUGRADV2_H_
#endif // MINDSPORE_CORE_OPS_GRAD_RELU_GRAD_V2_H_
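The renamed ReluGradV2 class keeps a fixed IO contract: two inputs ("gradients", "mask") and one output. A simplified sketch of the two-constructor pattern above, with a stand-in for BaseOperator since ops/base_operator.h is not part of this diff:

#include <string>
#include <vector>

// Stand-in for BaseOperator, which lives in ops/base_operator.h and is
// not reproduced here.
class BaseOperatorSketch {
 public:
  explicit BaseOperatorSketch(const std::string &name) : name_(name) {}
  virtual ~BaseOperatorSketch() = default;
  void InitIOName(const std::vector<std::string> &inputs, const std::vector<std::string> &outputs) {
    input_names_ = inputs;
    output_names_ = outputs;
  }

 protected:
  std::string name_;
  std::vector<std::string> input_names_;
  std::vector<std::string> output_names_;
};

class ReluGradV2Sketch : public BaseOperatorSketch {
 public:
  // Default construction registers under the canonical op name...
  ReluGradV2Sketch() : BaseOperatorSketch("ReluGradV2") { InitIOName({"gradients", "mask"}, {"output"}); }
  // ...while the explicit overload lets a derived op reuse the same IO
  // contract under a different primitive name.
  explicit ReluGradV2Sketch(const std::string &name) : BaseOperatorSketch(name) {
    InitIOName({"gradients", "mask"}, {"output"});
  }
};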

@@ -1,87 +0,0 @@
/**
* Copyright 2022 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "ops/relu_grad_v2.h"
#include <string>
#include <algorithm>
#include <map>
#include <set>
#include <vector>
#include "ops/op_utils.h"
#include "utils/check_convert_utils.h"
#include "abstract/ops/primitive_infer_map.h"
#include "mindapi/src/helper.h"
namespace mindspore {
namespace ops {
namespace {
constexpr const size_t kReluGradV2InputNum = 2;
constexpr const size_t kGradientIndex = 0;
constexpr const size_t kMaskIndex = 1;
constexpr const size_t kReluGradV2GradientDims = 4;
abstract::ShapePtr ReluGradV2InferShape(const PrimitivePtr &primitive, const std::vector<AbstractBasePtr> &input_args) {
MS_EXCEPTION_IF_NULL(primitive);
for (const auto &item : input_args) {
MS_EXCEPTION_IF_NULL(item);
}
auto gradient_shape_map = CheckAndConvertUtils::ConvertShapePtrToShapeMap(input_args[kGradientIndex]->BuildShape());
auto gradient_input_shape = gradient_shape_map[kShape];
if (gradient_input_shape.size() != kReluGradV2GradientDims) {
MS_EXCEPTION(ValueError) << "For '" << primitive->name()
<< "', The 'gradient' must be a 4-D tensor,but got a " +
std::to_string(gradient_input_shape.size()) + "-D tensor";
}
auto mask_shape_map = CheckAndConvertUtils::ConvertShapePtrToShapeMap(input_args[kMaskIndex]->BuildShape());
auto mask_input_shape = mask_shape_map[kShape];
if (mask_input_shape.size() < kReluGradV2GradientDims) {
MS_EXCEPTION(ValueError) << "For '" << primitive->name()
<< "', The 'mask' dims must be greater than 4,but got " +
std::to_string(mask_input_shape.size()) + "-D tensor";
}
auto gradient_build_shape = input_args[kGradientIndex]->BuildShape();
MS_EXCEPTION_IF_NULL(gradient_build_shape);
auto gradient_shape = gradient_build_shape->cast<abstract::ShapePtr>();
MS_EXCEPTION_IF_NULL(gradient_shape);
return gradient_shape;
}
TypePtr ReluGradV2InferType(const PrimitivePtr &prim, const std::vector<AbstractBasePtr> &input_args) {
MS_EXCEPTION_IF_NULL(prim);
auto prim_name = prim->name();
MS_EXCEPTION_IF_NULL(input_args[kGradientIndex]);
auto gradient_type = input_args[kGradientIndex]->BuildType();
MS_EXCEPTION_IF_NULL(gradient_type);
if (!gradient_type->isa<TensorType>()) {
MS_EXCEPTION(TypeError) << "The " << prim_name << "'s "
<< " input must be tensor type but got " << gradient_type->ToString();
}
return gradient_type;
}
} // namespace
MIND_API_OPERATOR_IMPL(ReluGradV2, BaseOperator);
AbstractBasePtr ReluGradV2Infer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive,
const std::vector<AbstractBasePtr> &input_args) {
MS_EXCEPTION_IF_NULL(primitive);
(void)CheckAndConvertUtils::CheckInteger("ReluGradV2 infer", SizeToLong(input_args.size()), kEqual,
kReluGradV2InputNum, primitive->name());
auto type = ReluGradV2InferType(primitive, input_args);
auto shape = ReluGradV2InferShape(primitive, input_args);
return abstract::MakeAbstract(shape, type);
}
REGISTER_PRIMITIVE_EVAL_IMPL(ReluGradV2, prim::kPrimReluGradV2, ReluGradV2Infer, nullptr, true);
} // namespace ops
} // namespace mindspore

@@ -1,47 +0,0 @@
/**
* Copyright 2022 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_CORE_OPS_RELU_GRAD_V2_H_
#define MINDSPORE_CORE_OPS_RELU_GRAD_V2_H_
#include <map>
#include <vector>
#include <string>
#include <memory>
#include "ops/base_operator.h"
#include "mindapi/base/types.h"
namespace mindspore {
namespace ops {
constexpr auto kReluGradV2 = "ReluGradV2";
/// \brief Grad op of ReLUV2.
class MIND_API ReluGradV2 : public BaseOperator {
public:
MIND_API_BASE_MEMBER(ReluGradV2);
/// \brief Constructor.
ReluGradV2() : BaseOperator(kReluGradV2) { InitIOName({"gradients", "mask"}, {"output"}); }
/// \brief Constructor.
explicit ReluGradV2(const std::string k_name) : BaseOperator(k_name) {
InitIOName({"gradients", "mask"}, {"output"});
}
/// \brief Init.
void Init() const {}
};
abstract::AbstractBasePtr ReluGradV2Infer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive,
const std::vector<abstract::AbstractBasePtr> &input_args);
} // namespace ops
} // namespace mindspore
#endif // MINDSPORE_CORE_OPS_RELU_GRAD_V2_H_

@@ -36,8 +36,8 @@ constexpr int64_t kRound16 = 16;
std::vector<int64_t> ReLUV2GetOutputMaskShape(const PrimitivePtr &prim, const std::vector<int64_t> &input_shape,
const std::shared_ptr<Type> &x_dtype) {
std::vector<int64_t> mask_shape;
if (input_shape.size() != kInputDims) {
MS_EXCEPTION(ValueError) << "For '" << prim->name() << "', the 'input_x' must be a 4-D tensor, but got a "
if (input_shape.size() < kInputDims) {
MS_EXCEPTION(ValueError) << "For '" << prim->name() << "', the dims of 'input_x' must be greater than 4, but got a "
<< std::to_string(input_shape.size()) << "-D tensor.";
}
for (size_t i = 0; i < input_shape.size(); i++) {