!44593 Do not convert inputs to attributes: Conv2DBackpropInput/Conv2DBackpropFilter

Merge pull request !44593 from hujiahui8/ops_all_input
i-robot 2022-11-17 10:53:09 +00:00 committed by Gitee
commit 957138006f
6 changed files with 18 additions and 49 deletions
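
This change drops the const-input-to-attr registrations for the Conv2D backprop ops (Conv2DBackpropInput, Conv2DBackpropFilter, Conv2DTranspose) on CPU and GPU, so their constant size argument (filter_sizes / input_sizes) stays a real kernel input instead of being folded into a primitive attribute. Accordingly, the GPU backprop kernels remove their two-input kernel registrations and the is_dynamic_attr_ flag, always expect three inputs, and read the size value from input index 2 in Resize().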

@@ -26,8 +26,6 @@ RER_CPU_DYNAMIC_CONST_TO_ATTR(kCastOpName, 1);
 RER_CPU_DYNAMIC_CONST_TO_ATTR(kFillOpName, 0);
 RER_CPU_STATIC_CONST_TO_ATTR(kCastOpName, 1);
-RER_CPU_STATIC_CONST_TO_ATTR(kConv2DBackpropInputOpName, 2);
-RER_CPU_STATIC_CONST_TO_ATTR(kConv2DTransposeOpName, 2);
 RER_CPU_STATIC_CONST_TO_ATTR(kCOO2CSROpName, 1);
 RER_CPU_STATIC_CONST_TO_ATTR(kCSR2COOOpName, 1);
 RER_CPU_STATIC_CONST_TO_ATTR(kCSRDivOpName, 3);
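
For context, here is a minimal standalone sketch of what a const-input-to-attr table like the one above is for; it models only the idea, and none of the types or names below are MindSpore's real mindspore::opt API. An op left in the table gets the constant input at the registered index folded into an attribute during graph optimization; an op removed from it, such as Conv2DBackpropInput here, keeps that value as a real input.

#include <cstdint>
#include <iostream>
#include <map>
#include <optional>
#include <string>
#include <vector>

using Shape = std::vector<int64_t>;

// Hypothetical graph node: each input slot may hold a compile-time constant.
struct Node {
  std::string op;
  std::vector<std::optional<Shape>> inputs;  // engaged if the input is const
  std::map<std::string, Shape> attrs;        // attributes folded in so far
};

// Hypothetical registration table: op name -> index of the const input to
// convert (mirrors the RER_*_STATIC_CONST_TO_ATTR entries; the conv backprop
// ops are deliberately absent, as after this PR).
const std::map<std::string, size_t> kConstToAttr = {
    {"Cast", 1}, {"COO2CSR", 1}, {"CSR2COO", 1}, {"CSRDiv", 3}};

// Fold the registered const input into an attribute, if the op is listed.
void ConstInputToAttr(Node *node) {
  auto it = kConstToAttr.find(node->op);
  if (it == kConstToAttr.end()) {
    return;  // unregistered op: the input stays an input
  }
  const size_t idx = it->second;
  if (idx < node->inputs.size() && node->inputs[idx].has_value()) {
    node->attrs["input_" + std::to_string(idx)] = *node->inputs[idx];
    node->inputs.erase(node->inputs.begin() + idx);
  }
}

int main() {
  Node conv{"Conv2DBackpropInput", {std::nullopt, std::nullopt, Shape{1, 3, 32, 32}}, {}};
  ConstInputToAttr(&conv);
  // Not registered, so the shape stays the third input and no attribute appears.
  std::cout << conv.op << ": " << conv.inputs.size() << " inputs, "
            << conv.attrs.size() << " attrs\n";  // prints: 3 inputs, 0 attrs
  return 0;
}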

@@ -36,8 +36,7 @@ constexpr size_t kWidth2DStrideIndex = 3;
 constexpr size_t k2DDilationSize = 4;
 constexpr size_t kHeight2DDilationIndex = 2;
 constexpr size_t kWidth2DDilationIndex = 3;
-constexpr auto StaticInput = 2;
-constexpr auto DynamicInput = 3;
+constexpr auto kInputDim = 3;
 constexpr auto k2DHeightIndexNCHW = 2;
 constexpr auto k2DHeightIndexNHWC = 1;
@@ -45,10 +44,6 @@ constexpr auto k2DHeightIndexNHWC = 1;
 using KernelRunFunc = ConvGradFilterBkwGpuKernelMod::KernelRunFunc;
 const std::vector<std::pair<KernelAttr, KernelRunFunc>> &ConvGradFilterBkwGpuKernelMod::GetFuncList() const {
   static const std::vector<std::pair<KernelAttr, KernelRunFunc>> func_list = {
-    {KernelAttr().AddInputAttr(kNumberTypeFloat32).AddInputAttr(kNumberTypeFloat32).AddOutputAttr(kNumberTypeFloat32),
-     &ConvGradFilterBkwGpuKernelMod::LaunchKernel<float>},
-    {KernelAttr().AddInputAttr(kNumberTypeFloat16).AddInputAttr(kNumberTypeFloat16).AddOutputAttr(kNumberTypeFloat16),
-     &ConvGradFilterBkwGpuKernelMod::LaunchKernel<half>},
     {KernelAttr()
        .AddInputAttr(kNumberTypeFloat32)
        .AddInputAttr(kNumberTypeFloat32)
@@ -146,11 +141,8 @@ void ConvGradFilterBkwGpuKernelMod::CalPadList(const std::vector<int> &pad_list,
 void ConvGradFilterBkwGpuKernelMod::CheckParam(const std::vector<KernelTensorPtr> &inputs) {
   size_t input_num = inputs.size();
-  if (input_num != StaticInput && input_num != DynamicInput) {
-    MS_LOG(EXCEPTION) << "For '" << kernel_name_ << "', the number of inputs must be 2 or 3, but got " << input_num;
-  }
-  if (input_num == DynamicInput) {
-    is_dynamic_attr_ = true;
+  if (input_num != kInputDim) {
+    MS_LOG(EXCEPTION) << "For '" << kernel_name_ << "', the number of inputs must be 3, but got " << input_num;
   }
 }
@@ -273,9 +265,7 @@ bool ConvGradFilterBkwGpuKernelMod::Init(const BaseOperatorPtr &base_operator,
   if (format_attr_ == kOpFormat_NHWC) {
     data_format_ = kOpFormat_NHWC;
   }
-  if (!is_dynamic_attr_) {
-    filter_shape_ = GetValue<std::vector<int64_t>>(prim->GetAttr("filter_sizes"));
-  }
   group_ = static_cast<int>(GetValue<int64_t>(prim->GetAttr("group")));
   CHECK_CUDNN_RET_WITH_EXCEPT_NOTRACE(cudnnSetConvolutionGroupCount(conv_desc_, group_),
                                       "cudnnSetConvGroupCount failed");
@@ -306,14 +296,12 @@ int ConvGradFilterBkwGpuKernelMod::Resize(const BaseOperatorPtr &base_operator,
   if (is_null_input_) {
     return KRET_OK;
   }
-  if (is_dynamic_attr_) {
-    constexpr size_t kShapeIndex = 2;
-    auto value_res = TryGetIntValueFromInputs(inputs, kShapeIndex, kernel_name_, true);
-    if (!value_res.has_value()) {
-      MS_LOG(EXCEPTION) << "Fail to get filter_sizes from inputs";
-    }
-    filter_shape_ = value_res.value();
+  constexpr size_t kShapeIndex = 2;
+  if (!TryGetIntValue(inputs, kShapeIndex, kernel_name_, &filter_shape_, true)) {
+    MS_LOG(EXCEPTION) << "For " << kernel_name_ << " can't get filter_sizes input!";
   }
   auto filter_shape = filter_shape_;
   if (!CheckTensorSize({in_shape, dy_shape, filter_shape})) {
     return KRET_RESIZE_FAILED;
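
The Resize change above also swaps the helper style: the removed code unwrapped a std::optional returned by TryGetIntValueFromInputs, while the new code calls TryGetIntValue, which writes through an out parameter and returns a bool. The following standalone sketch models the two styles with simplified signatures (the real MindSpore helpers take kernel tensors and extra arguments, so treat these as illustrative stand-ins):

#include <cstdint>
#include <iostream>
#include <optional>
#include <vector>

using ShapeVector = std::vector<int64_t>;

// Stand-in for a kernel input that may carry a constant value at Resize time.
struct Input {
  std::optional<ShapeVector> const_value;
};

// Old style (modeled): return an optional and let the caller unwrap it.
std::optional<ShapeVector> TryGetIntValueFromInputs(const std::vector<Input> &inputs, size_t idx) {
  if (idx >= inputs.size()) {
    return std::nullopt;
  }
  return inputs[idx].const_value;
}

// New style (modeled): report success and write into the caller's variable,
// as in TryGetIntValue(inputs, kShapeIndex, kernel_name_, &filter_shape_, true).
bool TryGetIntValue(const std::vector<Input> &inputs, size_t idx, ShapeVector *out) {
  if (idx >= inputs.size() || !inputs[idx].const_value.has_value()) {
    return false;
  }
  *out = *inputs[idx].const_value;
  return true;
}

int main() {
  std::vector<Input> inputs(3);
  inputs[2].const_value = ShapeVector{64, 3, 3, 3};  // filter_sizes as input 2

  ShapeVector filter_shape;
  if (!TryGetIntValue(inputs, 2, &filter_shape)) {
    std::cerr << "can't get filter_sizes input!\n";
    return 1;
  }
  std::cout << "filter_sizes rank: " << filter_shape.size() << "\n";  // prints 4
  return 0;
}

The bool-plus-out-parameter form lets Resize keep a single early-exit check and drops the value_res temporary, which is presumably why the helper was consolidated.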

@@ -117,7 +117,6 @@ class ConvGradFilterBkwGpuKernelMod : public NativeGpuKernelMod,
   size_t padded_size_;
   size_t workspace_size_;
   bool use_pad_;
-  bool is_dynamic_attr_{false};
   std::vector<int64_t> filter_shape_;
   std::vector<int> pad_list_;
   void *stream_ptr_{nullptr};

@@ -37,8 +37,7 @@ constexpr size_t kWidth2DStrideIndex = 1;
 constexpr size_t k2DDilationSize = 4;
 constexpr size_t kHeight2DDilationIndex = 2;
 constexpr size_t kWidth2DDilationIndex = 3;
-constexpr auto StaticInput = 2;
-constexpr auto DynamicInput = 3;
+constexpr auto kInputDim = 3;
 constexpr auto k2DHeightIndexNCHW = 2;
 constexpr auto k2DHeightIndexNHWC = 1;
@@ -46,10 +45,6 @@ constexpr auto k2DHeightIndexNHWC = 1;
 using KernelRunFunc = ConvGradInputBkwGpuKernelMod::KernelRunFunc;
 const std::vector<std::pair<KernelAttr, KernelRunFunc>> &ConvGradInputBkwGpuKernelMod::GetFuncList() const {
   static const std::vector<std::pair<KernelAttr, KernelRunFunc>> func_list = {
-    {KernelAttr().AddInputAttr(kNumberTypeFloat32).AddInputAttr(kNumberTypeFloat32).AddOutputAttr(kNumberTypeFloat32),
-     &ConvGradInputBkwGpuKernelMod::LaunchKernel<float>},
-    {KernelAttr().AddInputAttr(kNumberTypeFloat16).AddInputAttr(kNumberTypeFloat16).AddOutputAttr(kNumberTypeFloat16),
-     &ConvGradInputBkwGpuKernelMod::LaunchKernel<half>},
     {KernelAttr()
        .AddInputAttr(kNumberTypeFloat32)
        .AddInputAttr(kNumberTypeFloat32)
@@ -163,11 +158,8 @@ void ConvGradInputBkwGpuKernelMod::CalPadList(const ShapeVector input_shape, con
 void ConvGradInputBkwGpuKernelMod::CheckParam(const std::vector<KernelTensorPtr> &inputs) {
   size_t input_num = inputs.size();
-  if (input_num != StaticInput && input_num != DynamicInput) {
-    MS_LOG(EXCEPTION) << "For '" << kernel_name_ << "', the number of inputs must be 2 or 3, but got " << input_num;
-  }
-  if (input_num == DynamicInput) {
-    is_dynamic_attr_ = true;
+  if (input_num != kInputDim) {
+    MS_LOG(EXCEPTION) << "For '" << kernel_name_ << "', the number of inputs must be 3, but got " << input_num;
   }
 }
@@ -254,9 +246,7 @@ bool ConvGradInputBkwGpuKernelMod::Init(const BaseOperatorPtr &base_operator,
   data_format_ = mindspore::FormatEnumToString(inputs[0]->GetFormat());
   auto prim = base_operator->GetPrim();
   MS_EXCEPTION_IF_NULL(prim);
-  if (!is_dynamic_attr_) {
-    input_shape_ = GetValue<std::vector<int64_t>>(prim->GetAttr("input_sizes"));
-  }
   format_attr_ = GetValue<std::string>(prim->GetAttr("format"));
   if (format_attr_ == kOpFormat_NHWC) {
     data_format_ = kOpFormat_NHWC;
@@ -295,14 +285,12 @@ int ConvGradInputBkwGpuKernelMod::Resize(const BaseOperatorPtr &base_operator,
   if (is_null_input_) {
     return KRET_OK;
   }
-  if (is_dynamic_attr_) {
-    constexpr size_t kShapeIndex = 2;
-    auto value_res = TryGetIntValueFromInputs(inputs, kShapeIndex, kernel_name_, true);
-    if (!value_res.has_value()) {
-      MS_LOG(EXCEPTION) << "Fail to get filter_sizes from inputs";
-    }
-    input_shape_ = value_res.value();
+  constexpr size_t kShapeIndex = 2;
+  if (!TryGetIntValue(inputs, kShapeIndex, kernel_name_, &input_shape_, true)) {
+    MS_LOG(EXCEPTION) << "For " << kernel_name_ << " can't get input_sizes input!";
   }
   auto input_shape = input_shape_;
   int h_index = k2DHeightIndexNCHW;
   int w_index = k2DHeightIndexNCHW + 1;

@@ -117,7 +117,6 @@ class ConvGradInputBkwGpuKernelMod : public NativeGpuKernelMod, public MatchKern
   size_t workspace_size_;
   bool use_pad_;
   float beta_;
-  bool is_dynamic_attr_{false};
   bool get_dynamic_attr_value_{false};
   std::vector<int64_t> input_shape_;
   static constexpr size_t kShapeIndex_{2};

@@ -25,9 +25,6 @@ namespace mindspore::opt {
 RER_GPU_DYNAMIC_CONST_TO_ATTR(kCastOpName, 1);
 RER_GPU_DYNAMIC_CONST_TO_ATTR(kFillOpName, 0);
 RER_GPU_STATIC_CONST_TO_ATTR(kCastOpName, 1);
-RER_GPU_STATIC_CONST_TO_ATTR(kConv2DBackpropFilterOpName, 2);
-RER_GPU_STATIC_CONST_TO_ATTR(kConv2DBackpropInputOpName, 2);
-RER_GPU_STATIC_CONST_TO_ATTR(kConv2DTransposeOpName, 2);
 RER_GPU_STATIC_CONST_TO_ATTR(kCOO2CSROpName, 1);
 RER_GPU_STATIC_CONST_TO_ATTR(kCSR2COOOpName, 1);
 RER_GPU_STATIC_CONST_TO_ATTR(kCSRDivOpName, 3);