!33936 [assistant][ops] Add FractionalMaxPoolWithFixedKsize and FractionalMaxPoolGradWithFixedKsize

Merge pull request !33936 from 董雪堂/FractionalMaxPoolWithFixedKsize
i-robot 2022-08-03 06:23:25 +00:00 committed by Gitee
commit eb799489ad
16 changed files with 1221 additions and 0 deletions

View File

@@ -0,0 +1,179 @@
/**
* Copyright 2022 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "plugin/device/cpu/kernel/fractional_max_pool_grad_with_fixed_ksize_cpu_kernel.h"
#include <algorithm>
#include <cmath>
#include <iostream>
#include <limits>
#include <utility>
#include <vector>
#include <string>
#include "plugin/device/cpu/hal/device/cpu_device_address.h"
namespace mindspore {
namespace kernel {
namespace {
const size_t kInputsNum = 3;
const size_t kOutputsNum = 1;
const size_t kInputIndex0 = 0;
const size_t kInputIndex1 = 1;
const size_t kInputIndex2 = 2;
const size_t kInputsDimSize = 4;
const size_t kInputsDimIndexN = 0;
const size_t kInputsDimIndexC = 1;
const size_t kInputsDimIndexH = 2;
const size_t kInputsDimIndexW = 3;
#define ADD_KERNEL(t1, t2, t3, t4) \
KernelAttr() \
.AddInputAttr(kNumberType##t1) \
.AddInputAttr(kNumberType##t2) \
.AddInputAttr(kNumberType##t3) \
.AddOutputAttr(kNumberType##t4)
} // namespace
void FractionalMaxPoolGradWithFixedKsizeCPUKernelMod::InitKernel(const CNodePtr &kernel_node) {
MS_EXCEPTION_IF_NULL(kernel_node);
kernel_name_ = common::AnfAlgo::GetCNodeName(kernel_node);
input_shape_ = AnfAlgo::GetInputDeviceShape(kernel_node, kInputIndex0);
out_backprop_shape_ = AnfAlgo::GetInputDeviceShape(kernel_node, kInputIndex1);
out_backprop_type_ = AnfAlgo::GetInputDeviceDataType(kernel_node, kInputIndex1);
argmax_shape_ = AnfAlgo::GetInputDeviceShape(kernel_node, kInputIndex2);
data_format_ = common::AnfAlgo::GetNodeAttr<string>(kernel_node, FORMAT);
if (data_format_ != "NCHW") {
MS_EXCEPTION(ValueError) << "For '" << kernel_name_ << "', the attr data_format must be NCHW.";
}
if (input_shape_.size() != kInputsDimSize) {
MS_EXCEPTION(ValueError) << "For '" << kernel_name_ << "', The dim of input origin_input must be 4, but got "
<< input_shape_.size() << ".";
}
input_n_ = input_shape_[kInputsDimIndexN];
input_c_ = input_shape_[kInputsDimIndexC];
input_h_ = input_shape_[kInputsDimIndexH];
input_w_ = input_shape_[kInputsDimIndexW];
if (out_backprop_shape_.size() != kInputsDimSize) {
MS_EXCEPTION(ValueError) << "For '" << kernel_name_ << "', The dim of input out_backprop must be 4, but got "
<< out_backprop_shape_.size() << ".";
}
out_backprop_h_ = out_backprop_shape_[kInputsDimIndexH];
out_backprop_w_ = out_backprop_shape_[kInputsDimIndexW];
if (argmax_shape_.size() != kInputsDimSize) {
MS_EXCEPTION(ValueError) << "For '" << kernel_name_ << "', The dim of input argmax must be 4, but got "
<< argmax_shape_.size() << ".";
}
for (size_t i = 0; i < kInputsDimSize; i++) {
if (out_backprop_shape_[i] != argmax_shape_[i]) {
MS_EXCEPTION(ValueError) << "For '" << kernel_name_
<< "', The shape of input out_backprop and input argmax must be equal.";
}
}
if (input_n_ != out_backprop_shape_[kInputsDimIndexN]) {
MS_EXCEPTION(ValueError) << "For '" << kernel_name_ << "', The first dimension of three inputs must be equal.";
}
if (input_c_ != out_backprop_shape_[kInputsDimIndexC]) {
MS_EXCEPTION(ValueError) << "For '" << kernel_name_ << "', The second dimension of three inputs must be equal.";
}
}
bool FractionalMaxPoolGradWithFixedKsizeCPUKernelMod::Launch(const std::vector<AddressPtr> &inputs,
const std::vector<AddressPtr> &workspace,
const std::vector<AddressPtr> &outputs) {
CHECK_KERNEL_INPUTS_NUM(inputs.size(), kInputsNum, kernel_name_);
CHECK_KERNEL_OUTPUTS_NUM(outputs.size(), kOutputsNum, kernel_name_);
switch (out_backprop_type_) {
case kNumberTypeFloat16:
return GradComputeTemplate<float16>(inputs, outputs);
case kNumberTypeFloat32:
return GradComputeTemplate<float>(inputs, outputs);
case kNumberTypeFloat64:
return GradComputeTemplate<double>(inputs, outputs);
case kNumberTypeInt32:
return GradComputeTemplate<int32_t>(inputs, outputs);
case kNumberTypeInt64:
return GradComputeTemplate<int64_t>(inputs, outputs);
default:
MS_EXCEPTION(TypeError) << "For '" << kernel_name_ << "', out_backprop_type" << out_backprop_type_
<< "not support, must be in [{DT_FLOAT16, DT_FLOAT, DT_DOUBLE, DT_INT32, DT_INT64}].";
}
return true;
}
template <typename backprop_t>
bool FractionalMaxPoolGradWithFixedKsizeCPUKernelMod::GradComputeTemplate(const std::vector<AddressPtr> &inputs,
const std::vector<AddressPtr> &outputs) {
backprop_t *out_backprop_ptr = reinterpret_cast<backprop_t *>(inputs[kInputIndex1]->addr);
int64_t *argmax_ptr = reinterpret_cast<int64_t *>(inputs[kInputIndex2]->addr);
backprop_t *output_ptr = reinterpret_cast<backprop_t *>(outputs[0]->addr);
MS_EXCEPTION_IF_NULL(out_backprop_ptr);
MS_EXCEPTION_IF_NULL(argmax_ptr);
MS_EXCEPTION_IF_NULL(output_ptr);
auto shard_fractional_max_pool_grad_with_fixed_ksize = [&](size_t start, size_t end) {
for (size_t n = start; n < end; n++) {
backprop_t *out_backpropForPlane = out_backprop_ptr + n * input_c_ * out_backprop_h_ * out_backprop_w_;
int64_t *argmaxForPlane = argmax_ptr + n * input_c_ * out_backprop_h_ * out_backprop_w_;
backprop_t *outputForPlane = output_ptr + n * input_c_ * input_h_ * input_w_;
FractionalMaxPoolGradWithFixedKsizeCompute<backprop_t>(out_backpropForPlane, argmaxForPlane, outputForPlane);
}
};
CPUKernelUtils::ParallelFor(shard_fractional_max_pool_grad_with_fixed_ksize, input_n_);
return true;
}
template <typename backprop_t>
void FractionalMaxPoolGradWithFixedKsizeCPUKernelMod::FractionalMaxPoolGradWithFixedKsizeCompute(
backprop_t *out_backpropForPlane, int64_t *argmaxForPlane, backprop_t *outputForPlane) {
for (int64_t plane = 0; plane < input_c_; plane++) {
backprop_t *out_backpropPlane = out_backpropForPlane + plane * out_backprop_h_ * out_backprop_w_;
int64_t *argmaxPlane = argmaxForPlane + plane * out_backprop_h_ * out_backprop_w_;
backprop_t *outputPlane = outputForPlane + plane * input_h_ * input_w_;
for (int64_t i = 0; i < input_h_; i++) {
for (int64_t j = 0; j < input_w_; j++) {
outputPlane[i * input_w_ + j] = static_cast<backprop_t>(0);
}
}
for (int64_t h = 0; h < out_backprop_h_; h++) {
for (int64_t w = 0; w < out_backprop_w_; w++) {
int64_t input_index = h * out_backprop_w_ + w;
if (input_index < 0 || input_index >= (out_backprop_h_ * out_backprop_w_)) {
MS_EXCEPTION(ValueError) << "For '" << kernel_name_ << "', the index value of argmax is illegal.";
}
int64_t output_index = argmaxPlane[input_index];
if (output_index < 0 || output_index >= (input_h_ * input_w_)) {
MS_EXCEPTION(ValueError) << "For '" << kernel_name_ << "', the index value of output is illegal.";
}
outputPlane[output_index] += out_backpropPlane[input_index];
}
}
}
}
std::vector<KernelAttr> FractionalMaxPoolGradWithFixedKsizeCPUKernelMod::GetOpSupport() {
static std::vector<KernelAttr> kernel_attr_list = {
ADD_KERNEL(Int32, Float16, Int64, Float16), ADD_KERNEL(Int32, Float32, Int64, Float32),
ADD_KERNEL(Int32, Float64, Int64, Float64), ADD_KERNEL(Int32, Int32, Int64, Int32),
ADD_KERNEL(Int32, Int64, Int64, Int64), ADD_KERNEL(Int64, Float16, Int64, Float16),
ADD_KERNEL(Int64, Float32, Int64, Float32), ADD_KERNEL(Int64, Float64, Int64, Float64),
ADD_KERNEL(Int64, Int32, Int64, Int32), ADD_KERNEL(Int64, Int64, Int64, Int64)};
return kernel_attr_list;
}
MS_KERNEL_FACTORY_REG(NativeCpuKernelMod, FractionalMaxPoolGradWithFixedKsize,
FractionalMaxPoolGradWithFixedKsizeCPUKernelMod);
} // namespace kernel
} // namespace mindspore
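The grad routine above is, per plane, a scatter-add: each out_backprop element is accumulated into the flat input position stored in argmax, after a bounds check. A minimal NumPy sketch of that inner loop, assuming NCHW layout (the function name and signature here are illustrative, not part of the kernel):

import numpy as np

def grad_plane(out_backprop, argmax, input_h, input_w):
    """Scatter-add one pooled gradient plane back onto its (input_h, input_w) source."""
    grad = np.zeros(input_h * input_w, dtype=out_backprop.dtype)
    for i, out_idx in enumerate(argmax.ravel()):
        # mirrors the kernel's check that every argmax value indexes into the input plane
        assert 0 <= out_idx < input_h * input_w, "illegal argmax index"
        grad[out_idx] += out_backprop.ravel()[i]
    return grad.reshape(input_h, input_w)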

View File

@@ -0,0 +1,58 @@
/**
* Copyright 2022 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_CPU_FRACTIONAL_MAX_POOL_GRAD_WITH_FIXED_KSIZE_CPU_KERNEL_H_
#define MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_CPU_FRACTIONAL_MAX_POOL_GRAD_WITH_FIXED_KSIZE_CPU_KERNEL_H_
#include <memory>
#include <vector>
#include <string>
#include "plugin/device/cpu/kernel/cpu_kernel.h"
#include "plugin/factory/ms_factory.h"
namespace mindspore {
namespace kernel {
class FractionalMaxPoolGradWithFixedKsizeCPUKernelMod : public DeprecatedNativeCpuKernelMod {
public:
FractionalMaxPoolGradWithFixedKsizeCPUKernelMod() = default;
~FractionalMaxPoolGradWithFixedKsizeCPUKernelMod() override = default;
void InitKernel(const CNodePtr &kernel_node) override;
bool Launch(const std::vector<AddressPtr> &inputs, const std::vector<AddressPtr> &workspace,
const std::vector<AddressPtr> &outputs) override;
protected:
std::vector<KernelAttr> GetOpSupport() override;
private:
template <typename backprop_t>
bool GradComputeTemplate(const std::vector<AddressPtr> &inputs, const std::vector<AddressPtr> &outputs);
template <typename backprop_t>
void FractionalMaxPoolGradWithFixedKsizeCompute(backprop_t *out_backpropForPlane, int64_t *argmaxForPlane,
backprop_t *outputForPlane);
std::vector<int64_t> input_shape_;
std::vector<int64_t> out_backprop_shape_;
std::vector<int64_t> argmax_shape_;
std::string data_format_{"NCHW"};
TypeId out_backprop_type_{kTypeUnknown};
int64_t input_n_{0};
int64_t input_c_{0};
int64_t input_h_{0};
int64_t input_w_{0};
int64_t out_backprop_h_{0};
int64_t out_backprop_w_{0};
};
} // namespace kernel
} // namespace mindspore
#endif // MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_CPU_FRACTIONAL_MAX_POOL_GRAD_WITH_FIXED_KSIZE_CPU_KERNEL_H_

View File

@@ -0,0 +1,289 @@
/**
* Copyright 2022 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "plugin/device/cpu/kernel/fractional_max_pool_with_fixed_ksize_cpu_kernel.h"
#include <algorithm>
#include <cmath>
#include <iostream>
#include <limits>
#include <utility>
#include <vector>
#include <string>
#include "plugin/device/cpu/hal/device/cpu_device_address.h"
namespace mindspore {
namespace kernel {
namespace {
const size_t kInputsNum = 2;
const size_t kOutputsNum = 2;
const size_t kInputIndex0 = 0;
const size_t kInputIndex1 = 1;
const size_t kOutputIndex1 = 1;
const size_t kInputDimIndexN = 0;
const size_t kInputDimIndexC = 1;
const size_t kInputDimIndexH = 2;
const size_t kInputDimIndexW = 3;
const size_t kDimSize1 = 1;
const size_t kDimSize2 = 2;
const size_t kDimSize3 = 3;
const size_t kDimSize4 = 4;
const size_t kKsizeIndexH = 0;
const size_t kKsizeIndexW = 1;
const size_t kOutputShapeIndexH = 0;
const size_t kOutputShapeIndexW = 1;
const size_t kRandomSamplesLastDimIndex = 2;
const int64_t kRandomSamplesThirdDimSize = 2;
const size_t kKsizeLength1 = 1;
const size_t kKsizeLength2 = 2;
const size_t kOutputShapeLength1 = 1;
const size_t kOutputShapeLength2 = 2;
#define ADD_KERNEL(t1, t2, t3, t4) \
KernelAttr() \
.AddInputAttr(kNumberType##t1) \
.AddInputAttr(kNumberType##t2) \
.AddOutputAttr(kNumberType##t3) \
.AddOutputAttr(kNumberType##t4)
} // namespace
void FractionalMaxPoolWithFixedKsizeCPUKernelMod::InitKernel(const CNodePtr &kernel_node) {
MS_EXCEPTION_IF_NULL(kernel_node);
kernel_name_ = common::AnfAlgo::GetCNodeName(kernel_node);
input_type_ = AnfAlgo::GetInputDeviceDataType(kernel_node, kInputIndex0);
input_shape_ = AnfAlgo::GetInputDeviceShape(kernel_node, kInputIndex0);
random_samples_type_ = AnfAlgo::GetInputDeviceDataType(kernel_node, kInputIndex1);
random_samples_shape_ = AnfAlgo::GetInputDeviceShape(kernel_node, kInputIndex1);
argmax_type_ = AnfAlgo::GetOutputDeviceDataType(kernel_node, kOutputIndex1);
output_shape_ = common::AnfAlgo::GetNodeAttr<std::vector<int64_t>>(kernel_node, "output_shape");
ksize_ = common::AnfAlgo::GetNodeAttr<std::vector<int64_t>>(kernel_node, "ksize");
data_format_ = common::AnfAlgo::GetNodeAttr<string>(kernel_node, FORMAT);
if (data_format_ != "NCHW") {
MS_EXCEPTION(ValueError) << "For '" << kernel_name_ << "', the attr data_format must be NCHW.";
}
if (input_shape_.size() != kDimSize4) {
MS_EXCEPTION(ValueError) << "For '" << kernel_name_ << "', the dimension of input x must be 4.";
}
if (random_samples_shape_.size() != kDimSize3) {
MS_EXCEPTION(ValueError) << "For '" << kernel_name_ << "', the dimension of input random_samples must be 3.";
}
if (ksize_.size() == kKsizeLength1) {
ksize_h_ = ksize_[kKsizeIndexH];
ksize_w_ = ksize_[kKsizeIndexH];
} else if (ksize_.size() == kKsizeLength2) {
ksize_h_ = ksize_[kKsizeIndexH];
ksize_w_ = ksize_[kKsizeIndexW];
} else {
MS_EXCEPTION(ValueError) << "For '" << kernel_name_ << "', the size of attr ksize must be equal to 1 or 2, but got "
<< ksize_.size() << ".";
}
if (output_shape_.size() == kOutputShapeLength1) {
output_h_ = output_shape_[kOutputShapeIndexH];
output_w_ = output_shape_[kOutputShapeIndexH];
} else if (output_shape_.size() == kOutputShapeLength2) {
output_h_ = output_shape_[kOutputShapeIndexH];
output_w_ = output_shape_[kOutputShapeIndexW];
} else {
MS_EXCEPTION(ValueError) << "For '" << kernel_name_
<< "', the size of attr output_shape must be equal to 1 or 2, but got "
<< output_shape_.size() << ".";
}
input_n_ = input_shape_[kInputDimIndexN];
input_c_ = input_shape_[kInputDimIndexC];
input_h_ = input_shape_[kInputDimIndexH];
input_w_ = input_shape_[kInputDimIndexW];
if (output_h_ + ksize_h_ - 1 > input_h_) {
MS_EXCEPTION(ValueError) << "For '" << kernel_name_ << "', ksize height [" << ksize_h_ << "] + output_shape_h ["
<< output_h_ << "] too large relative to input height [" << input_h_
<< "], conflict with the rule: ksize_h + output_shape_h - 1 <= input_h";
}
if (output_w_ + ksize_w_ - 1 > input_w_) {
MS_EXCEPTION(ValueError) << "For '" << kernel_name_ << "', ksize width [" << ksize_w_ << "] + output_shape_w ["
<< output_w_ << "] too large relative to input width [" << input_w_
<< "], conflict with the rule: ksize_w + output_shape_w - 1 <= input_w";
}
if (random_samples_shape_[kInputDimIndexN] != input_n_) {
MS_EXCEPTION(ValueError) << "For '" << kernel_name_
<< "', the first dim of input[x] and input[random_samples] must be equal, but "
<< "got x=[" << input_n_ << "] and random_samples=["
<< random_samples_shape_[kInputDimIndexN] << "].";
}
if (random_samples_shape_[kInputDimIndexC] != input_c_) {
MS_EXCEPTION(ValueError) << "For '" << kernel_name_
<< "', the second dim of input[x] and input[random_samples] must be equal, but "
<< "got x=[" << input_c_ << "] and random_samples=["
<< random_samples_shape_[kInputDimIndexC] << "].";
}
if (random_samples_shape_[kRandomSamplesLastDimIndex] != kRandomSamplesThirdDimSize) {
MS_EXCEPTION(ValueError) << "For '" << kernel_name_
<< "', the third dim of input[random_samples] must be 2, but got "
<< random_samples_shape_[kRandomSamplesLastDimIndex] << ".";
}
if (argmax_type_ != kNumberTypeInt64) {
MS_EXCEPTION(ValueError) << "For '" << kernel_name_ << "', the data type of output argmax must be int64.";
}
}
bool FractionalMaxPoolWithFixedKsizeCPUKernelMod::Launch(const std::vector<AddressPtr> &inputs,
const std::vector<AddressPtr> &workspace,
const std::vector<AddressPtr> &outputs) {
CHECK_KERNEL_INPUTS_NUM(inputs.size(), kInputsNum, kernel_name_);
CHECK_KERNEL_OUTPUTS_NUM(outputs.size(), kOutputsNum, kernel_name_);
switch (input_type_) {
case kNumberTypeFloat16:
return DoComputeWithRandomSamplesType<float16>(inputs, outputs, random_samples_type_);
case kNumberTypeFloat32:
return DoComputeWithRandomSamplesType<float>(inputs, outputs, random_samples_type_);
case kNumberTypeFloat64:
return DoComputeWithRandomSamplesType<double>(inputs, outputs, random_samples_type_);
case kNumberTypeInt32:
return DoComputeWithRandomSamplesType<int32_t>(inputs, outputs, random_samples_type_);
case kNumberTypeInt64:
return DoComputeWithRandomSamplesType<int64_t>(inputs, outputs, random_samples_type_);
default:
MS_EXCEPTION(TypeError) << "For '" << kernel_name_ << "', the data type of input not support.";
}
return true;
}
template <typename scalar_t>
bool FractionalMaxPoolWithFixedKsizeCPUKernelMod::DoComputeWithRandomSamplesType(const std::vector<AddressPtr> &inputs,
const std::vector<AddressPtr> &outputs,
TypeId random_samples_type_) {
switch (random_samples_type_) {
case kNumberTypeFloat16:
return ComputeTemplate<scalar_t, float16>(inputs, outputs);
case kNumberTypeFloat32:
return ComputeTemplate<scalar_t, float>(inputs, outputs);
case kNumberTypeFloat64:
return ComputeTemplate<scalar_t, double>(inputs, outputs);
default:
MS_EXCEPTION(TypeError) << "For '" << kernel_name_ << "', random_samples_type" << random_samples_type_
<< "not support, must be in [{DT_FLOAT16, DT_FLOAT, DT_DOUBLE}].";
return false;
}
}
template <typename scalar_t, typename random_sample_t>
bool FractionalMaxPoolWithFixedKsizeCPUKernelMod::ComputeTemplate(const std::vector<AddressPtr> &inputs,
const std::vector<AddressPtr> &outputs) {
scalar_t *input_ptr = reinterpret_cast<scalar_t *>(inputs[0]->addr);
random_sample_t *random_samples_ptr = reinterpret_cast<random_sample_t *>(inputs[1]->addr);
scalar_t *output_ptr = reinterpret_cast<scalar_t *>(outputs[0]->addr);
int64_t *argmax_ptr = reinterpret_cast<int64_t *>(outputs[1]->addr);
MS_EXCEPTION_IF_NULL(input_ptr);
MS_EXCEPTION_IF_NULL(random_samples_ptr);
MS_EXCEPTION_IF_NULL(output_ptr);
MS_EXCEPTION_IF_NULL(argmax_ptr);
auto shard_fractional_max_pool_with_fixed_ksize = [&](size_t start, size_t end) {
for (size_t n = start; n < end; n++) {
scalar_t *inputForPlane = input_ptr + n * input_c_ * input_h_ * input_w_;
random_sample_t *random_samplesForPlane = random_samples_ptr + n * input_c_ * kRandomSamplesThirdDimSize;
scalar_t *outputForPlane = output_ptr + n * input_c_ * output_h_ * output_w_;
int64_t *argmaxForPlane = argmax_ptr + n * input_c_ * output_h_ * output_w_;
FractionalMaxPoolWithFixedKsizeCompute<scalar_t, random_sample_t>(inputForPlane, random_samplesForPlane,
outputForPlane, argmaxForPlane);
}
};
CPUKernelUtils::ParallelFor(shard_fractional_max_pool_with_fixed_ksize, input_n_);
return true;
}
template <typename scalar_t, typename random_sample_t>
void FractionalMaxPoolWithFixedKsizeCPUKernelMod::FractionalMaxPoolWithFixedKsizeCompute(
scalar_t *inputForPlane, random_sample_t *random_samplesForPlane, scalar_t *outputForPlane, int64_t *argmaxForPlane) {
for (int64_t plane = 0; plane < input_c_; plane++) {
random_sample_t *random_samplesPlane = random_samplesForPlane + plane * kRandomSamplesThirdDimSize;
auto sequenceW = GenerateIntervals<random_sample_t>(random_samplesPlane[0], input_w_, output_w_, ksize_w_);
auto sequenceH = GenerateIntervals<random_sample_t>(random_samplesPlane[1], input_h_, output_h_, ksize_h_);
scalar_t *inputPlane = inputForPlane + plane * input_h_ * input_w_;
scalar_t *outputPlane = outputForPlane + plane * output_h_ * output_w_;
int64_t *argmaxPlane = argmaxForPlane + plane * output_h_ * output_w_;
for (int64_t h = 0; h < output_h_; h++) {
int64_t inputHStart = sequenceH[h];
for (int64_t w = 0; w < output_w_; w++) {
int64_t inputWStart = sequenceW[w];
int64_t h2 = inputHStart;
int64_t w2 = inputWStart;
// lowest() is also correct for integral scalar_t, where -infinity() would collapse to 0
scalar_t maxValue = std::numeric_limits<scalar_t>::lowest();
int64_t maxIndex = h2 * input_w_ + w2;
for (h2 = inputHStart; h2 < inputHStart + ksize_h_; h2++) {
for (w2 = inputWStart; w2 < inputWStart + ksize_w_; w2++) {
if (h2 < 0 || h2 >= input_h_) {
MS_EXCEPTION(ValueError) << "For '" << kernel_name_ << "', index H value is illegal.";
}
if (w2 < 0 || w2 >= input_w_) {
MS_EXCEPTION(ValueError) << "For '" << kernel_name_ << "', index W value is illegal.";
}
int64_t index = h2 * input_w_ + w2;
scalar_t value = inputPlane[index];
if (value > maxValue) {
maxValue = value;
maxIndex = index;
}
}
}
outputPlane[h * output_w_ + w] = maxValue;
argmaxPlane[h * output_w_ + w] = maxIndex;
}
}
}
}
template <typename random_sample_t>
std::vector<int> FractionalMaxPoolWithFixedKsizeCPUKernelMod::GenerateIntervals(random_sample_t sample, int input_size,
int output_size, int kernel_size) {
std::vector<int> sequence(output_size);
if (output_size > 1) {
random_sample_t alpha =
static_cast<random_sample_t>(input_size - kernel_size) / static_cast<random_sample_t>(output_size - 1);
for (int i = 0; i < output_size - 1; i++) {
sequence[i] =
static_cast<int>((static_cast<random_sample_t>(i) + sample) * alpha) - static_cast<int>(sample * alpha);
}
}
sequence[output_size - 1] = input_size - kernel_size;
return sequence;
}
std::vector<KernelAttr> FractionalMaxPoolWithFixedKsizeCPUKernelMod::GetOpSupport() {
static std::vector<KernelAttr> kernel_attr_list = {
ADD_KERNEL(Int32, Float32, Int32, Int64), ADD_KERNEL(Int64, Float32, Int64, Int64),
ADD_KERNEL(Float16, Float32, Float16, Int64), ADD_KERNEL(Float32, Float32, Float32, Int64),
ADD_KERNEL(Float64, Float32, Float64, Int64), ADD_KERNEL(Int32, Float16, Int32, Int64),
ADD_KERNEL(Int64, Float16, Int64, Int64), ADD_KERNEL(Float16, Float16, Float16, Int64),
ADD_KERNEL(Float32, Float16, Float32, Int64), ADD_KERNEL(Float64, Float16, Float64, Int64),
ADD_KERNEL(Int32, Float64, Int32, Int64), ADD_KERNEL(Int64, Float64, Int64, Int64),
ADD_KERNEL(Float16, Float64, Float16, Int64), ADD_KERNEL(Float32, Float64, Float32, Int64),
ADD_KERNEL(Float64, Float64, Float64, Int64)};
return kernel_attr_list;
}
MS_KERNEL_FACTORY_REG(NativeCpuKernelMod, FractionalMaxPoolWithFixedKsize, FractionalMaxPoolWithFixedKsizeCPUKernelMod);
} // namespace kernel
} // namespace mindspore
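For intuition, the forward kernel's two building blocks, GenerateIntervals and the per-plane window scan, can be restated in a few lines of NumPy. This is a hedged reference sketch of the same math (names are illustrative), not the kernel itself:

import numpy as np

def generate_intervals(sample, input_size, output_size, kernel_size):
    """Pseudo-random window starts; mirrors GenerateIntervals above."""
    sequence = [0] * output_size
    if output_size > 1:
        alpha = (input_size - kernel_size) / (output_size - 1)
        for i in range(output_size - 1):
            sequence[i] = int((i + sample) * alpha) - int(sample * alpha)
    sequence[-1] = input_size - kernel_size  # last window pinned to the right edge
    return sequence

def pool_plane(plane, samples, output_hw, ksize_hw):
    """Fractional max pooling of one (H, W) plane; returns (y, argmax)."""
    in_h, in_w = plane.shape
    out_h, out_w = output_hw
    k_h, k_w = ksize_hw
    starts_w = generate_intervals(samples[0], in_w, out_w, k_w)  # samples[0] drives W, as above
    starts_h = generate_intervals(samples[1], in_h, out_h, k_h)
    y = np.empty((out_h, out_w), plane.dtype)
    argmax = np.empty((out_h, out_w), np.int64)
    for h in range(out_h):
        for w in range(out_w):
            window = plane[starts_h[h]:starts_h[h] + k_h, starts_w[w]:starts_w[w] + k_w]
            wh, ww = divmod(int(np.argmax(window)), k_w)  # first max wins, as in the C++
            y[h, w] = window[wh, ww]
            argmax[h, w] = (starts_h[h] + wh) * in_w + (starts_w[w] + ww)
    return y, argmax

With the 5x5 docstring example further down this diff (sample 0.8, ksize 2, output 2x2), both interval sequences come out as [0, 3], and the sketch reproduces y = [[0.9545, 0.8764], [0.9673, 0.9852]] and argmax = [[1, 9], [16, 24]].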

View File

@@ -0,0 +1,68 @@
/**
* Copyright 2022 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_CPU_FRACTIONAL_MAX_POOL_WITH_FIXED_KSIZE_CPU_KERNEL_H_
#define MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_CPU_FRACTIONAL_MAX_POOL_WITH_FIXED_KSIZE_CPU_KERNEL_H_
#include <memory>
#include <vector>
#include <string>
#include "plugin/device/cpu/kernel/cpu_kernel.h"
#include "plugin/factory/ms_factory.h"
namespace mindspore {
namespace kernel {
class FractionalMaxPoolWithFixedKsizeCPUKernelMod : public DeprecatedNativeCpuKernelMod {
public:
FractionalMaxPoolWithFixedKsizeCPUKernelMod() = default;
~FractionalMaxPoolWithFixedKsizeCPUKernelMod() override = default;
void InitKernel(const CNodePtr &kernel_node) override;
bool Launch(const std::vector<AddressPtr> &inputs, const std::vector<AddressPtr> &workspace,
const std::vector<AddressPtr> &outputs) override;
protected:
std::vector<KernelAttr> GetOpSupport() override;
private:
template <typename scalar_t>
bool DoComputeWithRandomSamplesType(const std::vector<AddressPtr> &inputs, const std::vector<AddressPtr> &outputs,
TypeId random_samples_type);
template <typename scalar_t, typename random_sample_t>
bool ComputeTemplate(const std::vector<AddressPtr> &inputs, const std::vector<AddressPtr> &outputs);
template <typename scalar_t, typename random_sample_t>
void FractionalMaxPoolWithFixedKsizeCompute(scalar_t *inputForPlane, random_sample_t *random_samplesForPlane,
scalar_t *outputForPlane, int64_t *argmaxForPlane);
template <typename random_sample_t>
std::vector<int> GenerateIntervals(random_sample_t sample, int input_size, int output_size, int kernel_size);
std::vector<int64_t> input_shape_;
std::vector<int64_t> random_samples_shape_;
std::vector<int64_t> output_shape_;
std::vector<int64_t> ksize_;
std::string data_format_{"NCHW"};
TypeId input_type_{kTypeUnknown};
TypeId random_samples_type_{kTypeUnknown};
TypeId argmax_type_{kTypeUnknown};
int64_t input_n_{0};
int64_t input_c_{0};
int64_t input_h_{0};
int64_t input_w_{0};
int64_t ksize_h_{0};
int64_t ksize_w_{0};
int64_t output_h_{0};
int64_t output_w_{0};
};
} // namespace kernel
} // namespace mindspore
#endif // MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_CPU_FRACTIONAL_MAX_POOL_WITH_FIXED_KSIZE_CPU_KERNEL_H_

View File

@@ -206,6 +206,8 @@ constexpr auto kReverseV2 = "ReverseV2";
constexpr auto kSparseSparseMinimum = "SparseSparseMinimum";
// NN
constexpr auto kFractionalMaxPoolWithFixedKsize = "FractionalMaxPoolWithFixedKsize";
constexpr auto kFractionalMaxPoolGradWithFixedKsize = "FractionalMaxPoolGradWithFixedKsize";
constexpr auto kApplyAddSign = "ApplyAddSign";
constexpr auto kAdaptiveMaxPool3D = "AdaptiveMaxPool3D";
constexpr auto kFractionalMaxPool3DWithFixedKsize = "FractionalMaxPool3DWithFixedKsize";
@@ -592,6 +594,10 @@ GVAR_DEF(PrimitivePtr, kPrimCropAndResize, std::make_shared<Primitive>(kCropAndR
GVAR_DEF(PrimitivePtr, kPrimCropAndResizeGradImage, std::make_shared<Primitive>(kCropAndResizeGradImage));
// NN
GVAR_DEF(PrimitivePtr, kPrimFractionalMaxPoolWithFixedKsize,
std::make_shared<Primitive>(kFractionalMaxPoolWithFixedKsize));
GVAR_DEF(PrimitivePtr, kPrimFractionalMaxPoolGradWithFixedKsize,
std::make_shared<Primitive>(kFractionalMaxPoolGradWithFixedKsize));
GVAR_DEF(PrimitivePtr, kPrimCeLU, std::make_shared<Primitive>("CeLU"));
GVAR_DEF(PrimitivePtr, kPrimAdam, std::make_shared<Primitive>("Adam"));
GVAR_DEF(PrimitivePtr, kPrimAdamWeightDecay, std::make_shared<Primitive>("AdamWeightDecay"));

View File

@@ -0,0 +1,162 @@
/**
* Copyright 2022 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "ops/fractional_max_pool_with_fixed_ksize.h"
#include <string>
#include <algorithm>
#include <memory>
#include <set>
#include <vector>
#include "ops/op_utils.h"
#include "mindapi/src/helper.h"
#include "utils/check_convert_utils.h"
#include "abstract/ops/primitive_infer_map.h"
namespace mindspore {
namespace ops {
namespace {
constexpr size_t kInputDimSize = 4;
constexpr size_t kRandomSamplesDimSize = 3;
constexpr size_t kRandomSamplesDimIndex2 = 2;
constexpr size_t kRandomSamplesLastDimSize = 2;
constexpr size_t kInputsDimIndex0 = 0;
constexpr size_t kInputsDimIndex1 = 1;
constexpr size_t kInputsDimIndex2 = 2;
constexpr size_t kInputsDimIndex3 = 3;
constexpr size_t kKsizeDimSize1 = 1;
constexpr size_t kKsizeDimSize2 = 2;
constexpr size_t kKsizeIndex0 = 0;
constexpr size_t kKsizeIndex1 = 1;
constexpr size_t kOutputShapeDimSize1 = 1;
constexpr size_t kOutputShapeDimSize2 = 2;
constexpr size_t kOutputShapeIndex0 = 0;
constexpr size_t kOutputShapeIndex1 = 1;
abstract::TupleShapePtr FractionalMaxPoolWithFixedKsizeInferShape(const PrimitivePtr &primitive,
const std::vector<AbstractBasePtr> &input_args) {
auto data_format = GetValue<std::string>(primitive->GetAttr(kFormat));
if (data_format != "NCHW") {
MS_EXCEPTION(ValueError) << "data_format must be NCHW." << data_format;
}
auto x_shape = CheckAndConvertUtils::ConvertShapePtrToShapeMap(input_args[0]->GetShapeTrack())[kShape];
if (x_shape.size() != kInputDimSize) {
MS_EXCEPTION(ValueError) << "For FractionalMaxPoolWithFixedKsize, the dimension of input_x must be 4, but got "
<< x_shape.size();
}
auto random_samples_shape = CheckAndConvertUtils::ConvertShapePtrToShapeMap(input_args[1]->GetShapeTrack())[kShape];
if (random_samples_shape.size() != kRandomSamplesDimSize) {
MS_EXCEPTION(ValueError) << "For FractionalMaxPoolWithFixedKsize, the dimension of random_samples must be 3, "
<< "but got " << random_samples_shape.size();
}
if (random_samples_shape[kRandomSamplesDimIndex2] != kRandomSamplesLastDimSize) {
MS_EXCEPTION(ValueError) << "For FractionalMaxPoolWithFixedKsize, the last dimension size of random_samples must "
<< "be 2, but got " << random_samples_shape[kRandomSamplesDimIndex2];
}
if (x_shape[kInputsDimIndex0] != random_samples_shape[kInputsDimIndex0]) {
MS_EXCEPTION(ValueError) << "The first dimension size of input_x and random_samples must be equal.";
}
if (x_shape[kInputsDimIndex1] != random_samples_shape[kInputsDimIndex1]) {
MS_EXCEPTION(ValueError) << "The second dimension size of input_x and random_samples must be equal.";
}
auto ksize = GetValue<std::vector<int64_t>>(primitive->GetAttr("ksize"));
if (std::any_of(ksize.begin(), ksize.end(), [](int64_t item) { return item <= 0; })) {
MS_EXCEPTION(ValueError) << "For FractionalMaxPoolWithFixedKsize, all ksize items must be positive.";
}
int64_t ksize_h = 0;
int64_t ksize_w = 0;
if (ksize.size() == kKsizeDimSize1) {
ksize_h = ksize[kKsizeIndex0];
ksize_w = ksize[kKsizeIndex0];
} else if (ksize.size() == kKsizeDimSize2) {
ksize_h = ksize[kKsizeIndex0];
ksize_w = ksize[kKsizeIndex1];
} else {
MS_EXCEPTION(ValueError) << "For FractionalMaxPoolWithFixedKsize, the dimension of ksize must be 1 or 2, "
<< "but got " << ksize.size();
}
auto output_shape = GetValue<std::vector<int64_t>>(primitive->GetAttr("output_shape"));
if (std::any_of(output_shape.begin(), output_shape.end(), [](int64_t item) { return item <= 0; })) {
MS_EXCEPTION(ValueError) << "For FractionalMaxPoolWithFixedKsize, all output_shape items must be positive.";
}
int64_t output_h = 0;
int64_t output_w = 0;
if (output_shape.size() == kOutputShapeDimSize1) {
output_h = output_shape[kOutputShapeIndex0];
output_w = output_shape[kOutputShapeIndex0];
} else if (output_shape.size() == kOutputShapeDimSize2) {
output_h = output_shape[kOutputShapeIndex0];
output_w = output_shape[kOutputShapeIndex1];
} else {
MS_EXCEPTION(ValueError) << "For FractionalMaxPoolWithFixedKsize, the dimension of output_shape must be 1 or 2, "
<< "but got " << output_shape.size();
}
if (output_h + ksize_h - 1 > x_shape[kInputsDimIndex2]) {
MS_EXCEPTION(ValueError) << "For FractionalMaxPoolWithFixedKsize, ksize height [" << ksize_h
<< "] + output_shape_h [" << output_h << "] too large relative to input height ["
<< x_shape[kInputsDimIndex2]
<< "], conflict with the rule: ksize_h + output_shape_h - 1 <= input_h";
}
if (output_w + ksize_w - 1 > x_shape[kInputsDimIndex3]) {
MS_EXCEPTION(ValueError) << "For FractionalMaxPoolWithFixedKsize, ksize width [" << ksize_w
<< "] + output_shape_w [" << output_w << "] too large relative to input width ["
<< x_shape[kInputsDimIndex3]
<< "], conflict with the rule: ksize_w + output_shape_w - 1 <= input_w";
}
ShapeVector out_shape_vector = {x_shape[kInputsDimIndex0], x_shape[kInputsDimIndex1], output_h, output_w};
abstract::ShapePtr out_shape = std::make_shared<abstract::Shape>(out_shape_vector);
return std::make_shared<abstract::TupleShape>(std::vector<abstract::BaseShapePtr>{out_shape, out_shape});
}
TuplePtr FractionalMaxPoolWithFixedKsizeInferType(const PrimitivePtr &primitive,
const std::vector<AbstractBasePtr> &input_args) {
auto prim_name = primitive->name();
const std::set<TypePtr> random_samples_valid_types = {kFloat16, kFloat32, kFloat64};
auto random_samples_dtype = input_args[1]->BuildType();
CheckAndConvertUtils::CheckTensorTypeValid("random_samples dtype", random_samples_dtype, random_samples_valid_types,
prim_name);
const std::set<TypePtr> x_valid_types = {kFloat16, kFloat32, kFloat64, kInt32, kInt64};
auto x_dtype = input_args[0]->BuildType();
auto y_dtype = CheckAndConvertUtils::CheckTensorTypeValid("input_x dtype", x_dtype, x_valid_types, prim_name);
TypePtr argmax_dtype = kInt64;
return std::make_shared<Tuple>(std::vector<TypePtr>{y_dtype, argmax_dtype});
}
} // namespace
MIND_API_BASE_IMPL(FractionalMaxPoolWithFixedKsize, PrimitiveC, BaseOperator);
AbstractBasePtr FractionalMaxPoolWithFixedKsizeInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive,
const std::vector<AbstractBasePtr> &input_args) {
MS_EXCEPTION_IF_NULL(primitive);
const int64_t inputs_num = 2;
CheckAndConvertUtils::CheckInputArgs(input_args, kEqual, inputs_num, primitive->name());
auto types = FractionalMaxPoolWithFixedKsizeInferType(primitive, input_args);
auto shapes = FractionalMaxPoolWithFixedKsizeInferShape(primitive, input_args);
return abstract::MakeAbstract(shapes, types);
}
REGISTER_PRIMITIVE_EVAL_IMPL(FractionalMaxPoolWithFixedKsize, prim::kPrimFractionalMaxPoolWithFixedKsize,
FractionalMaxPoolWithFixedKsizeInfer, nullptr, true);
} // namespace ops
} // namespace mindspore
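Both the kernel and the infer above enforce the same placement rule per spatial dimension, ksize + output_shape - 1 <= input; otherwise the edge-pinned last window would run past the input. A small hypothetical pre-check in the same spirit:

def check_fractional_pool_shapes(input_hw, output_hw, ksize_hw):
    """Raise if any pooling window would run past the input (rule: k + out - 1 <= in)."""
    for name, inp, out, k in zip(("H", "W"), input_hw, output_hw, ksize_hw):
        if out + k - 1 > inp:
            raise ValueError(f"{name}: ksize {k} + output_shape {out} - 1 > input {inp}")

check_fractional_pool_shapes((5, 5), (2, 2), (2, 2))  # ok: 2 + 2 - 1 = 3 <= 5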

View File

@@ -0,0 +1,49 @@
/**
* Copyright 2022 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_CORE_OPS_FRACTIONAL_MAX_POOL_WITH_FIXED_KSIZE_H_
#define MINDSPORE_CORE_OPS_FRACTIONAL_MAX_POOL_WITH_FIXED_KSIZE_H_
#include <algorithm>
#include <map>
#include <memory>
#include <string>
#include <vector>
#include "ops/base_operator.h"
#include "mindapi/base/types.h"
namespace mindspore {
namespace ops {
constexpr auto kNameFractionalMaxPoolWithFixedKsize = "FractionalMaxPoolWithFixedKsize";
/// \brief Fractional max pooling operation.
/// Refer to Python API @ref mindspore.ops.FractionalMaxPoolWithFixedKsize for more details.
class MIND_API FractionalMaxPoolWithFixedKsize : public BaseOperator {
public:
MIND_API_BASE_MEMBER(FractionalMaxPoolWithFixedKsize);
/// \brief Constructor.
FractionalMaxPoolWithFixedKsize() : BaseOperator(kNameFractionalMaxPoolWithFixedKsize) {
InitIOName({"input_x", "random_samples"}, {"y", "argmax"});
}
};
abstract::AbstractBasePtr FractionalMaxPoolWithFixedKsizeInfer(
const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive,
const std::vector<abstract::AbstractBasePtr> &input_args);
} // namespace ops
} // namespace mindspore
#endif // MINDSPORE_CORE_OPS_FRACTIONAL_MAX_POOL_WITH_FIXED_KSIZE_H_

View File

@@ -0,0 +1,117 @@
/**
* Copyright 2022 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "ops/grad/fractional_max_pool_grad_with_fixed_ksize.h"
#include <string>
#include <algorithm>
#include <memory>
#include <set>
#include <vector>
#include "ops/op_utils.h"
#include "utils/check_convert_utils.h"
#include "abstract/ops/primitive_infer_map.h"
#include "mindapi/src/helper.h"
namespace mindspore {
namespace ops {
namespace {
constexpr size_t kInputsIndex0 = 0;
constexpr size_t kInputsIndex1 = 1;
constexpr size_t kInputsIndex2 = 2;
constexpr size_t kInputsDimSize = 4;
constexpr size_t kInputIndexN = 0;
constexpr size_t kInputIndexC = 1;
abstract::ShapePtr FractionalMaxPoolGradWithFixedKsizeInferShape(const PrimitivePtr &primitive,
const std::vector<AbstractBasePtr> &input_args) {
auto data_format = GetValue<std::string>(primitive->GetAttr(kFormat));
if (data_format != "NCHW") {
MS_EXCEPTION(ValueError) << "For FractionalMaxPoolGradWithFixedKsize, attr data_format must be NCHW.";
}
auto origin_input_shape =
CheckAndConvertUtils::ConvertShapePtrToShapeMap(input_args[kInputsIndex0]->GetShapeTrack())[kShape];
auto out_backprop_shape =
CheckAndConvertUtils::ConvertShapePtrToShapeMap(input_args[kInputsIndex1]->GetShapeTrack())[kShape];
auto argmax_shape =
CheckAndConvertUtils::ConvertShapePtrToShapeMap(input_args[kInputsIndex2]->GetShapeTrack())[kShape];
if (origin_input_shape.size() != kInputsDimSize) {
MS_EXCEPTION(ValueError) << "For FractionalMaxPoolGradWithFixedKsize, the dimension of origin_input must be 4.";
}
if (out_backprop_shape.size() != kInputsDimSize) {
MS_EXCEPTION(ValueError) << "For FractionalMaxPoolGradWithFixedKsize, the dimension of out_backprop must be 4.";
}
if (argmax_shape.size() != kInputsDimSize) {
MS_EXCEPTION(ValueError) << "For FractionalMaxPoolGradWithFixedKsize, the dimension of argmax must be 4.";
}
for (size_t i = 0; i < kInputsDimSize; i++) {
if (out_backprop_shape[i] != argmax_shape[i]) {
MS_EXCEPTION(ValueError) << "For FractionalMaxPoolGradWithFixedKsize, out_backprop and argmax must have "
<< "the same shape.";
}
}
if (origin_input_shape[kInputIndexN] != out_backprop_shape[kInputIndexN]) {
MS_EXCEPTION(ValueError) << "For FractionalMaxPoolGradWithFixedKsize, the first dimension size of three inputs "
<< "must be equal.";
}
if (origin_input_shape[kInputIndexC] != out_backprop_shape[kInputIndexC]) {
MS_EXCEPTION(ValueError) << "For FractionalMaxPoolGradWithFixedKsize, the second dimension size of three inputs "
<< "must be equal.";
}
return std::make_shared<abstract::Shape>(origin_input_shape);
}
TypePtr FractionalMaxPoolGradWithFixedKsizeInferType(const PrimitivePtr &primitive,
const std::vector<AbstractBasePtr> &input_args) {
auto prim_name = primitive->name();
const std::set<TypePtr> origin_input_valid_types = {kInt32, kInt64};
CheckAndConvertUtils::CheckTensorTypeValid("origin_input dtype", input_args[kInputsIndex0]->BuildType(),
origin_input_valid_types, prim_name);
const std::set<TypePtr> out_backprop_valid_types = {kFloat16, kFloat32, kFloat64, kInt32, kInt64};
auto y_dtype = CheckAndConvertUtils::CheckTensorTypeValid(
"out_backprop dtype", input_args[kInputsIndex1]->BuildType(), out_backprop_valid_types, prim_name);
const std::set<TypePtr> argmax_valid_types = {kInt64};
CheckAndConvertUtils::CheckTensorTypeValid("argmax dtype", input_args[kInputsIndex2]->BuildType(), argmax_valid_types,
prim_name);
return std::make_shared<TensorType>(y_dtype);
}
} // namespace
MIND_API_BASE_IMPL(FractionalMaxPoolGradWithFixedKsize, PrimitiveC, BaseOperator);
AbstractBasePtr FractionalMaxPoolGradWithFixedKsizeInfer(const abstract::AnalysisEnginePtr &,
const PrimitivePtr &primitive,
const std::vector<AbstractBasePtr> &input_args) {
MS_EXCEPTION_IF_NULL(primitive);
const int64_t inputs_num = 3;
CheckAndConvertUtils::CheckInputArgs(input_args, kEqual, inputs_num, primitive->name());
auto infer_shape = FractionalMaxPoolGradWithFixedKsizeInferShape(primitive, input_args);
auto infer_type = FractionalMaxPoolGradWithFixedKsizeInferType(primitive, input_args);
return abstract::MakeAbstract(infer_shape, infer_type);
}
REGISTER_PRIMITIVE_EVAL_IMPL(FractionalMaxPoolGradWithFixedKsize, prim::kPrimFractionalMaxPoolGradWithFixedKsize,
FractionalMaxPoolGradWithFixedKsizeInfer, nullptr, true);
} // namespace ops
} // namespace mindspore

View File

@@ -0,0 +1,45 @@
/**
* Copyright 2022 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_CORE_OPS_FRACTIONAL_MAX_POOL_GRAD_WITH_FIXED_KSIZE_H_
#define MINDSPORE_CORE_OPS_FRACTIONAL_MAX_POOL_GRAD_WITH_FIXED_KSIZE_H_
#include <algorithm>
#include <map>
#include <memory>
#include <string>
#include <vector>
#include "ops/base_operator.h"
#include "mindapi/base/types.h"
namespace mindspore {
namespace ops {
constexpr auto kNameFractionalMaxPoolGradWithFixedKsize = "FractionalMaxPoolGradWithFixedKsize";
class MIND_API FractionalMaxPoolGradWithFixedKsize : public BaseOperator {
public:
MIND_API_BASE_MEMBER(FractionalMaxPoolGradWithFixedKsize);
FractionalMaxPoolGradWithFixedKsize() : BaseOperator(kNameFractionalMaxPoolGradWithFixedKsize) {
InitIOName({"origin_input", "out_backprop", "argmax"}, {"y"});
}
};
abstract::AbstractBasePtr FractionalMaxPoolGradWithFixedKsizeInfer(
const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive,
const std::vector<abstract::AbstractBasePtr> &input_args);
} // namespace ops
} // namespace mindspore
#endif // MINDSPORE_CORE_OPS_FRACTIONAL_MAX_POOL_GRAD_WITH_FIXED_KSIZE_H_

View File

@@ -48,6 +48,8 @@ from ..operations.nn_ops import ReLUV3
from ..operations._grad_ops import ReluGrad
from ..operations.image_ops import ResizeLinear1D
from ..operations.nn_ops import MaxPool3DWithArgmax
from ..operations.nn_ops import FractionalMaxPoolWithFixedKsize
from ..operations._grad_ops import FractionalMaxPoolGradWithFixedKsize
@bprop_getters.register(P.CTCLossV2)
@@ -377,3 +379,15 @@ def get_bprop_maxpool3dwithargmax(self):
return (dx,)
return bprop
@bprop_getters.register(FractionalMaxPoolWithFixedKsize)
def get_bprop_fractional_max_pool_with_fixed_ksize(self):
"""Grad definition for 'FractionalMaxPoolWithFixedKsize' operation."""
input_grad = FractionalMaxPoolGradWithFixedKsize(data_format=self.data_format)
def bprop(x, random_samples, out, dout):
dx = input_grad(x, dout[0], out[1])
return (dx, zeros_like(random_samples))
return bprop
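The bprop wires the forward input x, the incoming gradient of y (dout[0]), and the saved argmax (out[1]) into the grad op, and returns a zero gradient for random_samples. A hedged sketch of exercising it in PyNative mode (values are illustrative, not from this PR):

import numpy as np
import mindspore as ms
from mindspore import Tensor, ops
from mindspore.ops.operations.nn_ops import FractionalMaxPoolWithFixedKsize

pool = FractionalMaxPoolWithFixedKsize(ksize=2, output_shape=(2, 2))

def forward(x, samples):
    y, _argmax = pool(x, samples)
    return y

x = Tensor(np.random.rand(1, 1, 5, 5), ms.float32)
samples = Tensor(np.array([[[0.8, 0.8]]]), ms.float32)
dx = ops.GradOperation()(forward)(x, samples)  # routes through the bprop registered above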

View File

@@ -13,6 +13,8 @@
# limitations under the License.
"""aicpu ops"""
from .fractional_max_pool_with_fixed_ksize import _fractional_max_pool_with_fixed_ksize_aicpu
from .fractional_max_pool_grad_with_fixed_ksize import _fractional_max_pool_grad_with_fixed_ksize_aicpu
from .extract_glimpse import _extract_glimpse_aicpu
from .nextafter import _nextafter_aicpu
from .fill_diagonal import _fill_diagonal_aicpu

View File

@@ -0,0 +1,42 @@
# Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""FractionalMaxPoolGradWithFixedKsize op"""
from mindspore.ops.op_info_register import op_info_register, AiCPURegOp, DataType
fractional_max_pool_grad_with_fixed_ksize_op_info = AiCPURegOp("FractionalMaxPoolGradWithFixedKsize") \
.fusion_type("OPAQUE") \
.attr("data_format", "str", "NCHW") \
.input(0, "origin_input", "required") \
.input(1, "out_backprop", "required") \
.input(2, "argmax", "required") \
.output(0, "y", "required") \
.dtype_format(DataType.I32_NCHW, DataType.F16_NCHW, DataType.I64_Default, DataType.F16_NCHW) \
.dtype_format(DataType.I32_NCHW, DataType.F32_NCHW, DataType.I64_Default, DataType.F32_NCHW) \
.dtype_format(DataType.I32_NCHW, DataType.F64_NCHW, DataType.I64_Default, DataType.F64_NCHW) \
.dtype_format(DataType.I32_NCHW, DataType.I32_NCHW, DataType.I64_Default, DataType.I32_NCHW) \
.dtype_format(DataType.I32_NCHW, DataType.I64_NCHW, DataType.I64_Default, DataType.I64_NCHW) \
.dtype_format(DataType.I64_NCHW, DataType.F16_NCHW, DataType.I64_Default, DataType.F16_NCHW) \
.dtype_format(DataType.I64_NCHW, DataType.F32_NCHW, DataType.I64_Default, DataType.F32_NCHW) \
.dtype_format(DataType.I64_NCHW, DataType.F64_NCHW, DataType.I64_Default, DataType.F64_NCHW) \
.dtype_format(DataType.I64_NCHW, DataType.I32_NCHW, DataType.I64_Default, DataType.I32_NCHW) \
.dtype_format(DataType.I64_NCHW, DataType.I64_NCHW, DataType.I64_Default, DataType.I64_NCHW) \
.get_op_info()
@op_info_register(fractional_max_pool_grad_with_fixed_ksize_op_info)
def _fractional_max_pool_grad_with_fixed_ksize_aicpu():
"""FractionalMaxPoolGradWithFixedKsize aicpu register"""
return

View File

@@ -0,0 +1,49 @@
# Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""FractionalMaxPoolWithFixedKsize op"""
from mindspore.ops.op_info_register import op_info_register, AiCPURegOp, DataType
fractional_max_pool_with_fixed_ksize_op_info = AiCPURegOp("FractionalMaxPoolWithFixedKsize") \
.fusion_type("OPAQUE") \
.attr("ksize", "listInt") \
.attr("output_shape", "listInt") \
.attr("data_format", "str", "NCHW") \
.input(0, "input_x", "required") \
.input(1, "random_samples", "required") \
.output(0, "y", "required") \
.output(1, "argmax", "optional") \
.dtype_format(DataType.F16_NCHW, DataType.F16_Default, DataType.F16_NCHW, DataType.I64_NCHW) \
.dtype_format(DataType.F16_NCHW, DataType.F32_Default, DataType.F16_NCHW, DataType.I64_NCHW) \
.dtype_format(DataType.F16_NCHW, DataType.F64_Default, DataType.F16_NCHW, DataType.I64_NCHW) \
.dtype_format(DataType.F32_NCHW, DataType.F16_Default, DataType.F32_NCHW, DataType.I64_NCHW) \
.dtype_format(DataType.F32_NCHW, DataType.F32_Default, DataType.F32_NCHW, DataType.I64_NCHW) \
.dtype_format(DataType.F32_NCHW, DataType.F64_Default, DataType.F32_NCHW, DataType.I64_NCHW) \
.dtype_format(DataType.F64_NCHW, DataType.F16_Default, DataType.F64_NCHW, DataType.I64_NCHW) \
.dtype_format(DataType.F64_NCHW, DataType.F32_Default, DataType.F64_NCHW, DataType.I64_NCHW) \
.dtype_format(DataType.F64_NCHW, DataType.F64_Default, DataType.F64_NCHW, DataType.I64_NCHW) \
.dtype_format(DataType.I32_NCHW, DataType.F16_Default, DataType.I32_NCHW, DataType.I64_NCHW) \
.dtype_format(DataType.I32_NCHW, DataType.F32_Default, DataType.I32_NCHW, DataType.I64_NCHW) \
.dtype_format(DataType.I32_NCHW, DataType.F64_Default, DataType.I32_NCHW, DataType.I64_NCHW) \
.dtype_format(DataType.I64_NCHW, DataType.F16_Default, DataType.I64_NCHW, DataType.I64_NCHW) \
.dtype_format(DataType.I64_NCHW, DataType.F32_Default, DataType.I64_NCHW, DataType.I64_NCHW) \
.dtype_format(DataType.I64_NCHW, DataType.F64_Default, DataType.I64_NCHW, DataType.I64_NCHW) \
.get_op_info()
@op_info_register(fractional_max_pool_with_fixed_ksize_op_info)
def _fractional_max_pool_with_fixed_ksize_aicpu():
"""FractionalMaxPoolWithFixedKsize aicpu register"""
return

View File

@@ -3558,3 +3558,39 @@ class ResizeBicubicGrad(Primitive):
return {'shape': out_shape,
'dtype': original_image_dtype,
'value': None}
class FractionalMaxPoolGradWithFixedKsize(Primitive):
"""
Computes the gradients of FractionalMaxPoolWithFixedKsize.
Args:
data_format (str): The optional value for data format. Only 'NCHW' is supported. Default: "NCHW".
Inputs:
- **origin_input** (Tensor) - Tensor with data format "NCHW", data type must be int32 or int64.
- **out_backprop** (Tensor) - The gradients with respect to the output of FractionalMaxPoolWithFixedKsize
function. Tensor with data format "NCHW", whose data type is float16, float32, float64, int32 or int64.
- **argmax** (Tensor) - The second output of FractionalMaxPoolWithFixedKsize function, whose data
type is int64.
Outputs:
- **y** (Tensor) - Tensor, with the same shape as `origin_input`, and the same data type as
the input `out_backprop`.
Raises:
TypeError: If data type of `out_backprop` is not one of the following: float16, float32, float64, int32, int64.
TypeError: If data type of `argmax` is not int64.
ValueError: If the shapes of `out_backprop` and `argmax` are not equal.
ValueError: If the first dimension sizes of `origin_input` and `out_backprop` are not equal.
ValueError: If the second dimension sizes of `origin_input` and `out_backprop` are not equal.
Supported Platforms:
``Ascend`` ``CPU``
"""
@prim_attr_register
def __init__(self, data_format="NCHW"):
self.data_format = validator.check_string(data_format, ['NCHW'], 'data_format', self.name)
self.add_prim_attr("data_format", self.data_format)
self.init_prim_io_names(inputs=['origin_input', 'out_backprop', 'argmax'], outputs=['y'])
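Since the class above ships without an Examples block, a hypothetical direct invocation could look like the following; note the kernel only reads the shape of origin_input, and every argmax entry must be a flat index into one 5x5 input plane:

>>> import numpy as np
>>> import mindspore as ms
>>> from mindspore import Tensor
>>> from mindspore.ops.operations._grad_ops import FractionalMaxPoolGradWithFixedKsize
>>> net = FractionalMaxPoolGradWithFixedKsize(data_format="NCHW")
>>> origin_input = Tensor(np.zeros((1, 1, 5, 5)), ms.int32)
>>> out_backprop = Tensor(np.ones((1, 1, 2, 2)), ms.float32)
>>> argmax = Tensor(np.array([[[[1, 9], [16, 24]]]]), ms.int64)
>>> y = net(origin_input, out_backprop, argmax)
>>> y.shape
(1, 1, 5, 5)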

View File

@@ -10620,3 +10620,88 @@ class SparseApplyProximalGradientDescent(Primitive):
self.init_prim_io_names(inputs=['var', 'alpha', 'l1', 'l2', 'grad', 'indices'],
outputs=['var'])
validator.check_value_type("use_locking", use_locking, [bool], self.name)
class FractionalMaxPoolWithFixedKsize(Primitive):
r"""
Fractional max pooling operation.
Applies a 2D fractional max pooling to an input signal composed of multiple input planes.
The max-pooling operation is applied in kH × kW regions by a stochastic step size determined by
the target output size. For any input size, the size of the specified output is H x W. The number
of output features is equal to the number of input planes.
Fractional MaxPooling is described in the paper `Fractional Max-Pooling <https://arxiv.org/pdf/1412.6071>`_.
Args:
ksize (Union[int, tuple[int]]): The size of the kernel window used to take the maximum value.
The target ksize is H x W: ksize can be a tuple (H, W), or a single int K for a K x K window.
output_shape (Union[int, tuple[int]]): The target output size H x W: output_shape can be a
tuple (H, W), or a single int H for an H x H output.
data_format (str): The optional value for data format. Only 'NCHW' is supported.
Default: "NCHW".
Inputs:
- **input_x** (Tensor) - Tensor of shape :math:`(N, C, H_{in}, W_{in})`,
with float16, float32, float64, int32, int64 data type.
- **random_samples** (Tensor) - Tensor of shape :math:`(N, C, 2)`,
with float16, float32, float64 data type.
Outputs:
- **y** (Tensor) - Has the same type as the `input_x`.
Has the shape :math:`(N, C, output\_shape_{H}, output\_shape_{W})`.
- **argmax** (Tensor) - A tensor whose data type must be int64. Has the same shape as the `y`.
Raises:
TypeError: If data type of `input_x` is not one of the following: float16, float32, float64, int32, int64.
TypeError: If data type of `random_samples` is not one of the following: float16, float32, float64.
ValueError: If `ksize` is not a number and `ksize` is not a tuple of length 2.
ValueError: If `output_shape` is not a number and `output_shape` is not a tuple of length 2.
ValueError: If the sum of `ksize`, `output_shape` and -1 is larger than the corresponding dimension of `input_x`.
ValueError: If the dimension of `random_samples` is not 3.
ValueError: If the first dimension size of `input_x` and `random_samples` is not equal.
ValueError: If the second dimension size of `input_x` and `random_samples` is not equal.
ValueError: If the third dimension size of `random_samples` is not 2.
Supported Platforms:
``Ascend`` ``CPU``
Examples:
>>> # the ksize is an int number and the output_shape is a tuple.
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor
>>> from mindspore import dtype as mstype
>>> from mindspore.ops.operations import nn_ops
>>> ksize = 2
>>> output_shape = (2,2)
>>> data_format = "NCHW"
>>> input_x = Tensor(np.array([0.3220, 0.9545, 0.7879, 0.0975, 0.3698,
... 0.5135, 0.5740, 0.3435, 0.1895, 0.8764,
... 0.9581, 0.4760, 0.9014, 0.8522, 0.3664,
... 0.4980, 0.9673, 0.9879, 0.6988, 0.9022,
... 0.9304, 0.1558, 0.0153, 0.1559, 0.9852]).reshape([1, 1, 5, 5]), mstype.float32)
>>> random_samples = Tensor(np.array([[[0.8, 0.8]]]), mstype.float32)
>>> net = nn_ops.FractionalMaxPoolWithFixedKsize(ksize, output_shape, data_format)
>>> y, argmax = net(input_x, random_samples)
>>> print(y)
[[[[0.9545 0.8764]
[0.9673 0.9852]]]]
>>> print(argmax)
[[[[ 1 9]
[16 24]]]]
"""
@prim_attr_register
def __init__(self, ksize, output_shape, data_format="NCHW"):
"""Initialize FractionalMaxPoolWithFixedKsize."""
validator.check_value_type('ksize', ksize, [int, tuple], self.name)
self.ksize = _check_positive_int_or_tuple(
"ksize", ksize, self.name, allow_four=False, ret_four=False)
self.add_prim_attr("ksize", self.ksize)
validator.check_value_type('output_shape', output_shape, [int, tuple], self.name)
self.output_shape = _check_positive_int_or_tuple(
"output_shape", output_shape, self.name, allow_four=False, ret_four=False)
self.add_prim_attr("output_shape", self.output_shape)
self.data_format = validator.check_string(data_format, ['NCHW'], 'data_format', self.name)
self.init_prim_io_names(inputs=['input_x', 'random_samples'], outputs=['y', 'argmax'])

View File

@@ -92,6 +92,8 @@ from mindspore.ops.operations.nn_ops import FractionalMaxPool, DataFormatVecPerm
from mindspore.ops.operations._grad_ops import FractionalMaxPoolGrad
from mindspore.ops.operations.nn_ops import FractionalMaxPool3DWithFixedKsize
from mindspore.ops.operations._grad_ops import FractionalMaxPool3DGradWithFixedKsize
from mindspore.ops.operations.nn_ops import FractionalMaxPoolWithFixedKsize
from mindspore.ops.operations._grad_ops import FractionalMaxPoolGradWithFixedKsize
from mindspore.ops.operations.nn_ops import FractionalAvgPool
from mindspore.ops.operations._grad_ops import FractionalAvgPoolGrad
from mindspore.ops.operations.image_ops import RGBToHSV
@@ -2351,6 +2353,24 @@ test_case_math_ops = [
]
test_case_nn_ops = [
('FractionalMaxPoolWithFixedKsize_1', {
'block': FractionalMaxPoolWithFixedKsize(ksize=(2, 2), output_shape=(2, 2), data_format="NCHW"),
'desc_inputs': [([3, 4, 6, 6], {'dtype': np.int64}),
([3, 4, 2], {'dtype': np.float32})],
'desc_bprop': [([3, 4, 2, 2], {'dtype': np.int64}),
([3, 4, 2, 2], {'dtype': np.int64})]}),
('FractionalMaxPoolWithFixedKsize_2', {
'block': FractionalMaxPoolWithFixedKsize(ksize=2, output_shape=14, data_format="NCHW"),
'desc_inputs': [([100, 3, 28, 28], {'dtype': np.int64}),
([100, 3, 2], {'dtype': np.float32})],
'desc_bprop': [([100, 3, 14, 14], {'dtype': np.int64}),
([100, 3, 14, 14], {'dtype': np.int64})]}),
('FractionalMaxPoolGradWithFixedKsize', {
'block': FractionalMaxPoolGradWithFixedKsize(data_format="NCHW"),
'desc_inputs': [Tensor(np.random.randint(0, 100, size=(3, 4, 6, 6)).astype(np.int32)),
[3, 4, 2, 2],
Tensor(np.random.randint(0, 35, size=(3, 4, 2, 2)).astype(np.int64))],
'skip': ['backward']}),
('FractionalMaxPool', {
'block': FractionalMaxPool(pooling_ratio=[1.0, 1.4, 1.4, 1.0]),
'desc_inputs': [([1, 12, 12, 4], {'dtype': np.int64})],