!37129 MaxPoolWithArgmax & MaxPoolGradWithArgmax CPU Operator

Merge pull request !37129 from ivanshan_8170/max_pool_with_argmax
i-robot 2022-07-28 12:46:17 +00:00 committed by Gitee
commit 805e418791
GPG Key ID: 173E9B9CA92EEF8F
11 changed files with 1331 additions and 3 deletions
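For context, a minimal usage sketch of the operator this PR enables on CPU. It mirrors the tests added at the bottom of the diff; the shapes and values here are illustrative only, not part of the change.

import numpy as np
import mindspore.context as context
import mindspore.nn as nn
import mindspore.ops.operations as P
from mindspore import Tensor

context.set_context(mode=context.GRAPH_MODE, device_target="CPU")

class Net(nn.Cell):
    def __init__(self):
        super(Net, self).__init__()
        # 2x2 window, stride 2, no padding, NCHW layout, as in the tests below
        self.pool = P.MaxPoolWithArgmax(kernel_size=2, strides=2, pad_mode="VALID")

    def construct(self, x):
        return self.pool(x)

x = Tensor(np.arange(16).reshape((1, 1, 4, 4)).astype(np.float32))
output, argmax = Net()(x)
# output: the per-window maxima; argmax: each maximum's flat offset within its
# H*W plane, which MaxPoolGradWithArgmax consumes to scatter gradients back.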

View File

@ -0,0 +1,193 @@
/**
* Copyright 2022 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "plugin/device/cpu/kernel/max_pool_grad_with_argmax_cpu_kernel.h"
#include <algorithm>
#include "plugin/device/cpu/hal/device/cpu_device_address.h"
#include "mindspore/core/ops/grad/max_pool_grad_with_argmax.h"
namespace mindspore {
namespace kernel {
namespace {
constexpr size_t kMaxPoolGradWithArgmaxInputsNum = 3;
constexpr size_t kMaxPoolGradWithArgmaxOutputsNum = 1;
constexpr size_t kDimLowerLimit = 4;
constexpr size_t kInputDims = 4;
} // namespace
bool MaxPoolGradWithArgmaxCpuKernelMod::Init(const BaseOperatorPtr &base_operator,
const std::vector<KernelTensorPtr> &inputs,
const std::vector<KernelTensorPtr> &outputs) {
kernel_name_ = base_operator->name();
auto kernel_ptr = std::dynamic_pointer_cast<ops::MaxPoolGradWithArgmax>(base_operator);
if (kernel_ptr == nullptr) {
MS_LOG(ERROR) << "Cast op from BaseOperator to MaxPoolingGradWithArgmax failed.";
return false;
}
stride_height_ = LongToInt(kernel_ptr->get_strides()[kDim2]);
stride_width_ = LongToInt(kernel_ptr->get_strides()[kDim3]);
if (stride_height_ < 1 || stride_width_ < 1) {
MS_EXCEPTION(ValueError) << "For '" << kernel_name_
<< "', expected strides to be Union[int, tuple[int]] with value no less than 1 "
"but got the window height: "
<< stride_height_ << ", and the window width: " << stride_height_;
return false;
}
pad_mode_ = kernel_ptr->get_pad_mode();
// pair = [is_match, index]
auto kernel_attr = GetKernelAttrFromTensors(inputs, outputs);
auto pair = MatchKernelAttr(kernel_attr, GetOpSupport());
if (!pair.first) {
MS_LOG(ERROR) << "For '" << kernel_name_ << "' does not support this kernel type: " << kernel_attr;
return false;
}
kernel_func_ = func_list_[pair.second].second;
return true;
}
bool MaxPoolGradWithArgmaxCpuKernelMod::ResizedInputSize(const std::vector<KernelTensorPtr> &inputs) {
auto x_shape = inputs[kDim0]->GetShapeVector();
if (x_shape.size() != kInputDims) {
MS_EXCEPTION(ValueError) << "For '" << kernel_name_ << "', the input 'x' must be 4-dimensional.";
return false;
}
for (size_t i = 0; i < x_shape.size(); i++) {
if (x_shape[i] <= 0) {
MS_EXCEPTION(ValueError) << "For '" << kernel_name_
<< "', expected input 'x' has non-empty spatial dimensions, "
"but 'x' has sizes "
<< x_shape[i] << " wit the dimension " << i << " being empty.";
return false;
}
}
batch_ = LongToInt(x_shape[kDim0]);
channel_ = LongToInt(x_shape[kDim1]);
x_height_ = LongToInt(x_shape[kDim2]);
x_width_ = LongToInt(x_shape[kDim3]);
auto dy_shape = inputs[kDim1]->GetShapeVector();
// check the spatial dimensions of dy if needed
for (size_t i = 0; i < dy_shape.size(); i++) {
if (dy_shape[i] <= 0) {
MS_EXCEPTION(ValueError) << "For '" << kernel_name_
<< "', expected input 'dy' has non-empty spatial dimensions, "
"but 'dy' has sizes "
<< dy_shape[i] << " wit the dimension " << i << " being empty.";
return false;
}
}
dy_height_ = LongToInt(dy_shape[kDim2]);
dy_width_ = LongToInt(dy_shape[kDim3]);
auto index_shape = inputs[kDim2]->GetShapeVector();
for (size_t i = 0; i < index_shape.size(); i++) {
if (index_shape[i] <= 0) {
MS_EXCEPTION(ValueError) << "For '" << kernel_name_
<< "', expected input 'index' has non-empty spatial dimensions, "
"but 'index' has sizes "
<< index_shape[i] << " wit the dimension " << i << " being empty.";
return false;
}
}
if (x_shape.size() < kDimLowerLimit || dy_shape.size() < kDimLowerLimit) {
MS_LOG(EXCEPTION) << "For '" << kernel_name_ << "', the dimension of 'x' and 'dy' cannot be less than 4, but got "
<< "the dimension of 'x': " << x_shape.size() << ", the dimension of 'dy': " << dy_shape.size();
return false;
}
return true;
}
int MaxPoolGradWithArgmaxCpuKernelMod::Resize(const BaseOperatorPtr &base_operator,
const std::vector<KernelTensorPtr> &inputs,
const std::vector<KernelTensorPtr> &outputs,
const std::map<uint32_t, tensor::TensorPtr> &) {
CHECK_KERNEL_INPUTS_NUM(inputs.size(), kMaxPoolGradWithArgmaxInputsNum, kernel_name_);
CHECK_KERNEL_OUTPUTS_NUM(outputs.size(), kMaxPoolGradWithArgmaxOutputsNum, kernel_name_);
if (int ret = KernelMod::Resize(base_operator, inputs, outputs); ret != KRET_OK) {
return ret;
}
auto kernel_ptr = std::dynamic_pointer_cast<ops::MaxPoolGradWithArgmax>(base_operator);
if (kernel_ptr == nullptr) {
MS_LOG(ERROR) << "Cast op from BaseOperator to MaxPoolingGradWithArgmax failed.";
return KRET_RESIZE_FAILED;
}
if (!ResizedInputSize(inputs)) {
return KRET_RESIZE_FAILED;
}
return KRET_OK;
}
template <typename T, typename S>
bool MaxPoolGradWithArgmaxCpuKernelMod::LaunchKernel(const std::vector<kernel::AddressPtr> &inputs,
const std::vector<kernel::AddressPtr> &outputs) {
auto *input = reinterpret_cast<T *>(inputs.at(kDim0)->addr);
MS_EXCEPTION_IF_NULL(input);
auto *grad = reinterpret_cast<T *>(inputs.at(kDim1)->addr);
MS_EXCEPTION_IF_NULL(grad);
auto *index = reinterpret_cast<S *>(inputs.at(kDim2)->addr);
MS_EXCEPTION_IF_NULL(index);
auto *output = reinterpret_cast<T *>(outputs.at(kDim0)->addr);
MS_EXCEPTION_IF_NULL(output);
const int c = this->channel_;
const int xCHW = c * this->x_height_ * this->x_width_;
const int dyCHW = c * this->dy_height_ * this->dy_width_;
const int outputLength = this->batch_ * xCHW;
auto init = [output](size_t start, size_t end) {
const T zero = static_cast<T>(0);
for (size_t i = start; i < end; ++i) {
output[i] = zero;
}
};
ParallelLaunchAutoSearch(init, outputLength, this, &parallel_search_info_);
const int length = this->batch_ * dyCHW;
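  // index[i] is the flat offset of the selected maximum within one sample's C*H*W block,
  // so each incoming gradient is scatter-added into the zero-initialized output at that offset.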
auto task = [input, output, grad, index, &xCHW, &dyCHW](size_t start, size_t end) {
for (size_t i = start; i < end; ++i) {
const int idx = static_cast<int>(index[i]);
const int posn = i / dyCHW;
*(output + posn * xCHW + idx) += grad[i];
}
};
ParallelLaunchAutoSearch(task, length, this, &parallel_search_info_);
return true;
}
std::vector<std::pair<KernelAttr, MaxPoolGradWithArgmaxCpuKernelMod::MaxPoolGradWithArgmaxFunc>>
MaxPoolGradWithArgmaxCpuKernelMod::func_list_ = {
{KernelAttr()
.AddInputAttr(kNumberTypeFloat32)
.AddInputAttr(kNumberTypeFloat32)
.AddInputAttr(kNumberTypeInt32)
.AddOutputAttr(kNumberTypeFloat32),
&MaxPoolGradWithArgmaxCpuKernelMod::LaunchKernel<float, int32_t>},
{KernelAttr()
.AddInputAttr(kNumberTypeFloat32)
.AddInputAttr(kNumberTypeFloat32)
.AddInputAttr(kNumberTypeInt64)
.AddOutputAttr(kNumberTypeFloat32),
&MaxPoolGradWithArgmaxCpuKernelMod::LaunchKernel<float, int64_t>},
};
std::vector<KernelAttr> MaxPoolGradWithArgmaxCpuKernelMod::GetOpSupport() {
std::vector<KernelAttr> support_list;
std::transform(func_list_.begin(), func_list_.end(), std::back_inserter(support_list),
[](const std::pair<KernelAttr, MaxPoolGradWithArgmaxFunc> &pair) { return pair.first; });
return support_list;
}
MS_KERNEL_FACTORY_REG(NativeCpuKernelMod, MaxPoolGradWithArgmax, MaxPoolGradWithArgmaxCpuKernelMod);
} // namespace kernel
} // namespace mindspore
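For reference, the scatter-add performed by LaunchKernel above can be written in a few lines of NumPy. This is an illustrative sketch (max_pool_grad_with_argmax_ref is a hypothetical helper, not code from this PR); running it on the arrays from the first grad test below reproduces expect_result.

import numpy as np

def max_pool_grad_with_argmax_ref(x_shape, dy, argmax):
    # Scatter-add every gradient value to the flat C*H*W offset recorded by its
    # argmax entry, one batch sample at a time, starting from a zero output.
    n = x_shape[0]
    out = np.zeros((n, int(np.prod(x_shape[1:]))), dtype=dy.dtype)
    np.add.at(out, (np.arange(n)[:, None], argmax.reshape(n, -1)), dy.reshape(n, -1))
    return out.reshape(x_shape)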

View File

@ -0,0 +1,71 @@
/**
* Copyright 2022 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_CCSRC_PLUGIN_DEVICE_CPU_KERNEL_MAX_POOL_GRAD_WITH_ARGMAX_CPU_KERNEL_H_
#define MINDSPORE_CCSRC_PLUGIN_DEVICE_CPU_KERNEL_MAX_POOL_GRAD_WITH_ARGMAX_CPU_KERNEL_H_
#include <map>
#include <vector>
#include <utility>
#include "mindspore/core/ops/grad/max_pool_grad_with_argmax.h"
#include "plugin/device/cpu/kernel/cpu_kernel.h"
#include "plugin/factory/ms_factory.h"
namespace mindspore {
namespace kernel {
class MaxPoolGradWithArgmaxCpuKernelMod : public NativeCpuKernelMod {
public:
MaxPoolGradWithArgmaxCpuKernelMod() {}
~MaxPoolGradWithArgmaxCpuKernelMod() override = default;
bool Launch(const std::vector<AddressPtr> &inputs, const std::vector<AddressPtr> &workspace,
const std::vector<AddressPtr> &outputs) override {
return kernel_func_(this, inputs, outputs);
}
bool Init(const BaseOperatorPtr &base_operator, const std::vector<KernelTensorPtr> &inputs,
const std::vector<KernelTensorPtr> &outputs) override;
int Resize(const BaseOperatorPtr &base_operator, const std::vector<KernelTensorPtr> &inputs,
const std::vector<KernelTensorPtr> &outputs, const std::map<uint32_t, tensor::TensorPtr> &) override;
bool ResizedInputSize(const std::vector<KernelTensorPtr> &inputs);
protected:
std::vector<KernelAttr> GetOpSupport() override;
private:
template <typename T, typename S>
bool LaunchKernel(const std::vector<kernel::AddressPtr> &inputs, const std::vector<kernel::AddressPtr> &outputs);
using MaxPoolGradWithArgmaxFunc =
std::function<bool(MaxPoolGradWithArgmaxCpuKernelMod *, const std::vector<kernel::AddressPtr> &,
const std::vector<kernel::AddressPtr> &)>;
static std::vector<std::pair<KernelAttr, MaxPoolGradWithArgmaxFunc>> func_list_;
MaxPoolGradWithArgmaxFunc kernel_func_;
int batch_ = 0;
int channel_ = 0;
int x_height_ = 0;
int x_width_ = 0;
int dy_height_ = 0;
int dy_width_ = 0;
int stride_height_ = 1;
int stride_width_ = 1;
PadMode pad_mode_ = PadMode::VALID;
};
} // namespace kernel
} // namespace mindspore
#endif // MINDSPORE_CCSRC_PLUGIN_DEVICE_CPU_KERNEL_MAX_POOL_GRAD_WITH_ARGMAX_CPU_KERNEL_H_

View File

@ -0,0 +1,230 @@
/**
* Copyright 2022 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "plugin/device/cpu/kernel/max_pool_with_argmax_cpu_kernel.h"
#include <algorithm>
#include "plugin/device/cpu/hal/device/cpu_device_address.h"
#include "mindspore/core/ops/max_pool_with_argmax.h"
namespace mindspore {
namespace kernel {
namespace {
constexpr size_t kMaxPoolWithArgmaxInputsNum = 1;
constexpr size_t kMaxPoolWithArgmaxOutputsNum = 2;
constexpr size_t kInputRank = 4;
constexpr int kPadHalf = 2;
} // namespace
bool MaxPoolWithArgmaxCpuKernelMod::Init(const BaseOperatorPtr &base_operator,
const std::vector<KernelTensorPtr> &inputs,
const std::vector<KernelTensorPtr> &outputs) {
kernel_name_ = base_operator->name();
  auto kernel_ptr = std::dynamic_pointer_cast<ops::MaxPoolWithArgmax>(base_operator);
  if (kernel_ptr == nullptr) {
    MS_LOG(ERROR) << "Cast op from BaseOperator to MaxPoolWithArgmax failed.";
    return false;
  }
  data_format_ = kernel_ptr->get_format();
window_height_ = LongToInt(kernel_ptr->get_kernel_size()[kDim1]);
window_width_ = LongToInt(kernel_ptr->get_kernel_size()[kDim2]);
if (window_height_ < 1 || window_width_ < 1) {
MS_EXCEPTION(ValueError) << "For '" << kernel_name_
<< "', expected kernel_size to be Union[int, tuple[int]] with value no "
"less than 1 "
"but got the window height: "
<< window_height_ << ", and the window width: " << window_width_;
return false;
}
stride_height_ = LongToInt(kernel_ptr->get_strides()[kDim1]);
stride_width_ = LongToInt(kernel_ptr->get_strides()[kDim2]);
if (stride_height_ < 1 || stride_width_ < 1) {
MS_EXCEPTION(ValueError) << "For '" << kernel_name_
<< "', expected strides to be Union[int, tuple[int]] with value no "
"less than 1 "
"but got the window height: "
<< window_height_ << ", and the window width: " << window_width_;
return false;
}
pad_mode_ = kernel_ptr->get_pad_mode();
auto kernel_attr = GetKernelAttrFromTensors(inputs, outputs);
// pair = [is_match, index]
auto pair = MatchKernelAttr(kernel_attr, GetOpSupport());
if (!pair.first) {
MS_LOG(ERROR) << "For '" << kernel_name_ << "' does not support this kernel type: " << kernel_attr;
return false;
}
kernel_func_ = func_list_[pair.second].second;
return true;
}
bool MaxPoolWithArgmaxCpuKernelMod::ResizedInputSize(const std::vector<KernelTensorPtr> &inputs) {
auto x_shape = inputs[kIndex0]->GetShapeVector();
if (x_shape.size() != kInputRank) {
MS_EXCEPTION(ValueError) << "For '" << kernel_name_ << "', the input 'x' must be 4-dimensional.";
return false;
}
for (size_t i = 0; i < x_shape.size(); i++) {
if (x_shape[i] <= 0) {
MS_EXCEPTION(ValueError) << "For '" << kernel_name_
<< "', expected input have non-empty spatial dimensions, "
"but input has sizes "
<< x_shape[i] << " wit h dimension " << i << " being empty.";
return false;
}
}
batch_ = LongToInt(x_shape[kDim0]);
if (data_format_ == Format::NHWC) {
channel_ = LongToInt(x_shape[kDim3]);
input_height_ = LongToInt(x_shape[kDim1]);
input_width_ = LongToInt(x_shape[kDim2]);
} else {
// data_format_ == Format::NCHW
channel_ = LongToInt(x_shape[kDim1]);
input_height_ = LongToInt(x_shape[kDim2]);
input_width_ = LongToInt(x_shape[kDim3]);
}
return true;
}
bool MaxPoolWithArgmaxCpuKernelMod::ResizedOutputSize(const std::vector<KernelTensorPtr> &outputs) {
auto output_shape = outputs[kIndex0]->GetShapeVector();
output_height_ = LongToInt(output_shape[kDim2]);
output_width_ = LongToInt(output_shape[kDim3]);
std::vector<int64_t> mask_shape = outputs[kIndex1]->GetShapeVector();
if (mask_shape != output_shape) {
MS_EXCEPTION(ValueError) << "For '" << kernel_name_
<< "', expected output and mask have the same shape "
"but output has shape "
<< output_shape << ", and mask shape: " << mask_shape;
return false;
}
return true;
}
int MaxPoolWithArgmaxCpuKernelMod::Resize(const BaseOperatorPtr &base_operator,
const std::vector<KernelTensorPtr> &inputs,
const std::vector<KernelTensorPtr> &outputs,
const std::map<uint32_t, tensor::TensorPtr> &inputsOnHost) {
CHECK_KERNEL_INPUTS_NUM(inputs.size(), kMaxPoolWithArgmaxInputsNum, kernel_name_);
CHECK_KERNEL_OUTPUTS_NUM(outputs.size(), kMaxPoolWithArgmaxOutputsNum, kernel_name_);
if (int ret = KernelMod::Resize(base_operator, inputs, outputs, inputsOnHost); ret != KRET_OK) {
return ret;
}
auto kernel_ptr = std::dynamic_pointer_cast<ops::MaxPoolWithArgmax>(base_operator);
if (kernel_ptr == nullptr) {
MS_LOG(ERROR) << "Cast op from BaseOperator to MaxPoolWithArgmax failed.";
return KRET_RESIZE_FAILED;
}
  if (!ResizedInputSize(inputs)) {
    return KRET_RESIZE_FAILED;
  }
  if (pad_mode_ == PadMode::SAME) {
    // Compute the SAME padding here rather than in Init(): the input dimensions are
    // only known once ResizedInputSize() has run.
    int tmp_height = (input_height_ / stride_height_) * stride_height_ == input_height_
                       ? (input_height_ / stride_height_)
                       : (input_height_ / stride_height_) + 1;
    pad_height_ = std::max<int>(0, (tmp_height - 1) * stride_height_ + window_height_ - input_height_);
    int tmp_width = (input_width_ / stride_width_) * stride_width_ == input_width_ ? (input_width_ / stride_width_)
                                                                                   : (input_width_ / stride_width_) + 1;
    pad_width_ = std::max<int>(0, (tmp_width - 1) * stride_width_ + window_width_ - input_width_);
    pad_top_ = pad_height_ / kPadHalf;
    pad_left_ = pad_width_ / kPadHalf;
  }
if (!ResizedOutputSize(outputs)) {
return KRET_RESIZE_FAILED;
}
return KRET_OK;
}
template <typename T>
bool MaxPoolWithArgmaxCpuKernelMod::LaunchKernel(const std::vector<kernel::AddressPtr> &inputs,
const std::vector<kernel::AddressPtr> &outputs) {
auto *x = reinterpret_cast<T *>(inputs.at(kIndex0)->addr);
MS_EXCEPTION_IF_NULL(x);
auto *output = reinterpret_cast<T *>(outputs.at(kIndex0)->addr);
MS_EXCEPTION_IF_NULL(output);
auto *mask = reinterpret_cast<int32_t *>(outputs.at(kIndex1)->addr);
MS_EXCEPTION_IF_NULL(mask);
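  // Element strides for the chosen layout: moving one step along C/H/W advances the
  // flat index by cWeight/hWeight/wWeight elements, respectively.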
int cWeight, hWeight, wWeight;
if (data_format_ == Format::NHWC) {
cWeight = 1;
wWeight = channel_ * cWeight;
hWeight = input_height_ * wWeight;
} else {
// data_format == NCHW
wWeight = 1;
hWeight = input_width_ * wWeight;
cWeight = input_height_ * hWeight;
}
const int batch = this->batch_;
const int channel = this->channel_;
const int i_h = this->input_height_;
const int i_w = this->input_width_;
const int s_h = this->stride_height_;
const int s_w = this->stride_width_;
const int w_h = this->window_height_;
const int w_w = this->window_width_;
const int pad_top = this->pad_top_;
const int pad_left = this->pad_left_;
const int o_h = this->output_height_;
const int o_w = this->output_width_;
const size_t length = batch * channel * o_h * o_w;
auto task = [x, output, mask, &batch, &channel, &i_h, &i_w, &s_h, &s_w, &w_h, &w_w, &pad_top, &pad_left, &o_h, &o_w,
&wWeight, &hWeight, &cWeight](size_t start, size_t end) {
for (size_t i = start; i < end; ++i) {
const int posn = i / (channel * o_h * o_w);
const int posc = i / (o_h * o_w) % channel;
const int posh = i / o_w % o_h;
const int posw = i % o_w;
int hstart = posh * s_h - pad_top;
int wstart = posw * s_w - pad_left;
const int hend = std::min<int>(hstart + w_h, i_h);
const int wend = std::min<int>(wstart + w_w, i_w);
hstart = std::max<int>(hstart, 0);
wstart = std::max<int>(wstart, 0);
int32_t inputStart = posn * channel * i_h * i_w;
int32_t maxIdx = posc * cWeight + hstart * hWeight + wstart * wWeight;
T maxData = x[inputStart + maxIdx];
for (int hcur = hstart; hcur < hend; ++hcur) {
for (int wcur = wstart; wcur < wend; ++wcur) {
int32_t inputIdx = posc * cWeight + hcur * hWeight + wcur * wWeight;
T inputData = x[inputStart + inputIdx];
if (inputData > maxData) {
maxIdx = inputIdx;
maxData = inputData;
}
}
}
output[i] = maxData;
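      // Report the index relative to the start of the current channel's plane:
      // the per-channel offset posc * cWeight is subtracted back out.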
mask[i] = maxIdx - posc * cWeight;
}
};
ParallelLaunchAutoSearch(task, length, this, &parallel_search_info_);
return true;
}
std::vector<std::pair<KernelAttr, MaxPoolWithArgmaxCpuKernelMod::MaxPoolWithArgmaxFunc>>
MaxPoolWithArgmaxCpuKernelMod::func_list_ = {
{KernelAttr().AddInputAttr(kNumberTypeFloat32).AddOutputAttr(kNumberTypeFloat32).AddOutputAttr(kNumberTypeInt32),
&MaxPoolWithArgmaxCpuKernelMod::LaunchKernel<float>},
};
std::vector<KernelAttr> MaxPoolWithArgmaxCpuKernelMod::GetOpSupport() {
std::vector<KernelAttr> support_list;
std::transform(func_list_.begin(), func_list_.end(), std::back_inserter(support_list),
[](const std::pair<KernelAttr, MaxPoolWithArgmaxFunc> &pair) { return pair.first; });
return support_list;
}
MS_KERNEL_FACTORY_REG(NativeCpuKernelMod, MaxPoolWithArgmax, MaxPoolWithArgmaxCpuKernelMod);
} // namespace kernel
} // namespace mindspore

View File

@ -0,0 +1,79 @@
/**
* Copyright 2022 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_CCSRC_PLUGIN_DEVICE_CPU_KERNEL_MAX_POOL_WITH_ARGMAX_CPU_KERNEL_H_
#define MINDSPORE_CCSRC_PLUGIN_DEVICE_CPU_KERNEL_MAX_POOL_WITH_ARGMAX_CPU_KERNEL_H_
#include <map>
#include <utility>
#include <vector>
#include "mindspore/core/ops/max_pool_with_argmax.h"
#include "plugin/device/cpu/kernel/cpu_kernel.h"
#include "plugin/factory/ms_factory.h"
namespace mindspore {
namespace kernel {
class MaxPoolWithArgmaxCpuKernelMod : public NativeCpuKernelMod {
public:
MaxPoolWithArgmaxCpuKernelMod() {}
~MaxPoolWithArgmaxCpuKernelMod() override = default;
bool Launch(const std::vector<AddressPtr> &inputs, const std::vector<AddressPtr> &workspace,
const std::vector<AddressPtr> &outputs) override {
return kernel_func_(this, inputs, outputs);
}
bool Init(const BaseOperatorPtr &base_operator, const std::vector<KernelTensorPtr> &inputs,
const std::vector<KernelTensorPtr> &outputs) override;
int Resize(const BaseOperatorPtr &base_operator, const std::vector<KernelTensorPtr> &inputs,
const std::vector<KernelTensorPtr> &outputs,
const std::map<uint32_t, tensor::TensorPtr> &inputsOnHost) override;
bool ResizedInputSize(const std::vector<KernelTensorPtr> &inputs);
bool ResizedOutputSize(const std::vector<KernelTensorPtr> &outputs);
protected:
std::vector<KernelAttr> GetOpSupport() override;
private:
template <typename T>
bool LaunchKernel(const std::vector<kernel::AddressPtr> &inputs, const std::vector<kernel::AddressPtr> &outputs);
using MaxPoolWithArgmaxFunc = std::function<bool(
MaxPoolWithArgmaxCpuKernelMod *, const std::vector<kernel::AddressPtr> &, const std::vector<kernel::AddressPtr> &)>;
static std::vector<std::pair<KernelAttr, MaxPoolWithArgmaxFunc>> func_list_;
MaxPoolWithArgmaxFunc kernel_func_;
int batch_ = 0;
int channel_ = 0;
int input_height_ = 0;
int input_width_ = 0;
int window_height_ = 1;
int window_width_ = 1;
int stride_height_ = 1;
int stride_width_ = 1;
PadMode pad_mode_ = PadMode::VALID;
Format data_format_ = Format::NCHW;
int pad_height_ = 0;
int pad_width_ = 0;
int pad_top_ = 0;
int pad_left_ = 0;
int output_height_ = 0;
int output_width_ = 0;
};
} // namespace kernel
} // namespace mindspore
#endif // MINDSPORE_CCSRC_PLUGIN_DEVICE_CPU_KERNEL_MAX_POOL_WITH_ARGMAX_CPU_KERNEL_H_

View File

@ -0,0 +1,101 @@
/**
* Copyright 2022 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "ops/grad/max_pool_grad_with_argmax.h"
#include <algorithm>
#include <string>
#include <memory>
#include <set>
#include <vector>
#include "ops/op_utils.h"
#include "utils/check_convert_utils.h"
#include "abstract/ops/primitive_infer_map.h"
#include "mindapi/src/helper.h"
namespace mindspore {
namespace ops {
void MaxPoolGradWithArgmax::set_pad_mode(const PadMode &pad_mode) {
int64_t swi = pad_mode;
(void)this->AddAttr(kPadMode, api::MakeValue(swi));
}
PadMode MaxPoolGradWithArgmax::get_pad_mode() const {
auto value_ptr = GetAttr(kPadMode);
MS_EXCEPTION_IF_NULL(value_ptr);
auto mode_str = GetValue<std::string>(value_ptr);
std::transform(mode_str.begin(), mode_str.end(), mode_str.begin(), ::toupper);
MS_EXCEPTION_IF_CHECK_FAIL((mode_str == "SAME" || mode_str == "VALID"),
"MaxPoolGradWithArgmax only supports pad mode 'SAME' or 'VALID', but get " + mode_str);
return mode_str == "SAME" ? PadMode::SAME : PadMode::VALID;
}
void MaxPoolGradWithArgmax::set_kernel_size(const std::vector<int64_t> &kernel_size) {
(void)this->AddAttr(
kKernelSize, api::MakeValue(CheckAndConvertUtils::CheckPositiveVector(kKernelSize, kernel_size, this->name())));
}
std::vector<int64_t> MaxPoolGradWithArgmax::get_kernel_size() const {
return GetValue<std::vector<int64_t>>(GetAttr(kKernelSize));
}
void MaxPoolGradWithArgmax::set_strides(const std::vector<int64_t> &strides) {
(void)this->AddAttr(kStrides,
api::MakeValue(CheckAndConvertUtils::CheckPositiveVector(kStrides, strides, this->name())));
}
std::vector<int64_t> MaxPoolGradWithArgmax::get_strides() const {
return GetValue<std::vector<int64_t>>(GetAttr(kStrides));
}
void MaxPoolGradWithArgmax::Init(const std::vector<int64_t> &kernel_size, const std::vector<int64_t> &stride,
const PadMode &pad_mode, const Format &format) {
this->set_pad_mode(pad_mode);
this->set_kernel_size(kernel_size);
this->set_strides(stride);
}
namespace {
abstract::ShapePtr MaxPoolGradWithArgmaxInferShape(const PrimitivePtr &primitive,
const std::vector<AbstractBasePtr> &input_args) {
auto x_shape = CheckAndConvertUtils::ConvertShapePtrToShapeMap(input_args[kDim0]->BuildShape())[kShape];
constexpr int64_t kXRank = 4;
CheckAndConvertUtils::CheckInteger("x_rank", SizeToLong(x_shape.size()), kEqual, kXRank, kNameMaxPoolGradWithArgmax);
return std::make_shared<abstract::Shape>(x_shape);
}
TypePtr MaxPoolGradWithArgmaxInferType(const PrimitivePtr &prim, const std::vector<AbstractBasePtr> &input_args) {
auto name = prim->name();
const std::set<TypePtr> valid_grad_types = {kFloat32, kFloat16};
auto grad_type = input_args[kDim1]->BuildType();
  auto inferred_type = CheckAndConvertUtils::CheckTensorTypeValid("grad", grad_type, valid_grad_types, name);
return inferred_type;
}
} // namespace
MIND_API_OPERATOR_IMPL(MaxPoolGradWithArgmax, BaseOperator);
AbstractBasePtr MaxPoolGradWithArgmaxInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive,
const std::vector<AbstractBasePtr> &input_args) {
MS_EXCEPTION_IF_NULL(primitive);
const int64_t input_num = 3;
CheckAndConvertUtils::CheckInputArgs(input_args, kEqual, input_num, primitive->name());
auto maxpool_with_argmax_infer_type = MaxPoolGradWithArgmaxInferType(primitive, input_args);
auto maxpool_with_argmax_infer_shape = MaxPoolGradWithArgmaxInferShape(primitive, input_args);
return std::make_shared<abstract::AbstractTensor>(maxpool_with_argmax_infer_type, maxpool_with_argmax_infer_shape);
}
REGISTER_PRIMITIVE_EVAL_IMPL(MaxPoolGradWithArgmax, prim::kPrimMaxPoolGradWithArgmax, MaxPoolGradWithArgmaxInfer,
nullptr, true);
} // namespace ops
} // namespace mindspore

View File

@ -0,0 +1,63 @@
/**
* Copyright 2022 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_CORE_OPS_GRAD_MAX_POOL_GRAD_WITH_ARGMAX_H_
#define MINDSPORE_CORE_OPS_GRAD_MAX_POOL_GRAD_WITH_ARGMAX_H_
#include <vector>
#include <string>
#include <memory>
#include "ops/base_operator.h"
#include "mindapi/base/types.h"
#include "mindapi/base/format.h"
namespace mindspore {
namespace ops {
constexpr auto kNameMaxPoolGradWithArgmax = "MaxPoolGradWithArgmax";
/// \brief Gradient of the max pooling with argmax operation.
class MIND_API MaxPoolGradWithArgmax : public BaseOperator {
public:
MIND_API_BASE_MEMBER(MaxPoolGradWithArgmax);
/// \brief Constructor.
MaxPoolGradWithArgmax() : BaseOperator(kNameMaxPoolGradWithArgmax) {
InitIOName({"input", "grad", "argmax"}, {"output"});
}
/// \brief Init. Refer to the parameters of Python API @ref mindspore.ops.MaxPoolWithArgmax for the inputs.
void Init(const std::vector<int64_t> &kernel_size = {1}, const std::vector<int64_t> &stride = {1},
const PadMode &pad_mode = VALID, const Format &format = NCHW);
/// \brief Set pad_mode.
void set_pad_mode(const PadMode &pad_mode);
/// \brief Set kernel_size.
void set_kernel_size(const std::vector<int64_t> &kernel_size);
/// \brief Set strides.
void set_strides(const std::vector<int64_t> &strides);
/// \return kernel_size.
std::vector<int64_t> get_kernel_size() const;
/// \return pad_mode
PadMode get_pad_mode() const;
/// \return strides.
std::vector<int64_t> get_strides() const;
};
abstract::AbstractBasePtr MaxPoolGradWithArgmaxInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive,
const std::vector<abstract::AbstractBasePtr> &input_args);
} // namespace ops
} // namespace mindspore
#endif  // MINDSPORE_CORE_OPS_GRAD_MAX_POOL_GRAD_WITH_ARGMAX_H_

View File

@ -0,0 +1,188 @@
/**
* Copyright 2022 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "ops/max_pool_with_argmax.h"
#include <string>
#include <algorithm>
#include <memory>
#include <set>
#include <vector>
#include "ops/op_utils.h"
#include "utils/check_convert_utils.h"
#include "abstract/ops/primitive_infer_map.h"
#include "mindapi/src/helper.h"
namespace mindspore {
namespace ops {
constexpr auto kPadModeSame = "1";
constexpr auto kPadModeValid = "2";
constexpr auto kSAME = "SAME";
constexpr auto kVALID = "VALID";
void MaxPoolWithArgmax::set_pad_mode(const PadMode &pad_mode) {
int64_t swi = pad_mode;
(void)this->AddAttr(kPadMode, api::MakeValue(swi));
}
PadMode MaxPoolWithArgmax::get_pad_mode() const {
auto value_ptr = GetAttr(kPadMode);
MS_EXCEPTION_IF_NULL(value_ptr);
auto mode_str = GetValue<std::string>(value_ptr);
std::transform(mode_str.begin(), mode_str.end(), mode_str.begin(), ::toupper);
MS_EXCEPTION_IF_CHECK_FAIL((mode_str == kSAME || mode_str == kVALID),
"MaxPoolWithArgmax only supports pad mode 'SAME' or 'VALID', but get " + mode_str);
return mode_str == kSAME ? PadMode::SAME : PadMode::VALID;
}
void MaxPoolWithArgmax::set_kernel_size(const std::vector<int64_t> &kernel_size) {
(void)this->AddAttr(
kKernelSize, api::MakeValue(CheckAndConvertUtils::CheckPositiveVector(kKernelSize, kernel_size, this->name())));
}
std::vector<int64_t> MaxPoolWithArgmax::get_kernel_size() const {
return GetValue<std::vector<int64_t>>(GetAttr(kKernelSize));
}
void MaxPoolWithArgmax::set_strides(const std::vector<int64_t> &strides) {
(void)this->AddAttr(kStrides,
api::MakeValue(CheckAndConvertUtils::CheckPositiveVector(kStrides, strides, this->name())));
}
std::vector<int64_t> MaxPoolWithArgmax::get_strides() const {
return GetValue<std::vector<int64_t>>(GetAttr(kStrides));
}
void MaxPoolWithArgmax::set_format(const Format &format) {
int64_t f = format;
(void)this->AddAttr(kFormat, api::MakeValue(f));
}
Format MaxPoolWithArgmax::get_format() const {
auto value_ptr = GetAttr(kFormat);
MS_EXCEPTION_IF_NULL(value_ptr);
auto format_str = GetValue<std::string>(value_ptr);
std::transform(format_str.begin(), format_str.end(), format_str.begin(), ::toupper);
MS_EXCEPTION_IF_CHECK_FAIL((format_str == kFormatNHWC || format_str == kFormatNCHW),
"MaxPoolWithArgmax only supports data format 'NHWC' or 'NCHW', but get " + format_str);
return format_str == kFormatNHWC ? Format::NHWC : Format::NCHW;
}
void MaxPoolWithArgmax::Init(const std::vector<int64_t> &kernel_size, const std::vector<int64_t> &stride,
const PadMode &pad_mode, const Format &format) {
this->set_pad_mode(pad_mode);
this->set_kernel_size(kernel_size);
this->set_strides(stride);
this->set_format(format);
}
namespace {
abstract::TupleShapePtr MaxPoolWithArgmaxInferShape(const PrimitivePtr &primitive,
const std::vector<AbstractBasePtr> &input_args) {
auto op_name = primitive->name();
auto in_shape = CheckAndConvertUtils::ConvertShapePtrToShapeMap(input_args[kDim0]->BuildShape())[kShape];
Format format = Format(CheckAndConvertUtils::GetAndCheckFormat(primitive->GetAttr(kFormat)));
const int64_t x_rank = 4;
(void)CheckAndConvertUtils::CheckInteger("x_rank", SizeToLong(in_shape.size()), kEqual, x_rank, op_name);
auto kernel_size = GetValue<std::vector<int64_t>>(primitive->GetAttr(kKernelSize));
auto mode_str = primitive->GetAttr(kPadMode)->ToString();
std::transform(mode_str.begin(), mode_str.end(), mode_str.begin(), ::toupper);
PadMode pad_mode = PadMode::PAD;
  if (mode_str == kPadModeSame || mode_str == kSAME) {
pad_mode = PadMode::SAME;
} else if (mode_str == kPadModeValid || mode_str == kVALID) {
pad_mode = PadMode::VALID;
}
MS_EXCEPTION_IF_CHECK_FAIL((pad_mode == PadMode::SAME || pad_mode == PadMode::VALID),
"MaxPoolWithArgmax only supports pad mode 'SANE' or 'VALID', but get " + mode_str);
auto strides = GetValue<std::vector<int64_t>>(primitive->GetAttr(kStrides));
const int64_t attr_size = 4;
(void)CheckAndConvertUtils::CheckInteger("kernel size", SizeToLong(kernel_size.size()), kEqual, attr_size, op_name);
(void)CheckAndConvertUtils::CheckInteger("strides size", SizeToLong(strides.size()), kEqual, attr_size, op_name);
int64_t batch = 0, in_h = 0, in_w = 0, channel = 0;
int64_t kernel_h = 0, kernel_w = 0;
int64_t stride_h = 0, stride_w = 0;
kernel_h = kernel_size[kDim1];
kernel_w = kernel_size[kDim2];
stride_h = strides[kDim1];
stride_w = strides[kDim2];
if (format == Format::NCHW) {
batch = in_shape[kDim0];
channel = in_shape[kDim1];
in_h = in_shape[kDim2];
in_w = in_shape[kDim3];
} else if (format == Format::NHWC) {
batch = in_shape[kDim0];
in_h = in_shape[kDim1];
in_w = in_shape[kDim2];
channel = in_shape[kDim3];
}
int64_t out_h = abstract::Shape::SHP_ANY;
int64_t out_w = abstract::Shape::SHP_ANY;
if (pad_mode == PadMode::VALID) {
out_h = static_cast<int64_t>(std::ceil((in_h - (kernel_h - 1)) / static_cast<float>(stride_h)));
out_w = static_cast<int64_t>(std::ceil((in_w - (kernel_w - 1)) / static_cast<float>(stride_w)));
} else if (pad_mode == PadMode::SAME) {
out_h = static_cast<int64_t>(std::ceil(in_h / static_cast<float>(stride_h)));
out_w = static_cast<int64_t>(std::ceil(in_w / static_cast<float>(stride_w)));
}
std::vector<int64_t> out_shape = {batch, channel, out_h, out_w};
// Process attr mapping problems from mindspore to tbe
// kernel_size -> ksize
// pad_mode -> padding
std::vector<int64_t> ksize = {kernel_size[kDim0], kernel_size[kDim1], kernel_size[kDim2], kernel_size[kDim3]};
if (format == NHWC) {
(void)primitive->AddAttr("ksize", MakeValue(ksize));
(void)primitive->AddAttr("data_format", MakeValue(kFormatNHWC));
} else if (format == NCHW) {
(void)primitive->AddAttr("ksize", MakeValue(ksize));
(void)primitive->AddAttr("data_format", MakeValue(kFormatNCHW));
}
if (pad_mode == PadMode::VALID) {
(void)primitive->AddAttr(kPadding, MakeValue(kVALID));
} else if (pad_mode == PadMode::SAME) {
(void)primitive->AddAttr(kPadding, MakeValue(kSAME));
}
ShapeVector shape = out_shape;
ShapeVector argmax_shape = shape;
std::vector<abstract::BaseShapePtr> shape_list = {std::make_shared<abstract::Shape>(shape),
std::make_shared<abstract::Shape>(argmax_shape)};
return std::make_shared<abstract::TupleShape>(shape_list);
}
TypePtr MaxPoolWithArgmaxInferType(const PrimitivePtr &primitive, const std::vector<AbstractBasePtr> &input_args) {
if (std::any_of(input_args.begin(), input_args.end(), [](const AbstractBasePtr &a) { return a == nullptr; })) {
MS_EXCEPTION(TypeError) << "For '" << primitive->name()
<< "', the input args used for infer shape and type is necessary, but missing it.";
}
const std::set<TypePtr> valid_types = {kFloat32, kFloat16};
auto input_type = input_args[kDim0]->BuildType();
(void)CheckAndConvertUtils::CheckTensorTypeValid("input", input_type, valid_types, primitive->name());
std::vector<TypePtr> type_list = {input_type, kInt32};
return std::make_shared<Tuple>(type_list);
}
} // namespace
MIND_API_OPERATOR_IMPL(MaxPoolWithArgmax, BaseOperator);
AbstractBasePtr MaxPoolWithArgmaxInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive,
const std::vector<AbstractBasePtr> &input_args) {
MS_EXCEPTION_IF_NULL(primitive);
const int64_t input_num = 1;
(void)CheckAndConvertUtils::CheckInputArgs(input_args, kEqual, input_num, primitive->name());
auto infer_type = MaxPoolWithArgmaxInferType(primitive, input_args);
auto infer_shape = MaxPoolWithArgmaxInferShape(primitive, input_args);
return abstract::MakeAbstract(infer_shape, infer_type);
}
REGISTER_PRIMITIVE_EVAL_IMPL(MaxPoolWithArgmax, prim::kPrimMaxPoolWithArgmax, MaxPoolWithArgmaxInfer, nullptr, true);
} // namespace ops
} // namespace mindspore
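The SAME/VALID output-size arithmetic in MaxPoolWithArgmaxInferShape is compact; here are the same formulas as a short Python sketch (pooled_output_size is a hypothetical helper for illustration, not part of this PR):

import math

def pooled_output_size(in_h, in_w, kernel, stride, pad_mode):
    # Mirrors the infer-shape code above: VALID drops windows that would cross
    # the border, SAME pads so that out = ceil(in / stride).
    if pad_mode == "VALID":
        return (math.ceil((in_h - (kernel - 1)) / stride),
                math.ceil((in_w - (kernel - 1)) / stride))
    return (math.ceil(in_h / stride), math.ceil(in_w / stride))

print(pooled_output_size(6, 6, 2, 2, "VALID"))  # (3, 3), as in the VALID tests below
print(pooled_output_size(6, 6, 3, 2, "SAME"))   # (3, 3), as in the SAME test below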

View File

@ -0,0 +1,66 @@
/**
* Copyright 2022 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_CORE_OPS_MAX_POOL_WITH_ARGMAX_H_
#define MINDSPORE_CORE_OPS_MAX_POOL_WITH_ARGMAX_H_
#include <vector>
#include <string>
#include <memory>
#include "ops/base_operator.h"
#include "mindapi/base/types.h"
#include "mindapi/base/format.h"
namespace mindspore {
namespace ops {
constexpr auto kNameMaxPoolWithArgmax = "MaxPoolWithArgmax";
/// \brief Max pooling operation. Refer to Python API @ref mindspore.ops.MaxPoolWithArgmax for more details
class MIND_API MaxPoolWithArgmax : public BaseOperator {
public:
MIND_API_BASE_MEMBER(MaxPoolWithArgmax);
/// \brief Constructor.
MaxPoolWithArgmax() : BaseOperator(kNameMaxPoolWithArgmax) { InitIOName({"x"}, {"output", "mask"}); }
/// \brief Init. Refer to the parameters of Python API @ref mindspore.ops.MaxPoolWithArgmax for the inputs.
void Init(const std::vector<int64_t> &kernel_size = {1}, const std::vector<int64_t> &stride = {1},
const PadMode &pad_mode = VALID, const Format &format = NCHW);
/// \brief Set pad_mode.
void set_pad_mode(const PadMode &pad_mode);
/// \brief Set kernel_size.
void set_kernel_size(const std::vector<int64_t> &kernel_size);
/// \brief Set strides.
void set_strides(const std::vector<int64_t> &strides);
/// \brief Set format.
void set_format(const Format &format);
/// \return kernel_size.
std::vector<int64_t> get_kernel_size() const;
/// \return pad_mode
PadMode get_pad_mode() const;
/// \return strides.
std::vector<int64_t> get_strides() const;
/// \return format.
Format get_format() const;
};
abstract::AbstractBasePtr MaxPoolWithArgmaxInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive,
const std::vector<abstract::AbstractBasePtr> &input_args);
} // namespace ops
} // namespace mindspore
#endif // MINDSPORE_CORE_OPS_MAX_POOL_WITH_ARGMAX_H_

View File

@ -1539,9 +1539,9 @@ class DepthwiseConv2dNative(PrimitiveWithInfer):
pad_top, pad_bottom, pad_left, pad_right = self.padding
h_out = 1 + (x_shape[2] + pad_top + pad_bottom - kernel_size_h - (kernel_size_h - 1) * (dilation_h - 1)) \
/ stride_h
w_out = 1 + (x_shape[3] + pad_left + pad_right - kernel_size_w - (kernel_size_w - 1) * (dilation_w - 1)) \
/ stride_w
h_out = math.floor(h_out)
w_out = math.floor(w_out)
@ -1838,6 +1838,7 @@ class MaxPoolWithArgmax(_Pool):
- valid: Adopts the way of discarding. The possible largest height and width of output
will be returned without padding. Extra pixels will be discarded.
data_format (str): The optional value for data format, either 'NHWC' or 'NCHW'.
Default: 'NCHW'.
@ -1858,7 +1859,7 @@ class MaxPoolWithArgmax(_Pool):
TypeError: If `x` is not a Tensor.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> x = Tensor(np.arange(1 * 3 * 3 * 4).reshape((1, 3, 3, 4)), mindspore.float32)

View File

@ -0,0 +1,157 @@
# Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import pytest
import mindspore as ms
from mindspore import Tensor
import mindspore.context as context
import mindspore.nn as nn
from mindspore.ops.operations import _grad_ops as G
context.set_context(mode=context.GRAPH_MODE, device_target="CPU")
class MaxPoolGradWithArgmax(nn.Cell):
def __init__(self, kernel_size, strides, pad_mode):
super(MaxPoolGradWithArgmax, self).__init__()
self.grad = G.MaxPoolGradWithArgmax(kernel_size=kernel_size, strides=strides, pad_mode=pad_mode)
def construct(self, x, grad, argmax):
return self.grad(x, grad, argmax)
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_maxpool_grad_with_argmax():
"""
    Feature: test MaxPoolGradWithArgmax CPU kernel.
    Description: compare against the result of the GPU version.
    Expectation: expect correct result.
"""
x = Tensor(np.array([[[
[0, 1, 2, 3, 4, 5],
[6, 7, 8, 9, 10, 11],
[12, 13, 14, 15, 16, 17],
[18, 19, 20, 21, 22, 23],
[24, 25, 26, 27, 28, 29],
[30, 31, 32, 33, 34, 35]
]]]).astype(np.float32))
dy = Tensor(np.array([[[
[0.7, 0.9, 0.11],
[0.19, 0.21, 0.23],
[0.31, 0.33, 0.35]
]]]).astype(np.float32))
index = Tensor(np.array([[[
[7, 9, 11],
[19, 21, 23],
[31, 33, 35]
]]]).astype(np.int32))
expect_result = (np.array([[[
[0., 0., 0., 0., 0., 0.],
[0., 0.7, 0., 0.9, 0., 0.11],
[0., 0., 0., 0., 0., 0.],
[0., 0.19, 0., 0.21, 0., 0.23],
[0., 0., 0., 0., 0., 0.],
[0., 0.31, 0., 0.33, 0., 0.35]]]]).astype(np.float32))
grad_max_pool = MaxPoolGradWithArgmax(kernel_size=2, strides=2, pad_mode="VALID")
actual_output = grad_max_pool(x, dy, index)
assert (actual_output.asnumpy() == expect_result).all()
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_maxpool_grad_with_argmax_fp16():
"""
    Feature: test MaxPoolGradWithArgmax CPU kernel.
    Description: compare against the result of the GPU version using float16.
    Expectation: expect correct result.
"""
x = Tensor(np.array([[[
[0, 1, 2, 3, 4, 5],
[6, 7, 8, 9, 10, 11],
[12, 13, 14, 15, 16, 17],
[18, 19, 20, 21, 22, 23],
[24, 25, 26, 27, 28, 29],
[30, 31, 32, 33, 34, 35]
]]]).astype(np.float16))
dy = Tensor(np.array([[[
[0.7, 0.9, 0.11],
[0.19, 0.21, 0.23],
[0.31, 0.33, 0.35]
]]]).astype(np.float16))
index = Tensor(np.array([[[
[7, 9, 11],
[19, 21, 23],
[31, 33, 35]
]]]).astype(np.int32))
expect_result = np.array([[[
[0., 0., 0., 0., 0., 0.],
[0., 0.7, 0., 0.9, 0., 0.11],
[0., 0., 0., 0., 0., 0.],
[0., 0.19, 0., 0.21, 0., 0.23],
[0., 0., 0., 0., 0., 0.],
[0., 0.31, 0., 0.33, 0., 0.35]
]]]).astype(np.float16)
grad_max_pool = MaxPoolGradWithArgmax(kernel_size=2, strides=2, pad_mode="VALID")
actual_output = grad_max_pool(x, dy, index)
assert (actual_output.asnumpy() == expect_result).all()
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_maxpool_grad_with_argmax_x_dynamic_shape():
"""
    Feature: test MaxPoolGradWithArgmax CPU kernel.
    Description: run with dynamic input shapes.
    Expectation: expect correct result.
"""
x = Tensor(np.array([[[
[0, 1, 2, 3, 4, 5],
[6, 7, 8, 9, 10, 11],
[12, 13, 14, 15, 16, 17],
[18, 19, 20, 21, 22, 23],
[24, 25, 26, 27, 28, 29],
[30, 31, 32, 33, 34, 35]
]]]), dtype=ms.float16)
dy = Tensor(np.array([[[
[0.7, 0.9, 0.11],
[0.19, 0.21, 0.23],
[0.31, 0.33, 0.35]
]]]), dtype=ms.float16)
index = Tensor(np.array([[[
[7, 9, 11],
[19, 21, 23],
[31, 33, 35]
]]]), dtype=ms.int32)
expect_result = np.array([[[
[0., 0., 0., 0., 0., 0.],
[0., 0.7, 0., 0.9, 0., 0.11],
[0., 0., 0., 0., 0., 0.],
[0., 0.19, 0., 0.21, 0., 0.23],
[0., 0., 0., 0., 0., 0.],
[0., 0.31, 0., 0.33, 0., 0.35]
]]]).astype(np.float16)
grad_max_pool = MaxPoolGradWithArgmax(kernel_size=2, strides=2, pad_mode="VALID")
x_dyn = Tensor(shape=[x.shape[0], None, x.shape[2], x.shape[3]], dtype=ms.float16)
dy_dyn = Tensor(shape=[dy.shape[0], None, None, dy.shape[3]], dtype=ms.float16)
index_dyn = Tensor(shape=[index.shape[0], index.shape[1], index.shape[2], None], dtype=ms.int32)
grad_max_pool.set_inputs(x_dyn, dy_dyn, index_dyn)
actual_output = grad_max_pool(x, dy, index)
assert (actual_output.asnumpy() == expect_result).all()

View File

@ -0,0 +1,179 @@
# Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import pytest
import mindspore as ms
from mindspore import Tensor
import mindspore.context as context
import mindspore.nn as nn
import mindspore.ops.operations as P
context.set_context(mode=context.GRAPH_MODE, device_target="CPU")
class MaxPoolWithArgmaxOp(nn.Cell):
def __init__(self, kernel_size=1, strides=1, pad_mode="valid", data_format="NCHW"):
super(MaxPoolWithArgmaxOp, self).__init__()
self.max_pool_op = P.MaxPoolWithArgmax(
kernel_size=kernel_size, strides=strides, pad_mode=pad_mode, data_format=data_format)
def construct(self, x):
return self.max_pool_op(x)
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_maxpool_with_argmax_valid():
"""
    Feature: test MaxPoolWithArgmax CPU op.
    Description: documented example with 'VALID' padding.
    Expectation: expect correct result.
"""
x = Tensor(np.array([[[[10, 1, 2, 3, -4, -5],
[6, 7, 8, 9, -10, -11],
[12, 13, 24, -15, -16, -17],
[18, 19, 20, 21, 22, 23],
[32, 25, 26, 27, 28, 40],
[30, 31, 35, 33, 34, 35]]]]).astype(np.float32))
maxpool_with_argmax = MaxPoolWithArgmaxOp(kernel_size=2, strides=2, pad_mode="valid", data_format="NCHW")
actual_output, argmax = maxpool_with_argmax(x)
expect_output = np.array([[[[10, 9, -4],
[19, 24, 23],
[32, 35, 40]]]]).astype(np.float32)
expect_argmax = np.array([[[[0, 9, 4],
[19, 14, 23],
[24, 32, 29]]]]).astype(np.int32)
assert (actual_output.asnumpy() == expect_output).all()
assert (argmax.asnumpy() == expect_argmax).all()
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_maxpool_with_argmax_same():
"""
    Feature: test MaxPoolWithArgmax CPU op.
    Description: documented example with 'SAME' padding.
    Expectation: expect correct result.
"""
x = Tensor(np.array([[[[0, 1, 2, 3, -4, -5],
[6, 7, 8, 9, -10, -11],
[12, 13, 14, -15, -16, -17],
[18, 19, 20, 21, 22, 23],
[24, 25, 26, 27, 28, 29],
[30, 31, 32, 33, 34, 35]]]]).astype(np.float32))
maxpool_with_argmax = MaxPoolWithArgmaxOp(kernel_size=3, strides=2, pad_mode="same", data_format="NCHW")
actual_output, argmax = maxpool_with_argmax(x)
expect_output = np.array([[[[14, 14, -4],
[26, 28, 29],
[32, 34, 35]]]])
expect_argmax = np.array([[[[14, 14, 4],
[26, 28, 29],
[32, 34, 35]]]])
assert (actual_output.asnumpy() == expect_output).all()
assert (argmax.asnumpy() == expect_argmax).all()
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_maxpool_with_argmax_pytorch():
"""
    Feature: test MaxPoolWithArgmax CPU op.
    Description: compare against PyTorch's max_pool2d with 'VALID' padding.
    Expectation: expect correct result.
"""
x = Tensor(np.array([[[[80., 70., -19.],
[14., 46., -67.],
[15., -41., -80.]],
[[-80., 62., 85.],
[-3., -68., 35.],
[84., 54., 32.]]],
[[[-65., 57., 10.],
[-1., 38., -43.],
[36., -64., 5.]],
[[32., 8., 70.],
[-20., -92., -31.],
[-73., -27., -87.]]]]).astype(np.float32))
maxpool_with_argmax = MaxPoolWithArgmaxOp(kernel_size=2, strides=1, pad_mode="valid", data_format="NCHW")
actual_output, argmax = maxpool_with_argmax(x)
expect_output = np.array([[[[80., 70.],
[46., 46.]],
[[62., 85.],
[84., 54.]]],
[[[57., 57.],
[38., 38.]],
[[32., 70.],
[-20., -27.]]]])
expect_argmax = np.array([[[[0, 1],
[4, 4]],
[[1, 2],
[6, 7]]],
[[[1, 1],
[4, 4]],
[[0, 2],
[3, 7]]]])
assert (actual_output.asnumpy() == expect_output).all()
assert (argmax.asnumpy() == expect_argmax).all()
assert np.allclose(actual_output.asnumpy(), expect_output)
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_maxpool_with_argmax_dynamic_shape():
"""
    Feature: test MaxPoolWithArgmax CPU op with dynamic shapes.
    Description: input x has a dynamic shape.
    Expectation: expect correct result.
"""
x = Tensor(np.array([[[[80., 70., -19.],
[14., 46., -67.],
[15., -41., -80.]],
[[-80., 62., 85.],
[-3., -68., 35.],
[84., 54., 32.]]],
[[[-65., 57., 10.],
[-1., 38., -43.],
[36., -64., 5.]],
[[32., 8., 70.],
[-20., -92., -31.],
[-73., -27., -87.]]]]), dtype=ms.float32)
x_dyn = Tensor(shape=[2, None, 3, 3], dtype=ms.float32)
maxpool_with_argmax = MaxPoolWithArgmaxOp(kernel_size=2, strides=1, pad_mode="valid", data_format="NCHW")
maxpool_with_argmax.set_inputs(x_dyn)
actual_output, argmax = maxpool_with_argmax(x)
expect_output = np.array([[[[80., 70.],
[46., 46.]],
[[62., 85.],
[84., 54.]]],
[[[57., 57.],
[38., 38.]],
[[32., 70.],
[-20., -27.]]]])
expect_argmax = np.array([[[[0, 1],
[4, 4]],
[[1, 2],
[6, 7]]],
[[[1, 1],
[4, 4]],
[[0, 2],
[3, 7]]]])
assert (actual_output.asnumpy() == expect_output).all()
assert (argmax.asnumpy() == expect_argmax).all()