!21710 fix CropAndResize, RandomChoiceWithMask and Pad kernels on CPU

Merge pull request !21710 from huangbo/pclint_fix_master
This commit is contained in:
i-robot 2021-08-13 01:27:17 +00:00 committed by Gitee
commit 114f419d0a
13 changed files with 75 additions and 41 deletions

View File

@ -144,8 +144,7 @@ bool CropAndResizeCPUKernel<T>::Launch(const std::vector<kernel::AddressPtr> &in
const int bottom_y_index = ceilf(target_y);
const int left_x_index = floorf(target_x);
const int right_x_index = ceilf(target_x);
const float y_lerp = target_y - top_y_index;
const float x_lerp = target_x - left_x_index;
const float top_left = static_cast<float>(
input_image[((box_index * input_height_ + top_y_index) * input_width_ + left_x_index) * channel_ +
pos_channel]);
@ -158,9 +157,9 @@ bool CropAndResizeCPUKernel<T>::Launch(const std::vector<kernel::AddressPtr> &in
const float bottom_right = static_cast<float>(
input_image[((box_index * input_height_ + bottom_y_index) * input_width_ + right_x_index) * channel_ +
pos_channel]);
const float top = top_left + (top_right - top_left) * x_lerp;
const float bottom = bottom_left + (bottom_right - bottom_left) * x_lerp;
output[pos] = top + (bottom - top) * y_lerp;
const float top = top_left + (top_right - top_left) * (target_x - left_x_index);
const float bottom = bottom_left + (bottom_right - bottom_left) * (target_x - left_x_index);
output[pos] = top + (bottom - top) * (target_y - top_y_index);
} else if (method_ == 3) {
int y1h = static_cast<int>(y1 * input_height_);
int x1w = static_cast<int>(x1 * input_width_);
@ -170,36 +169,37 @@ bool CropAndResizeCPUKernel<T>::Launch(const std::vector<kernel::AddressPtr> &in
int h = ((y2h - y1h + 1) > 1) ? y2h - y1h + 1 : 1;
float y_point = (pos_y + 0.5) * (h / static_cast<float>(final_height_)) - 0.5;
int top_y_index = floorf(y_point);
top_y_index = std::min(std::max(0, top_y_index), h - 1);
int bottom_y_index = ceilf(y_point);
bottom_y_index = std::min(std::max(0, bottom_y_index), h - 1);
int top_y_index = std::min(std::max(0, static_cast<int>(floorf(y_point))), h - 1);
int bottom_y_index = std::min(std::max(0, static_cast<int>(ceilf(y_point))), h - 1);
float x_point = (pos_x + 0.5) * (w / static_cast<float>(final_width_)) - 0.5;
int left_x_index = floorf(x_point);
left_x_index = std::min(std::max(0, left_x_index), w - 1);
int right_x_index = ceilf(x_point);
right_x_index = std::min(std::max(0, right_x_index), w - 1);
int left_x_index = std::min(std::max(0, static_cast<int>(floorf(x_point))), w - 1);
int right_x_index = std::min(std::max(0, static_cast<int>(ceilf(x_point))), w - 1);
const float y_lerp = y_point - top_y_index;
const float x_lerp = x_point - left_x_index;
const int y_top_index = box_index * input_height_ + y1h + top_y_index;
const int y_bottom_index = box_index * input_height_ + y1h + bottom_y_index;
const float top_left =
static_cast<float>(input_image[(y_top_index * input_width_ + x1w + left_x_index) * channel_ + pos_channel]);
const float top_right =
static_cast<float>(input_image[(y_top_index * input_width_ + x1w + right_x_index) * channel_ + pos_channel]);
const int y_top_index = std::max(0, y1h + top_y_index);
const int y_bottom_index = std::max(0, y1h + bottom_y_index);
const int x_left_index = std::max(0, x1w + left_x_index);
const int x_right_index = std::max(0, x1w + right_x_index);
const float top_left = static_cast<float>(
input_image[((box_index * input_height_ + y_top_index) * input_width_ + x_left_index) * channel_ +
pos_channel]);
const float top_right = static_cast<float>(
input_image[((box_index * input_height_ + y_top_index) * input_width_ + x_right_index) * channel_ +
pos_channel]);
const float bottom_left = static_cast<float>(
input_image[(y_bottom_index * input_width_ + x1w + left_x_index) * channel_ + pos_channel]);
input_image[((box_index * input_height_ + y_bottom_index) * input_width_ + x_left_index) * channel_ +
pos_channel]);
const float bottom_right = static_cast<float>(
input_image[(y_bottom_index * input_width_ + x1w + right_x_index) * channel_ + pos_channel]);
input_image[((box_index * input_height_ + y_bottom_index) * input_width_ + x_right_index) * channel_ +
pos_channel]);
output[pos] = top_left * (1 - y_lerp) * (1 - x_lerp) + bottom_right * y_lerp * x_lerp +
top_right * (1 - y_lerp) * x_lerp + bottom_left * y_lerp * (1 - x_lerp);
float ret = top_left * (1 - y_lerp) * (1 - x_lerp) + bottom_right * y_lerp * x_lerp +
top_right * (1 - y_lerp) * x_lerp + bottom_left * y_lerp * (1 - x_lerp);
output[pos] = ret;
} else {
// Nearest Neighbour
const int closest_x_index = roundf(target_x);

View File

@ -35,15 +35,14 @@ class CropAndResizeCPUKernel : public CPUKernel {
const std::vector<AddressPtr> &outputs) override;
private:
int method_;
float extrapolation_value_;
int input_crop_size_;
int output_size_;
int input_height_;
int input_width_;
int final_height_;
int final_width_;
int channel_;
int method_{1};
float extrapolation_value_{0.0};
int output_size_{0};
int input_height_{0};
int input_width_{0};
int final_height_{0};
int final_width_{0};
int channel_{0};
};
MS_REG_CPU_KERNEL_T(CropAndResize,

View File

@ -86,6 +86,8 @@ bool MirrorPadCPUKernel::Launch(const std::vector<kernel::AddressPtr> &inputs, c
LaunchKernel<float16>(inputs, outputs);
} else if (dtype_ == kNumberTypeFloat32) {
LaunchKernel<float>(inputs, outputs);
} else if (dtype_ == kNumberTypeFloat64) {
LaunchKernel<double>(inputs, outputs);
} else if (dtype_ == kNumberTypeInt32) {
LaunchKernel<int>(inputs, outputs);
} else {

View File

@ -74,6 +74,11 @@ MS_REG_CPU_KERNEL(
KernelAttr().AddInputAttr(kNumberTypeFloat32).AddInputAttr(kNumberTypeInt64).AddOutputAttr(kNumberTypeFloat32),
MirrorPadCPUKernel);
MS_REG_CPU_KERNEL(
MirrorPad,
KernelAttr().AddInputAttr(kNumberTypeFloat64).AddInputAttr(kNumberTypeInt64).AddOutputAttr(kNumberTypeFloat64),
MirrorPadCPUKernel);
MS_REG_CPU_KERNEL(
MirrorPad, KernelAttr().AddInputAttr(kNumberTypeInt32).AddInputAttr(kNumberTypeInt64).AddOutputAttr(kNumberTypeInt32),
MirrorPadCPUKernel);
@ -88,6 +93,11 @@ MS_REG_CPU_KERNEL(
KernelAttr().AddInputAttr(kNumberTypeFloat32).AddInputAttr(kNumberTypeInt32).AddOutputAttr(kNumberTypeFloat32),
MirrorPadCPUKernel);
MS_REG_CPU_KERNEL(
MirrorPad,
KernelAttr().AddInputAttr(kNumberTypeFloat64).AddInputAttr(kNumberTypeInt32).AddOutputAttr(kNumberTypeFloat64),
MirrorPadCPUKernel);
MS_REG_CPU_KERNEL(
MirrorPad, KernelAttr().AddInputAttr(kNumberTypeInt32).AddInputAttr(kNumberTypeInt32).AddOutputAttr(kNumberTypeInt32),
MirrorPadCPUKernel);

View File

@ -110,6 +110,8 @@ bool MirrorPadGradCPUKernel::Launch(const std::vector<kernel::AddressPtr> &input
LaunchKernel<float16>(inputs, workspace, outputs);
} else if (dtype_ == kNumberTypeFloat32) {
LaunchKernel<float>(inputs, workspace, outputs);
} else if (dtype_ == kNumberTypeFloat64) {
LaunchKernel<double>(inputs, workspace, outputs);
} else if (dtype_ == kNumberTypeInt32) {
LaunchKernel<int>(inputs, workspace, outputs);
} else {
@ -130,6 +132,8 @@ void MirrorPadGradCPUKernel::InitInputOutputSize(const CNodePtr &kernel_node) {
InitWorkspaceSize<float16>();
} else if (dtype_ == kNumberTypeFloat32) {
InitWorkspaceSize<float>();
} else if (dtype_ == kNumberTypeFloat64) {
InitWorkspaceSize<double>();
} else if (dtype_ == kNumberTypeInt32) {
InitWorkspaceSize<int>();
}

View File

@ -90,6 +90,11 @@ MS_REG_CPU_KERNEL(
KernelAttr().AddInputAttr(kNumberTypeFloat32).AddInputAttr(kNumberTypeInt64).AddOutputAttr(kNumberTypeFloat32),
MirrorPadGradCPUKernel);
MS_REG_CPU_KERNEL(
MirrorPadGrad,
KernelAttr().AddInputAttr(kNumberTypeFloat64).AddInputAttr(kNumberTypeInt64).AddOutputAttr(kNumberTypeFloat64),
MirrorPadGradCPUKernel);
MS_REG_CPU_KERNEL(
MirrorPadGrad,
KernelAttr().AddInputAttr(kNumberTypeInt32).AddInputAttr(kNumberTypeInt64).AddOutputAttr(kNumberTypeInt32),
@ -105,6 +110,11 @@ MS_REG_CPU_KERNEL(
KernelAttr().AddInputAttr(kNumberTypeFloat32).AddInputAttr(kNumberTypeInt32).AddOutputAttr(kNumberTypeFloat32),
MirrorPadGradCPUKernel);
MS_REG_CPU_KERNEL(
MirrorPadGrad,
KernelAttr().AddInputAttr(kNumberTypeFloat64).AddInputAttr(kNumberTypeInt32).AddOutputAttr(kNumberTypeFloat64),
MirrorPadGradCPUKernel);
MS_REG_CPU_KERNEL(
MirrorPadGrad,
KernelAttr().AddInputAttr(kNumberTypeInt32).AddInputAttr(kNumberTypeInt32).AddOutputAttr(kNumberTypeInt32),

View File

@ -63,6 +63,8 @@ bool PadCPUKernel::Launch(const std::vector<kernel::AddressPtr> &inputs, const s
LaunchKernel<float16>(inputs, outputs);
} else if (dtype_ == kNumberTypeFloat32) {
LaunchKernel<float>(inputs, outputs);
} else if (dtype_ == kNumberTypeFloat64) {
LaunchKernel<double>(inputs, outputs);
} else if (dtype_ == kNumberTypeInt32) {
LaunchKernel<int>(inputs, outputs);
} else {

View File

@ -151,8 +151,6 @@ bool RandomChoiceWithMaskCPUKernel::Launch(const std::vector<kernel::AddressPtr>
return false;
}
std::mt19937 gen(seedc);
std::uniform_int_distribution<> dis(0, non_zero_num - 1);
int *mask_dim = new (std::nothrow) int[output_length];
if (mask_dim == nullptr) {
MS_LOG(EXCEPTION) << "Malloc memory failed!";
@ -163,8 +161,12 @@ bool RandomChoiceWithMaskCPUKernel::Launch(const std::vector<kernel::AddressPtr>
(void)memset_s(mask_dim, output_length, 0X00, output_length);
(void)memset_s(tmp_output, output_length, 0X00, output_length);
std::vector<int32_t> all_nums(non_zero_num);
std::iota(begin(all_nums), end(all_nums), 0);
shuffle(all_nums.begin(), all_nums.end(), std::default_random_engine(seedc));
for (int32_t i = 0; i < output_non_zero_length; i++) {
int32_t mean = dis(gen);
int32_t mean = all_nums[i];
tmp_output[i] = input_dim[mean];
mask_dim[i] = 1;
}

View File

@ -103,7 +103,7 @@ void SearchSortedCPUKernel<S, T>::CheckParam(const std::vector<AddressPtr> &inpu
}
}
};
CPUKernelUtils::ParallelFor(task, list_count);
CPUKernelUtils::ParallelFor(task, static_cast<size_t>(list_count));
}
} // namespace kernel
} // namespace mindspore

View File

@ -21,9 +21,11 @@ mirror_pad_op_info = CpuRegOp("MirrorPad") \
.output(0, "y", "required") \
.dtype_format(DataType.F16_Default, DataType.I64_Default, DataType.F16_Default) \
.dtype_format(DataType.F32_Default, DataType.I64_Default, DataType.F32_Default) \
.dtype_format(DataType.F64_Default, DataType.I64_Default, DataType.F64_Default) \
.dtype_format(DataType.I32_Default, DataType.I64_Default, DataType.I32_Default) \
.dtype_format(DataType.F16_Default, DataType.I32_Default, DataType.F16_Default) \
.dtype_format(DataType.F32_Default, DataType.I32_Default, DataType.F32_Default) \
.dtype_format(DataType.F64_Default, DataType.I32_Default, DataType.F64_Default) \
.dtype_format(DataType.I32_Default, DataType.I32_Default, DataType.I32_Default) \
.get_op_info()

View File

@ -21,9 +21,11 @@ mirror_pad_grad_op_info = CpuRegOp("MirrorPadGrad") \
.output(0, "y", "required") \
.dtype_format(DataType.F16_Default, DataType.I64_Default, DataType.F16_Default) \
.dtype_format(DataType.F32_Default, DataType.I64_Default, DataType.F32_Default) \
.dtype_format(DataType.F64_Default, DataType.I64_Default, DataType.F64_Default) \
.dtype_format(DataType.I32_Default, DataType.I64_Default, DataType.I32_Default) \
.dtype_format(DataType.F16_Default, DataType.I32_Default, DataType.F16_Default) \
.dtype_format(DataType.F32_Default, DataType.I32_Default, DataType.F32_Default) \
.dtype_format(DataType.F64_Default, DataType.I32_Default, DataType.F64_Default) \
.dtype_format(DataType.I32_Default, DataType.I32_Default, DataType.I32_Default) \
.get_op_info()

View File

@ -21,6 +21,7 @@ pad_op_info = CpuRegOp("Pad") \
.output(0, "y", "required") \
.dtype_format(DataType.F16_Default, DataType.F16_Default) \
.dtype_format(DataType.F32_Default, DataType.F32_Default) \
.dtype_format(DataType.F64_Default, DataType.F64_Default) \
.dtype_format(DataType.I32_Default, DataType.I32_Default) \
.get_op_info()

View File

@ -109,8 +109,8 @@ def test_RCWM_1D():
context.set_context(mode=context.GRAPH_MODE, device_target="CPU")
input_tensor = Tensor(
np.array([1, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1]).astype(np.bool))
expect_index = np.array([[0], [7], [9], [8], [8], [0],
[2], [7], [0], [0]]).astype(np.int32)
expect_index = np.array([[11], [0], [8], [2], [9], [7],
[10], [15], [0], [0]]).astype(np.int32)
expect_mask = np.array(
[True, True, True, True, True, True, True, True, False, False])
rcwm = RCWM_1D()