!49791 remove the deformable_offsets

Merge pull request !49791 from huangxinjing/code_docs_remove_deformable
huangxinjing 2023-03-05 02:20:43 +00:00 committed by Gitee
commit fc7346ffad
2 changed files with 0 additions and 314 deletions


@@ -1,97 +0,0 @@
# Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import pytest
import mindspore.context as context
import mindspore.nn as nn
from mindspore import Tensor
from mindspore.ops.composite import GradOperation
from mindspore.ops import operations as P
from mindspore.ops.operations import nn_ops as NN


class Net(nn.Cell):
    def __init__(self, out_channel, kernel_size, pad, stride, dilation):
        super(Net, self).__init__()
        self.net = NN.DeformableOffsets(ksize=(kernel_size, kernel_size),
                                        pads=(pad, pad, pad, pad),
                                        strides=(stride, stride, stride, stride),
                                        dilations=(dilation, dilation, dilation, dilation),
                                        deformable_groups=1,
                                        modulated=True,
                                        data_format="NCHW")
        self.conv = P.Conv2D(out_channel,
                             kernel_size,
                             mode=1,
                             pad_mode="pad",
                             pad=pad,
                             stride=kernel_size,
                             dilation=1,
                             group=1,
                             data_format="NCHW")

    def construct(self, x, w, offset):
        x = self.net(x, offset)
        return self.conv(x, w)


class Grad(nn.Cell):
    def __init__(self, network):
        super(Grad, self).__init__()
        self.grad = GradOperation(get_all=True, sens_param=True)
        self.network = network

    def construct(self, x, w, offset, output_grad):
        return self.grad(self.network)(x, w, offset, output_grad)

@pytest.mark.level0
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.env_onecard
def test_deformable_conv2d_grad():
    """
    Feature: deformable_conv2d_grad function
    Description: Test case for the simplest deformable_conv2d_grad
    Expectation: The results are as expected
    """
    context.set_context(mode=context.GRAPH_MODE, device_target="Ascend", save_graphs=True)
    kernel_size = 2
    stride = 1
    pad = 0
    dilation = 1
    # x shape [1, 64, 2, 2]
    x = Tensor(np.ones([1, 64, 2, 2]).astype(np.float32) * 0.1)
    # weight shape [1, 64, 2, 2]
    weight = Tensor(np.ones([1, 64, 2, 2]).astype(np.float32) * 0.1)
    # offsets shape [1, 12, 1, 1]
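    # 12 = 3 * deformable_groups * kernel_size * kernel_size: an x-offset,
    # a y-offset and a modulation mask for each of the 2x2 sampling points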
    offsets = Tensor(np.ones([1, 12, 1, 1]).astype(np.float32) * 0.1)
    # out_channel, kernel_size, pad, stride, dilation
    dfm_conv2d_net = Net(1, kernel_size, pad, stride, dilation)
    out = dfm_conv2d_net(x, weight, offsets)
    grad_net = Grad(dfm_conv2d_net)
    grad_output = grad_net(x, weight, offsets, out)
    expect_out = np.array([[[[0.2310471]]]]).astype(np.float32)
    expect_grad_x = np.array([[[[0.00187125, 0.00207916], [0.00207916, 0.00231018]]] * 64]).astype(np.float32)
    expect_grad_weight = np.array([[[[0.00231128, 0.00208033], [0.00208033, 0.0018723]]] * 64]).astype(np.float32)
    expect_grad_offset = np.array([[[0]], [[-0.01478]], [[0]], [[-0.01331]],
                                   [[0]], [[0]], [[-0.01478]], [[-0.01331]],
                                   [[0.14785]], [[0.13307]], [[0.13307]], [[0.11976]]]).astype(np.float32)
    assert np.allclose(out.asnumpy(), expect_out, 0.0001, 0.0001)
    assert np.allclose(grad_output[0].asnumpy(), expect_grad_x, 0.0001, 0.0001)
    assert np.allclose(grad_output[1].asnumpy(), expect_grad_weight, 0.0001, 0.0001)
    assert np.allclose(grad_output[2].asnumpy(), expect_grad_offset, 0.0001, 0.0001)


@@ -1,217 +0,0 @@
# Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import pytest
import numpy as np
from mindspore.ops.operations import _grad_ops as G
from mindspore.ops import composite as C
from mindspore import nn
from mindspore import Tensor
from mindspore import dtype
from mindspore.ops.operations import nn_ops
from mindspore.ops import functional as F
grad_all = C.GradOperation(get_all=True)


class TestNetwork(nn.Cell):
    def __init__(self):
        super(TestNetwork, self).__init__()
        stride = (1, 1, 1, 1)
        pad = (0, 0, 0, 0)
        ksize = (2, 2)
        self.deformable_offsets_grad_op = G.DeformableOffsetsGrad(stride, pad, ksize)
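        # DeformableOffsetsGrad takes (dout, x, offsets) and returns the
        # gradients w.r.t. x and offsets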

    def construct(self, dout, x, offsets):
        output = self.deformable_offsets_grad_op(dout, x, offsets)
        return output


def test_grad_infer():
    """
    Feature: CPU operation.
    Description: Test of CPU operation: DeformableOffsetsGrad
    Expectation: No exception raised.
    """
    dout = Tensor(np.ones([1, 1, 2, 2]), dtype.float32)
    x = Tensor(np.ones([1, 1, 2, 2]), dtype.float32)
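    # 12 offset channels = 3 * deformable_groups * 2 * 2 for the 2x2 kernel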
    offsets = Tensor(np.array([0.1] * 12).astype(np.float32).reshape([1, 12, 1, 1]))
    net = TestNetwork()
    grad = net(dout, x, offsets)
    print("grad_x:", grad[0])
    print("grad_offset:", grad[1])
    return grad


class ForwardNet(nn.Cell):
    def __init__(self):
        super(ForwardNet, self).__init__()
        stride = (1, 1, 1, 1)
        pad = (0, 0, 0, 0)
        ksize = (2, 2)
        self.deformable_offsets_op = nn_ops.DeformableOffsets(stride, pad, ksize)

    def construct(self, x, offsets):
        output = self.deformable_offsets_op(x, offsets)
        return output


class BackwardNet(nn.Cell):
    def __init__(self, net):
        super(BackwardNet, self).__init__()
        self.net = net

    def construct(self, *inputs):
        out = self.net(*inputs)
        return out, grad_all(self.net)(*inputs)


def test_auto_diff():
    """
    Feature: CPU operation.
    Description: Test of CPU operation: DeformableOffsetsGrad by auto diff.
    Expectation: No exception raised.
    """
    x = Tensor(np.ones([1, 1, 2, 2]), dtype.float32)
    offsets = Tensor(np.array([0.1] * 12).astype(np.float32).reshape([1, 12, 1, 1]))
    forward_net = ForwardNet()
    net = BackwardNet(forward_net)
    # BackwardNet returns (forward output, gradients), so unpack before printing
    out, grads = net(x, offsets)
    print("grad_x:", grads[0])
    print("grad_offset:", grads[1])
    return grads


class NetDeformableOffsetsGrad(nn.Cell):
    def __init__(self, data_format):
        super(NetDeformableOffsetsGrad, self).__init__()
        strides = (1, 1, 1, 1)
        pads = (0, 0, 0, 0)
        ksize = (3, 3)
        self.grad_op = G.DeformableOffsetsGrad(strides, pads, ksize, data_format=data_format)

    def construct(self, grad, input_x, offsets):
        return self.grad_op(grad, input_x, offsets)

@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
@pytest.mark.parametrize('data_type', [np.float16, np.float32])
def test_deformable_offsets_grad_nchw(data_type):
    """
    Feature: DeformableOffsetsGrad cpu kernel
    Description: test the correctness of the DeformableOffsetsGrad cpu kernel
    Expectation: the output is the same as the expected result
    """
    net = NetDeformableOffsetsGrad(data_format="NCHW")
    dout = Tensor(np.ones([1, 2, 3, 3]).astype(data_type))
    x = Tensor(np.ones([1, 2, 4, 4]).astype(data_type))
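    # 27 offset channels = 3 * deformable_groups * 3 * 3 for the 3x3 kernel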
    offsets = Tensor(np.ones([1, 27, 1, 1]).astype(data_type) * 0.1)
    output = net(dout, x, offsets)
    expect_grad_x = np.array([[[0.081, 0.09, 0.09, 0.009],
                               [0.09, 0.1, 0.1, 0.01],
                               [0.09, 0.1, 0.1, 0.01],
                               [0.009, 0.01, 0.01, 0.001]],
                              [[0.081, 0.09, 0.09, 0.009],
                               [0.09, 0.1, 0.1, 0.01],
                               [0.09, 0.1, 0.1, 0.01],
                               [0.009, 0.01, 0.01, 0.001]]]
                             ).astype(data_type)
    expect_grad_offset = np.array([0] * 18 + [2.0] * 9).astype(data_type).reshape([1, 27, 1, 1])
    rtol = 1e-5
    if data_type == np.float16:
        rtol = 1e-3
    assert np.allclose(output[0].asnumpy(), expect_grad_x, rtol)
    assert np.allclose(output[1].asnumpy(), expect_grad_offset, rtol)

@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
@pytest.mark.parametrize('data_type', [np.float16, np.float32])
def test_deformable_offsets_grad_nhwc(data_type):
    """
    Feature: DeformableOffsetsGrad cpu kernel
    Description: test the correctness of the DeformableOffsetsGrad cpu kernel
    Expectation: the output is the same as the expected result
    """
    net = NetDeformableOffsetsGrad(data_format="NHWC")
    dout = Tensor(np.ones([1, 3, 3, 2]).astype(data_type))
    x = Tensor(np.ones([1, 4, 4, 2]).astype(data_type))
    offsets = Tensor(np.ones([1, 1, 1, 27]).astype(data_type) * 0.1)
    output = net(dout, x, offsets)
    expect_grad_x = np.array([[[0.081, 0.081],
                               [0.09, 0.09],
                               [0.09, 0.09],
                               [0.009, 0.009]],
                              [[0.09, 0.09],
                               [0.1, 0.1],
                               [0.1, 0.1],
                               [0.01, 0.01]],
                              [[0.09, 0.09],
                               [0.1, 0.1],
                               [0.1, 0.1],
                               [0.01, 0.01]],
                              [[0.009, 0.009],
                               [0.01, 0.01],
                               [0.01, 0.01],
                               [0.001, 0.001]]]
                             ).astype(data_type)
    expect_grad_offset = np.array([0] * 18 + [2.0] * 9).astype(data_type).reshape([1, 1, 1, 27])
    rtol = 1e-5
    if data_type == np.float16:
        rtol = 1e-3
    assert np.allclose(output[0].asnumpy(), expect_grad_x, rtol)
    assert np.allclose(output[1].asnumpy(), expect_grad_offset, rtol)

@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_vmap():
    """
    Feature: DeformableOffsetsGrad cpu kernel
    Description: Test case with vmap.
    Expectation: The results are as expected.
    """
    def cal_deformable_offsets_grad(dout, x, offsets):
        net = NetDeformableOffsetsGrad(data_format="NCHW")
        return net(dout, x, offsets)

    dout = Tensor(np.arange(2 * 1 * 2 * 3 * 3).reshape(2, 1, 2, 3, 3), dtype.float32)
    x = Tensor(np.arange(2 * 1 * 2 * 4 * 4).reshape(2, 1, 2, 4, 4), dtype.float32)
    offsets = Tensor(np.arange(2 * 1 * 27 * 1 * 1).reshape(2, 1, 27, 1, 1) * 0.1, dtype.float32)
    vmap_deformable_offset_grad = F.vmap(cal_deformable_offsets_grad, in_axes=(0, 0, 0), out_axes=0)
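    # vmap vectorizes cal_deformable_offsets_grad over axis 0 of every input;
    # the result should match stacking per-sample calls, checked below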
    out1 = vmap_deformable_offset_grad(dout, x, offsets)

    def manually_batched(dout, x, offsets):
        output_dx = []
        output_d_offsets = []
        for i in range(x.shape[0]):
            dx, d_offsets = cal_deformable_offsets_grad(dout[i], x[i], offsets[i])
            output_dx.append(dx)
            output_d_offsets.append(d_offsets)
        return F.stack(output_dx), F.stack(output_d_offsets)

    out2 = manually_batched(dout, x, offsets)
    assert np.allclose(out1[0].asnumpy(), out2[0].asnumpy())
    assert np.allclose(out1[1].asnumpy(), out2[1].asnumpy())