From 8621c032d9bffe5d9c1c55b3924efc51fc1cd59a Mon Sep 17 00:00:00 2001 From: yanzhenxiang2020 Date: Tue, 2 Jun 2020 20:40:58 +0800 Subject: [PATCH] add pack op for aicpu --- .../kernel/aicpu/aicpu_kernel_metadata.cc | 4 +- mindspore/ccsrc/kernel/aicpu/aicpu_util.h | 1 + mindspore/ops/_op_impl/aicpu/__init__.py | 1 + mindspore/ops/_op_impl/aicpu/pack.py | 41 ++++ mindspore/ops/_op_impl/aicpu/topk.py | 1 + .../st/ops/ascend/test_aicpu_ops/test_pack.py | 176 ++++++++++++++++++ 6 files changed, 222 insertions(+), 2 deletions(-) create mode 100644 mindspore/ops/_op_impl/aicpu/pack.py create mode 100644 tests/st/ops/ascend/test_aicpu_ops/test_pack.py diff --git a/mindspore/ccsrc/kernel/aicpu/aicpu_kernel_metadata.cc b/mindspore/ccsrc/kernel/aicpu/aicpu_kernel_metadata.cc index e8636ffa2e3..3670a2d76f7 100644 --- a/mindspore/ccsrc/kernel/aicpu/aicpu_kernel_metadata.cc +++ b/mindspore/ccsrc/kernel/aicpu/aicpu_kernel_metadata.cc @@ -38,10 +38,10 @@ void AicpuMetadataInfo(const CNodePtr &kernel_node, std::vector inputs_format{}; std::vector<TypeId> inputs_type{}; - if (op_name == kPrint) { + if (op_name == kPrint || op_name == kPack) { for (size_t input_index = 0; input_index < AnfAlgo::GetInputTensorNum(kernel_node); ++input_index) { inputs_format.emplace_back(kOpFormat_DEFAULT); inputs_type.push_back(AnfAlgo::GetPrevNodeOutputInferDataType(kernel_node, input_index)); diff --git a/mindspore/ccsrc/kernel/aicpu/aicpu_util.h b/mindspore/ccsrc/kernel/aicpu/aicpu_util.h index cfb8fefcb37..bf8025de2cd 100644 --- a/mindspore/ccsrc/kernel/aicpu/aicpu_util.h +++ b/mindspore/ccsrc/kernel/aicpu/aicpu_util.h @@ -28,6 +28,7 @@ constexpr auto kInitDataSetQueue = "InitDataSetQueue"; constexpr auto kInitData = "InitData"; constexpr auto kGetNext = "GetNext"; constexpr auto kPrint = "Print"; +constexpr auto kPack = "Pack"; constexpr auto kOutputTypes = "output_types"; constexpr auto kOutputShapes = "output_shapes"; constexpr auto kChannelName = "channel_name"; diff --git 
a/mindspore/ops/_op_impl/aicpu/__init__.py b/mindspore/ops/_op_impl/aicpu/__init__.py index 37d008940d9..475200e7ef8 100644 --- a/mindspore/ops/_op_impl/aicpu/__init__.py +++ b/mindspore/ops/_op_impl/aicpu/__init__.py @@ -24,3 +24,4 @@ from .flatten import _flatten_aicpu from .squeeze import _squeeze_aicpu from .expand_dims import _expand_dims_aicpu from .random_choice_with_mask import _random_choice_with_mask_aicpu +from .pack import _pack_aicpu diff --git a/mindspore/ops/_op_impl/aicpu/pack.py b/mindspore/ops/_op_impl/aicpu/pack.py new file mode 100644 index 00000000000..179651d884e --- /dev/null +++ b/mindspore/ops/_op_impl/aicpu/pack.py @@ -0,0 +1,41 @@ +# Copyright 2020 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================ + +"""Pack op""" +from mindspore.ops.op_info_register import op_info_register, AiCPURegOp, DataType + +pack_op_info = AiCPURegOp("Pack") \ + .fusion_type("OPAQUE") \ + .attr("axis", "int") \ + .input(0, "x", "dynamic") \ + .output(0, "y", "required") \ + .dtype_format(DataType.I8_Default, DataType.I8_Default) \ + .dtype_format(DataType.I16_Default, DataType.I16_Default) \ + .dtype_format(DataType.I32_Default, DataType.I32_Default) \ + .dtype_format(DataType.I64_Default, DataType.I64_Default) \ + .dtype_format(DataType.U8_Default, DataType.U8_Default) \ + .dtype_format(DataType.U16_Default, DataType.U16_Default) \ + .dtype_format(DataType.U32_Default, DataType.U32_Default) \ + .dtype_format(DataType.U64_Default, DataType.U64_Default) \ + .dtype_format(DataType.F16_Default, DataType.F16_Default) \ + .dtype_format(DataType.F32_Default, DataType.F32_Default) \ + .dtype_format(DataType.F64_Default, DataType.F64_Default) \ + .dtype_format(DataType.BOOL_Default, DataType.BOOL_Default) \ + .get_op_info() + +@op_info_register(pack_op_info) +def _pack_aicpu(): + """Pack AiCPU register""" + return diff --git a/mindspore/ops/_op_impl/aicpu/topk.py b/mindspore/ops/_op_impl/aicpu/topk.py index a68ae3557de..80cf1c5203f 100644 --- a/mindspore/ops/_op_impl/aicpu/topk.py +++ b/mindspore/ops/_op_impl/aicpu/topk.py @@ -24,6 +24,7 @@ top_k_op_info = AiCPURegOp("TopK") \ .output(0, "values", "required") \ .output(1, "indices", "required") \ .dtype_format(DataType.F16_Default, DataType.I32_Default, DataType.F16_Default, DataType.I32_Default) \ + .dtype_format(DataType.F32_Default, DataType.I32_Default, DataType.F32_Default, DataType.I32_Default) \ .dtype_format(DataType.I32_Default, DataType.I32_Default, DataType.I32_Default, DataType.I32_Default) \ .get_op_info() diff --git a/tests/st/ops/ascend/test_aicpu_ops/test_pack.py b/tests/st/ops/ascend/test_aicpu_ops/test_pack.py new file mode 100644 index 
00000000000..750b9114373 --- /dev/null +++ b/tests/st/ops/ascend/test_aicpu_ops/test_pack.py @@ -0,0 +1,176 @@ +# Copyright 2020 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ +import numpy as np + +import mindspore.context as context +import mindspore.nn as nn +from mindspore import Tensor +from mindspore.ops import operations as P + +context.set_context(mode=context.GRAPH_MODE, device_target="Ascend") + + +class Net(nn.Cell): + def __init__(self, x, axis): + super(Net, self).__init__() + self.pack = P.Pack(axis) + self.x = x + + def construct(self): + return self.pack(self.x) + + +def test_net_bool(): + x = np.random.randn(3, 5, 4) > 0 + y = np.random.randn(3, 5, 4) > 0 + axis = -1 + net = Net((Tensor(x), Tensor(y)), axis) + output = net() + print(x) + print(y) + print(output.asnumpy()) + assert np.array_equal(output.asnumpy(), np.stack([x, y], axis)) + + +def test_net_int8(): + x = np.random.randn(3, 5, 4).astype(np.int8) + y = np.random.randn(3, 5, 4).astype(np.int8) + axis = -1 + net = Net((Tensor(x), Tensor(y)), axis) + output = net() + print(x) + print(y) + print(output.asnumpy()) + assert np.array_equal(output.asnumpy(), np.stack([x, y], axis)) + + +def test_net_uint8(): + x = np.random.randn(3, 5, 4).astype(np.uint8) + y = np.random.randn(3, 5, 4).astype(np.uint8) + axis = -1 + net = Net((Tensor(x), Tensor(y)), axis) + output = net() + print(x) + 
print(y) + print(output.asnumpy()) + assert np.array_equal(output.asnumpy(), np.stack([x, y], axis)) + + +def test_net_int16(): + x = np.random.randn(3, 5, 4).astype(np.int16) + y = np.random.randn(3, 5, 4).astype(np.int16) + axis = -1 + net = Net((Tensor(x), Tensor(y)), axis) + output = net() + print(x) + print(y) + print(output.asnumpy()) + assert np.array_equal(output.asnumpy(), np.stack([x, y], axis)) + + +def test_net_uint16(): + x = np.random.randn(3, 5, 4).astype(np.uint16) + y = np.random.randn(3, 5, 4).astype(np.uint16) + axis = -1 + net = Net((Tensor(x), Tensor(y)), axis) + output = net() + print(x) + print(y) + print(output.asnumpy()) + assert np.array_equal(output.asnumpy(), np.stack([x, y], axis)) + + +def test_net_int32(): + x = np.random.randn(3, 5, 4).astype(np.int32) + y = np.random.randn(3, 5, 4).astype(np.int32) + axis = -1 + net = Net((Tensor(x), Tensor(y)), axis) + output = net() + print(x) + print(y) + print(output.asnumpy()) + assert np.array_equal(output.asnumpy(), np.stack([x, y], axis)) + + +def test_net_uint32(): + x = np.random.randn(3, 5, 4).astype(np.uint32) + y = np.random.randn(3, 5, 4).astype(np.uint32) + axis = -1 + net = Net((Tensor(x), Tensor(y)), axis) + output = net() + print(x) + print(y) + print(output.asnumpy()) + assert np.array_equal(output.asnumpy(), np.stack([x, y], axis)) + + +def test_net_int64(): + x = np.random.randn(3, 5, 4).astype(np.int64) + y = np.random.randn(3, 5, 4).astype(np.int64) + axis = -1 + net = Net((Tensor(x), Tensor(y)), axis) + output = net() + print(x) + print(y) + print(output.asnumpy()) + assert np.array_equal(output.asnumpy(), np.stack([x, y], axis)) + + +def test_net_uint64(): + x = np.random.randn(3, 5, 4).astype(np.uint64) + y = np.random.randn(3, 5, 4).astype(np.uint64) + axis = -1 + net = Net((Tensor(x), Tensor(y)), axis) + output = net() + print(x) + print(y) + print(output.asnumpy()) + assert np.array_equal(output.asnumpy(), np.stack([x, y], axis)) + + +def test_net_float16(): + x = 
np.random.randn(3, 5, 4).astype(np.float16) + y = np.random.randn(3, 5, 4).astype(np.float16) + axis = -1 + net = Net((Tensor(x), Tensor(y)), axis) + output = net() + print(x) + print(y) + print(output.asnumpy()) + assert np.array_equal(output.asnumpy(), np.stack([x, y], axis)) + + +def test_net_float32(): + x = np.random.randn(3, 5, 4).astype(np.float32) + y = np.random.randn(3, 5, 4).astype(np.float32) + axis = -1 + net = Net((Tensor(x), Tensor(y)), axis) + output = net() + print(x) + print(y) + print(output.asnumpy()) + assert np.array_equal(output.asnumpy(), np.stack([x, y], axis)) + + +def test_net_float64(): + x = np.random.randn(3, 5, 4).astype(np.float64) + y = np.random.randn(3, 5, 4).astype(np.float64) + axis = -1 + net = Net((Tensor(x), Tensor(y)), axis) + output = net() + print(x) + print(y) + print(output.asnumpy()) + assert np.array_equal(output.asnumpy(), np.stack([x, y], axis))