From 9d5f70d97a9f7c36be79591e7916dbce996355bc Mon Sep 17 00:00:00 2001 From: xiao_yao1994 Date: Mon, 29 Aug 2022 20:54:43 +0800 Subject: [PATCH] add op for ocr --- .jenkins/check/config/filter_pylint.txt | 1 + .../ccsrc/transform/graph_ir/op_adapter_map.h | 17 +++- .../transform/graph_ir/op_adapter_util.cc | 7 ++ .../graph_ir/op_declare/array_ops_declare.cc | 6 ++ .../graph_ir/op_declare/array_ops_declare.h | 3 + .../elewise_calculation_ops_declare.cc | 5 + .../elewise_calculation_ops_declare.h | 3 + .../op_declare/functional_ops_declare.cc | 13 +++ .../op_declare/functional_ops_declare.h | 5 + .../graph_ir/op_declare/image_ops_declare.cc | 8 ++ .../graph_ir/op_declare/image_ops_declare.h | 3 + .../matrix_calculation_ops_declare.cc | 4 +- .../graph_ir/op_declare/ocr_ops_declare.cc | 96 +++++++++++++++++++ .../graph_ir/op_declare/ocr_ops_declare.h | 53 ++++++++++ .../graph_ir/op_declare/reduce_ops_declare.cc | 6 ++ .../graph_ir/op_declare/reduce_ops_declare.h | 3 + .../graph_ir/op_declare/rnn_declare.cc | 26 +++++ .../graph_ir/op_declare/rnn_declare.h | 3 + .../op_declare/selection_ops_declare.cc | 12 +++ .../op_declare/selection_ops_declare.h | 6 ++ .../graph_ir/op_declare/string_ops_declare.cc | 11 ++- .../graph_ir/op_declare/string_ops_declare.h | 8 ++ .../op_declare/transformation_ops_declare.cc | 6 ++ .../op_declare/transformation_ops_declare.h | 2 + mindspore/core/ops/core_ops.h | 5 + mindspore/core/ops/scatter_elements.cc | 53 ++++++++++ mindspore/core/ops/scatter_elements.h | 48 ++++++++++ mindspore/core/ops/tensor_scatter_elements.h | 4 +- .../mindspore/ops/operations/_inner_ops.py | 55 +++++++++++ .../mindspore/ops/operations/_ocr_ops.py | 2 +- .../mindspore/ops/operations/array_ops.py | 9 +- 31 files changed, 476 insertions(+), 7 deletions(-) create mode 100644 mindspore/ccsrc/transform/graph_ir/op_declare/ocr_ops_declare.cc create mode 100644 mindspore/ccsrc/transform/graph_ir/op_declare/ocr_ops_declare.h create mode 100644 
mindspore/core/ops/scatter_elements.cc create mode 100644 mindspore/core/ops/scatter_elements.h diff --git a/.jenkins/check/config/filter_pylint.txt b/.jenkins/check/config/filter_pylint.txt index ad1761e9470..a254799f6d1 100644 --- a/.jenkins/check/config/filter_pylint.txt +++ b/.jenkins/check/config/filter_pylint.txt @@ -19,6 +19,7 @@ "mindspore/mindspore/python/mindspore/ops/operations" "super-init-not-called" "mindspore/mindspore/python/mindspore/ops/operations/_quant_ops.py" "unused-import" "mindspore/mindspore/python/mindspore/ops/operations/nn_ops.py" "redefined-builtin" +"mindspore/mindspore/python/mindspore/ops/operations/_inner_ops.py" "dangerous-default-value" "mindspore/mindspore/python/mindspore/ops/operations/_thor_ops.py" "dangerous-default-value" "mindspore/mindspore/python/mindspore/ops/operations/_thor_ops.py" "redefined-outer-name" "mindspore/mindspore/python/mindspore/ops/operations/_thor_ops.py" "unused-import" diff --git a/mindspore/ccsrc/transform/graph_ir/op_adapter_map.h b/mindspore/ccsrc/transform/graph_ir/op_adapter_map.h index 3bca6443842..80983833d2f 100644 --- a/mindspore/ccsrc/transform/graph_ir/op_adapter_map.h +++ b/mindspore/ccsrc/transform/graph_ir/op_adapter_map.h @@ -101,6 +101,7 @@ constexpr const char kNameElu[] = "Elu"; constexpr const char kNameEluGrad[] = "EluGrad"; constexpr const char kNameTensorScatterUpdate[] = "TensorScatterUpdate"; constexpr const char kNameTensorScatterElements[] = "TensorScatterElements"; +constexpr const char kNameScatterElements[] = "ScatterElements"; constexpr const char kNameNonZero[] = "NonZero"; constexpr const char kNameNonZeroWithValue[] = "NonZeroWithValue"; constexpr const char kNameNonZeroWithValueShape[] = "NonZeroWithValueShape"; @@ -356,7 +357,6 @@ constexpr const char kNameWhile[] = "While"; constexpr const char kNameKMeansCentroids[] = "KMeansCentroids"; constexpr const char kNameIsNan[] = "IsNan"; constexpr const char kNameKLDiv[] = "KLDivLoss"; -constexpr const char 
kNameStringLength[] = "StringLength"; constexpr const char kNameGetShape[] = "GetShape"; constexpr const char kNameKlDivLossGrad[] = "KLDivLossGrad"; constexpr const char kNameRandomStandardNormal[] = "RandomStandardNormal"; @@ -364,6 +364,21 @@ constexpr const char kNameUnsortedSegmentSum[] = "UnsortedSegmentSum"; constexpr const char kNameSpaceToBatchTF[] = "SpaceToBatchTF"; constexpr const char kNameBatchToSpaceTF[] = "BatchToSpaceTF"; constexpr const char kNameMaskedSelect[] = "MaskedSelect"; +constexpr const char kNamePartitionedCall[] = "PartitionedCall"; +constexpr const char kNameRangeV2[] = "RangeV2"; +constexpr const char kNameOCRDetectionPreHandle[] = "OCRDetectionPreHandle"; +constexpr const char kNameOCRFindContours[] = "OCRFindContours"; +constexpr const char kNameBatchDilatePolys[] = "BatchDilatePolys"; +constexpr const char kNameResizeAndClipPolys[] = "ResizeAndClipPolys"; +constexpr const char kNameOCRDetectionPostHandle[] = "OCRDetectionPostHandle"; +constexpr const char kNameOCRIdentifyPreHandle[] = "OCRIdentifyPreHandle"; +constexpr const char kNameBatchEnqueue[] = "BatchEnqueue"; +constexpr const char kNameDequeue[] = "Dequeue"; +constexpr const char kNameOCRRecognitionPreHandle[] = "OCRRecognitionPreHandle"; +constexpr const char kNameStringUpper[] = "StringUpper"; +constexpr const char kNameStringLength[] = "StringLength"; +constexpr const char kNameDecodeImage[] = "DecodeImage"; +constexpr const char kNameDecodeBase64[] = "DecodeBase64"; class OpAdapterDesc; diff --git a/mindspore/ccsrc/transform/graph_ir/op_adapter_util.cc b/mindspore/ccsrc/transform/graph_ir/op_adapter_util.cc index 4fbeefac84b..3dcaf08e25c 100644 --- a/mindspore/ccsrc/transform/graph_ir/op_adapter_util.cc +++ b/mindspore/ccsrc/transform/graph_ir/op_adapter_util.cc @@ -189,6 +189,13 @@ GeTensor ConvertAnyUtil(const ValuePtr &value, const AnyTraits) { auto v = GetValue(value); desc.SetRealDimCnt(0); return GeTensor(desc, reinterpret_cast(&v), sizeof(int32_t)); + } else if 
(value->isa()) { + // convert scalar UInt to GeTensor + MS_LOG(INFO) << "Convert scalar to tensor with data type = UInt32"; + GeTensorDesc desc(GeShape(), ::ge::FORMAT_NCHW, ::ge::DT_UINT32); + auto v = GetValue(value); + desc.SetRealDimCnt(0); + return GeTensor(desc, reinterpret_cast(&v), sizeof(uint32_t)); } else if (value->isa()) { // convert scalar Int64 to GeTensor MS_LOG(INFO) << "convert scalar to tensor with data type = Int64"; diff --git a/mindspore/ccsrc/transform/graph_ir/op_declare/array_ops_declare.cc b/mindspore/ccsrc/transform/graph_ir/op_declare/array_ops_declare.cc index 8a86660e0db..14842f9a28a 100644 --- a/mindspore/ccsrc/transform/graph_ir/op_declare/array_ops_declare.cc +++ b/mindspore/ccsrc/transform/graph_ir/op_declare/array_ops_declare.cc @@ -85,6 +85,12 @@ ATTR_MAP(MirrorPadGrad) = {{"mode", ATTR_DESC(mode, AnyTraits())}}; OUTPUT_MAP(MirrorPadGrad) = {{0, OUTPUT_DESC(y)}}; REG_ADPT_DESC(MirrorPadGrad, kNameMirrorPadGrad, ADPT_DESC(MirrorPadGrad)) +// Expand +INPUT_MAP(Expand) = {{1, INPUT_DESC(x)}, {2, INPUT_DESC(shape)}}; +ATTR_MAP(Expand) = EMPTY_ATTR_MAP; +OUTPUT_MAP(Expand) = {{0, OUTPUT_DESC(y)}}; +REG_ADPT_DESC(Expand, "Expand", ADPT_DESC(Expand)) + // ExpandDims INPUT_MAP(ExpandDims) = {{1, INPUT_DESC(x)}, {2, INPUT_DESC(axis)}}; ATTR_MAP(ExpandDims) = EMPTY_ATTR_MAP; diff --git a/mindspore/ccsrc/transform/graph_ir/op_declare/array_ops_declare.h b/mindspore/ccsrc/transform/graph_ir/op_declare/array_ops_declare.h index 3e8d8b9b20d..c245560a95b 100644 --- a/mindspore/ccsrc/transform/graph_ir/op_declare/array_ops_declare.h +++ b/mindspore/ccsrc/transform/graph_ir/op_declare/array_ops_declare.h @@ -42,6 +42,9 @@ DECLARE_OP_USE_OUTPUT(MirrorPad) DECLARE_OP_ADAPTER(MirrorPadGrad) DECLARE_OP_USE_OUTPUT(MirrorPadGrad) +DECLARE_OP_ADAPTER(Expand) +DECLARE_OP_USE_OUTPUT(Expand) + DECLARE_OP_ADAPTER(ExpandDims) DECLARE_OP_USE_OUTPUT(ExpandDims) diff --git a/mindspore/ccsrc/transform/graph_ir/op_declare/elewise_calculation_ops_declare.cc 
b/mindspore/ccsrc/transform/graph_ir/op_declare/elewise_calculation_ops_declare.cc index f1a8959079c..1458f65bb18 100644 --- a/mindspore/ccsrc/transform/graph_ir/op_declare/elewise_calculation_ops_declare.cc +++ b/mindspore/ccsrc/transform/graph_ir/op_declare/elewise_calculation_ops_declare.cc @@ -20,6 +20,11 @@ #include namespace mindspore::transform { +INPUT_MAP(ClipByValue) = {{1, INPUT_DESC(x)}, {2, INPUT_DESC(clip_value_min)}, {3, INPUT_DESC(clip_value_max)}}; +ATTR_MAP(ClipByValue) = EMPTY_ATTR_MAP; +OUTPUT_MAP(ClipByValue) = {{0, OUTPUT_DESC(y)}}; +REG_ADPT_DESC(ClipByValue, "Clip", ADPT_DESC(ClipByValue)) + // Assign INPUT_MAP(Assign) = {{1, INPUT_DESC(ref)}, {2, INPUT_DESC(value)}}; ATTR_MAP(Assign) = EMPTY_ATTR_MAP; diff --git a/mindspore/ccsrc/transform/graph_ir/op_declare/elewise_calculation_ops_declare.h b/mindspore/ccsrc/transform/graph_ir/op_declare/elewise_calculation_ops_declare.h index e206e67054e..2accdd84526 100644 --- a/mindspore/ccsrc/transform/graph_ir/op_declare/elewise_calculation_ops_declare.h +++ b/mindspore/ccsrc/transform/graph_ir/op_declare/elewise_calculation_ops_declare.h @@ -22,6 +22,9 @@ #include "ops/elewise_calculation_ops.h" namespace mindspore::transform { +DECLARE_OP_ADAPTER(ClipByValue) +DECLARE_OP_USE_OUTPUT(ClipByValue) + DECLARE_OP_ADAPTER(AccumulateNV2) DECLARE_OP_USE_DYN_INPUT(AccumulateNV2) DECLARE_OP_USE_OUTPUT(AccumulateNV2) diff --git a/mindspore/ccsrc/transform/graph_ir/op_declare/functional_ops_declare.cc b/mindspore/ccsrc/transform/graph_ir/op_declare/functional_ops_declare.cc index 67122508fd1..5b89b2f4e39 100644 --- a/mindspore/ccsrc/transform/graph_ir/op_declare/functional_ops_declare.cc +++ b/mindspore/ccsrc/transform/graph_ir/op_declare/functional_ops_declare.cc @@ -15,6 +15,7 @@ */ #include "transform/graph_ir/op_declare/functional_ops_declare.h" +#include namespace mindspore::transform { // Case @@ -31,4 +32,16 @@ ATTR_MAP(While) = {{"parallel_iterations", ATTR_DESC(parallel_iterations, AnyTra 
DYN_OUTPUT_MAP(While) = {{0, DYN_OUTPUT_DESC(output)}}; SUBGRAPH_MAP(While) = {{0, SUBGRAPH_DESC(cond)}, {1, SUBGRAPH_DESC(body)}}; REG_ADPT_DESC(While, kNameWhile, ADPT_DESC(While)); + +// PartitionedCall +INPUT_MAP(PartitionedCall) = EMPTY_INPUT_MAP; +DYN_INPUT_MAP(PartitionedCall) = {{1, DYN_INPUT_DESC(args)}}; +ATTR_MAP(PartitionedCall) = { + {"config", ATTR_DESC(config, AnyTraits())}, + {"config_proto", ATTR_DESC(config_proto, AnyTraits())}, + {"executor_type", ATTR_DESC(executor_type, AnyTraits())}, +}; +DYN_OUTPUT_MAP(PartitionedCall) = {{0, DYN_OUTPUT_DESC(output)}}; +SUBGRAPH_MAP(PartitionedCall) = {{0, SUBGRAPH_DESC(f)}}; +REG_ADPT_DESC(PartitionedCall, kNamePartitionedCall, ADPT_DESC(PartitionedCall)) } // namespace mindspore::transform diff --git a/mindspore/ccsrc/transform/graph_ir/op_declare/functional_ops_declare.h b/mindspore/ccsrc/transform/graph_ir/op_declare/functional_ops_declare.h index 84b2b202182..cc0525e64ca 100644 --- a/mindspore/ccsrc/transform/graph_ir/op_declare/functional_ops_declare.h +++ b/mindspore/ccsrc/transform/graph_ir/op_declare/functional_ops_declare.h @@ -32,5 +32,10 @@ DECLARE_OP_ATTR(While) DECLARE_OP_USE_DYN_INPUT(While) DECLARE_OP_USE_SUBGRAPH(While) DECLARE_OP_USE_DYN_OUTPUT(While) + +DECLARE_OP_ADAPTER(PartitionedCall) +DECLARE_OP_USE_DYN_INPUT(PartitionedCall) +DECLARE_OP_USE_SUBGRAPH(PartitionedCall) +DECLARE_OP_USE_DYN_OUTPUT(PartitionedCall) } // namespace mindspore::transform #endif // MINDSPORE_CCSRC_TRANSFORM_GRAPH_IR_OP_DECLARE_FUNCTIONAL_OPS_DECLARE_H_ diff --git a/mindspore/ccsrc/transform/graph_ir/op_declare/image_ops_declare.cc b/mindspore/ccsrc/transform/graph_ir/op_declare/image_ops_declare.cc index 6c9fd06ca66..e260df0236e 100644 --- a/mindspore/ccsrc/transform/graph_ir/op_declare/image_ops_declare.cc +++ b/mindspore/ccsrc/transform/graph_ir/op_declare/image_ops_declare.cc @@ -60,4 +60,12 @@ ATTR_MAP(CropAndResize) = {{"extrapolation_value", ATTR_DESC(extrapolation_value {"method", ATTR_DESC(method, 
AnyTraits())}}; OUTPUT_MAP(CropAndResize) = {{0, OUTPUT_DESC(y)}}; REG_ADPT_DESC(CropAndResize, kNameCropAndResize, ADPT_DESC(CropAndResize)) + +// DecodeImage +INPUT_MAP(DecodeImage) = {{1, INPUT_DESC(contents)}}; +ATTR_MAP(DecodeImage) = {{"channels", ATTR_DESC(channels, AnyTraits())}, + {"dtype", ATTR_DESC(dtype, AnyTraits())}, + {"expand_animations", ATTR_DESC(expand_animations, AnyTraits())}}; +OUTPUT_MAP(DecodeImage) = {{0, OUTPUT_DESC(image)}}; +REG_ADPT_DESC(DecodeImage, kNameDecodeImage, ADPT_DESC(DecodeImage)) } // namespace mindspore::transform diff --git a/mindspore/ccsrc/transform/graph_ir/op_declare/image_ops_declare.h b/mindspore/ccsrc/transform/graph_ir/op_declare/image_ops_declare.h index ae4c841e76a..a46c9e40422 100644 --- a/mindspore/ccsrc/transform/graph_ir/op_declare/image_ops_declare.h +++ b/mindspore/ccsrc/transform/graph_ir/op_declare/image_ops_declare.h @@ -39,5 +39,8 @@ DECLARE_OP_USE_OUTPUT(ResizeBilinearV2Grad) DECLARE_OP_ADAPTER(CropAndResize) DECLARE_OP_USE_OUTPUT(CropAndResize) + +DECLARE_OP_ADAPTER(DecodeImage) +DECLARE_OP_USE_OUTPUT(DecodeImage) } // namespace mindspore::transform #endif // MINDSPORE_CCSRC_TRANSFORM_GRAPH_IR_OP_DECLARE_IMAGE_OPS_DECLARE_H_ diff --git a/mindspore/ccsrc/transform/graph_ir/op_declare/matrix_calculation_ops_declare.cc b/mindspore/ccsrc/transform/graph_ir/op_declare/matrix_calculation_ops_declare.cc index 45a56f49105..37e16d2962d 100644 --- a/mindspore/ccsrc/transform/graph_ir/op_declare/matrix_calculation_ops_declare.cc +++ b/mindspore/ccsrc/transform/graph_ir/op_declare/matrix_calculation_ops_declare.cc @@ -96,6 +96,7 @@ ATTR_MAP(MatMulV2) = {{"transpose_a", ATTR_DESC(transpose_x1, AnyTraits()) {"transpose_b", ATTR_DESC(transpose_x2, AnyTraits())}}; OUTPUT_MAP(MatMulV2) = {{0, OUTPUT_DESC(y)}}; REG_ADPT_DESC(MatMulV2, prim::kPrimMatMul->name(), ADPT_DESC(MatMulV2)) +REG_ADPT_DESC(MatMulV2Duplicate, prim::kPrimMatMulV2->name(), ADPT_DESC(MatMulV2)) // MatrixDiag INPUT_MAP(MatrixDiag) = {{1, 
INPUT_DESC(x)}}; @@ -145,7 +146,8 @@ REG_ADPT_DESC(L2Loss, kNameL2Loss, ADPT_DESC(L2Loss)) INPUT_MAP(ScatterElements) = {{1, INPUT_DESC(data)}, {2, INPUT_DESC(indices)}, {3, INPUT_DESC(updates)}}; ATTR_MAP(ScatterElements) = {{"axis", ATTR_DESC(axis, AnyTraits())}}; OUTPUT_MAP(ScatterElements) = {{0, OUTPUT_DESC(y)}}; -REG_ADPT_DESC(ScatterElements, kNameTensorScatterElements, ADPT_DESC(ScatterElements)) +REG_ADPT_DESC(TensorScatterElements, kNameTensorScatterElements, ADPT_DESC(ScatterElements)) +REG_ADPT_DESC(ScatterElements, kNameScatterElements, ADPT_DESC(ScatterElements)) // FullyConnection INPUT_MAP(FullyConnection) = {{1, INPUT_DESC(x)}, {2, INPUT_DESC(w)}, {3, INPUT_DESC(b)}, {4, INPUT_DESC(offset_w)}}; diff --git a/mindspore/ccsrc/transform/graph_ir/op_declare/ocr_ops_declare.cc b/mindspore/ccsrc/transform/graph_ir/op_declare/ocr_ops_declare.cc new file mode 100644 index 00000000000..cbb8d747e5f --- /dev/null +++ b/mindspore/ccsrc/transform/graph_ir/op_declare/ocr_ops_declare.cc @@ -0,0 +1,96 @@ +/** + * Copyright 2022 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "transform/graph_ir/op_declare/ocr_ops_declare.h" +#include + +namespace mindspore::transform { +INPUT_MAP(OCRDetectionPreHandle) = {{1, INPUT_DESC(img)}}; +ATTR_MAP(OCRDetectionPreHandle) = {{"format", ATTR_DESC(data_format, AnyTraits())}}; +OUTPUT_MAP(OCRDetectionPreHandle) = { + {0, OUTPUT_DESC(resized_img)}, {1, OUTPUT_DESC(h_scale)}, {2, OUTPUT_DESC(w_scale)}}; +REG_ADPT_DESC(OCRDetectionPreHandle, kNameOCRDetectionPreHandle, ADPT_DESC(OCRDetectionPreHandle)) + +INPUT_MAP(OCRFindContours) = {{1, INPUT_DESC(img)}}; +ATTR_MAP(OCRFindContours) = {{"value_mode", ATTR_DESC(value_mode, AnyTraits())}}; +OUTPUT_MAP(OCRFindContours) = { + {0, OUTPUT_DESC(polys_data)}, {1, OUTPUT_DESC(polys_offset)}, {2, OUTPUT_DESC(polys_size)}}; +REG_ADPT_DESC(OCRFindContours, kNameOCRFindContours, ADPT_DESC(OCRFindContours)) + +INPUT_MAP(BatchDilatePolys) = {{1, INPUT_DESC(polys_data)}, {2, INPUT_DESC(polys_offset)}, + {3, INPUT_DESC(polys_size)}, {4, INPUT_DESC(score)}, + {5, INPUT_DESC(min_border)}, {6, INPUT_DESC(min_area_thr)}, + {7, INPUT_DESC(score_thr)}, {8, INPUT_DESC(expands_cale)}}; +ATTR_MAP(BatchDilatePolys) = EMPTY_ATTR_MAP; +OUTPUT_MAP(BatchDilatePolys) = { + {0, OUTPUT_DESC(dilated_polys_data)}, {1, OUTPUT_DESC(dilated_polys_offset)}, {2, OUTPUT_DESC(dilated_polys_size)}}; +REG_ADPT_DESC(BatchDilatePolys, kNameBatchDilatePolys, ADPT_DESC(BatchDilatePolys)) + +INPUT_MAP(ResizeAndClipPolys) = { + {1, INPUT_DESC(polys_data)}, {2, INPUT_DESC(polys_offset)}, {3, INPUT_DESC(polys_size)}, {4, INPUT_DESC(h_scale)}, + {5, INPUT_DESC(w_scale)}, {6, INPUT_DESC(img_h)}, {7, INPUT_DESC(img_w)}}; +ATTR_MAP(ResizeAndClipPolys) = EMPTY_ATTR_MAP; +OUTPUT_MAP(ResizeAndClipPolys) = {{0, OUTPUT_DESC(clipped_polys_data)}, + {1, OUTPUT_DESC(clipped_polys_offset)}, + {2, OUTPUT_DESC(clipped_polys_size)}, + {3, OUTPUT_DESC(clipped_polys_num)}}; +REG_ADPT_DESC(ResizeAndClipPolys, kNameResizeAndClipPolys, ADPT_DESC(ResizeAndClipPolys)) + +INPUT_MAP(OCRDetectionPostHandle) = { + 
{1, INPUT_DESC(img)}, {2, INPUT_DESC(polys_data)}, {3, INPUT_DESC(polys_offset)}, {4, INPUT_DESC(polys_size)}}; +ATTR_MAP(OCRDetectionPostHandle) = {{"format", ATTR_DESC(data_format, AnyTraits())}}; +OUTPUT_MAP(OCRDetectionPostHandle) = {{0, OUTPUT_DESC(imgs_data)}, + {1, OUTPUT_DESC(imgs_offset)}, + {2, OUTPUT_DESC(imgs_size)}, + {3, OUTPUT_DESC(rect_points)}}; +REG_ADPT_DESC(OCRDetectionPostHandle, kNameOCRDetectionPostHandle, ADPT_DESC(OCRDetectionPostHandle)) + +INPUT_MAP(OCRIdentifyPreHandle) = { + {1, INPUT_DESC(imgs_data)}, {2, INPUT_DESC(imgs_offset)}, {3, INPUT_DESC(imgs_size)}}; +ATTR_MAP(OCRIdentifyPreHandle) = {{"size", ATTR_DESC(size, AnyTraits>())}, + {"format", ATTR_DESC(data_format, AnyTraits())}}; +OUTPUT_MAP(OCRIdentifyPreHandle) = {{0, OUTPUT_DESC(resized_imgs)}}; +REG_ADPT_DESC(OCRIdentifyPreHandle, kNameOCRIdentifyPreHandle, ADPT_DESC(OCRIdentifyPreHandle)) + +INPUT_MAP(BatchEnqueue) = {{1, INPUT_DESC(x)}, {2, INPUT_DESC(queue_id)}}; +ATTR_MAP(BatchEnqueue) = {{"batch_size", ATTR_DESC(batch_size, AnyTraits())}, + {"queue_name", ATTR_DESC(queue_name, AnyTraits())}, + {"queue_depth", ATTR_DESC(queue_depth, AnyTraits())}, + {"pad_mode", ATTR_DESC(pad_mode, AnyTraits())}}; +OUTPUT_MAP(BatchEnqueue) = {{0, OUTPUT_DESC(enqueue_count)}}; +REG_ADPT_DESC(BatchEnqueue, kNameBatchEnqueue, ADPT_DESC(BatchEnqueue)) + +INPUT_MAP(Dequeue) = {{1, INPUT_DESC(queue_id)}}; +ATTR_MAP(Dequeue) = {{"output_type", ATTR_DESC(output_type, AnyTraits())}, + {"output_shape", ATTR_DESC(output_shape, AnyTraits>())}, + {"queue_name", ATTR_DESC(queue_name, AnyTraits())}}; +OUTPUT_MAP(Dequeue) = {{0, OUTPUT_DESC(data)}}; +REG_ADPT_DESC(Dequeue, kNameDequeue, ADPT_DESC(Dequeue)) + +INPUT_MAP(OCRRecognitionPreHandle) = {{1, INPUT_DESC(imgs_data)}, + {2, INPUT_DESC(imgs_offset)}, + {3, INPUT_DESC(imgs_size)}, + {4, INPUT_DESC(langs)}, + {5, INPUT_DESC(langs_score)}}; +ATTR_MAP(OCRRecognitionPreHandle) = {{"batch_size", ATTR_DESC(batch_size, AnyTraits())}, + {"format", 
ATTR_DESC(data_format, AnyTraits())}, + {"pad_mode", ATTR_DESC(pad_mode, AnyTraits())}}; +OUTPUT_MAP(OCRRecognitionPreHandle) = {{0, OUTPUT_DESC(imgs)}, + {1, OUTPUT_DESC(imgs_relation)}, + {2, OUTPUT_DESC(imgs_lang)}, + {3, OUTPUT_DESC(imgs_piece_fillers)}}; +REG_ADPT_DESC(OCRRecognitionPreHandle, kNameOCRRecognitionPreHandle, ADPT_DESC(OCRRecognitionPreHandle)) +} // namespace mindspore::transform diff --git a/mindspore/ccsrc/transform/graph_ir/op_declare/ocr_ops_declare.h b/mindspore/ccsrc/transform/graph_ir/op_declare/ocr_ops_declare.h new file mode 100644 index 00000000000..a14ffd5f8e0 --- /dev/null +++ b/mindspore/ccsrc/transform/graph_ir/op_declare/ocr_ops_declare.h @@ -0,0 +1,53 @@ +/** + * Copyright 2022 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef MINDSPORE_CCSRC_TRANSFORM_GRAPH_IR_OP_DECLARE_OCR_OPS_DECLARE_H_ +#define MINDSPORE_CCSRC_TRANSFORM_GRAPH_IR_OP_DECLARE_OCR_OPS_DECLARE_H_ + +#include +#include +#include "transform/graph_ir/op_declare/op_declare_macro.h" +#include "ops/ocr_ops.h" + +namespace mindspore::transform { +DECLARE_OP_ADAPTER(OCRDetectionPreHandle) +DECLARE_OP_USE_OUTPUT(OCRDetectionPreHandle) + +DECLARE_OP_ADAPTER(OCRFindContours) +DECLARE_OP_USE_OUTPUT(OCRFindContours) + +DECLARE_OP_ADAPTER(BatchDilatePolys) +DECLARE_OP_USE_OUTPUT(BatchDilatePolys) + +DECLARE_OP_ADAPTER(ResizeAndClipPolys) +DECLARE_OP_USE_OUTPUT(ResizeAndClipPolys) + +DECLARE_OP_ADAPTER(OCRDetectionPostHandle) +DECLARE_OP_USE_OUTPUT(OCRDetectionPostHandle) + +DECLARE_OP_ADAPTER(OCRIdentifyPreHandle) +DECLARE_OP_USE_OUTPUT(OCRIdentifyPreHandle) + +DECLARE_OP_ADAPTER(BatchEnqueue) +DECLARE_OP_USE_OUTPUT(BatchEnqueue) + +DECLARE_OP_ADAPTER(Dequeue) +DECLARE_OP_USE_OUTPUT(Dequeue) + +DECLARE_OP_ADAPTER(OCRRecognitionPreHandle) +DECLARE_OP_USE_OUTPUT(OCRRecognitionPreHandle) +} // namespace mindspore::transform +#endif // MINDSPORE_CCSRC_TRANSFORM_GRAPH_IR_OP_DECLARE_OCR_OPS_DECLARE_H_ diff --git a/mindspore/ccsrc/transform/graph_ir/op_declare/reduce_ops_declare.cc b/mindspore/ccsrc/transform/graph_ir/op_declare/reduce_ops_declare.cc index 0138c2193c6..25acc5aa9e7 100644 --- a/mindspore/ccsrc/transform/graph_ir/op_declare/reduce_ops_declare.cc +++ b/mindspore/ccsrc/transform/graph_ir/op_declare/reduce_ops_declare.cc @@ -89,6 +89,7 @@ INPUT_MAP(ReduceMean) = {{1, INPUT_DESC(x)}, {2, INPUT_DESC(axes)}}; ATTR_MAP(ReduceMean) = {{"keep_dims", ATTR_DESC(keep_dims, AnyTraits())}}; OUTPUT_MAP(ReduceMean) = {{0, OUTPUT_DESC(y)}}; REG_ADPT_DESC(ReduceMean, prim::kPrimReduceMean->name(), ADPT_DESC(ReduceMean)) +REG_ADPT_DESC(ReduceMeanV1, "ReduceMeanV1", ADPT_DESC(ReduceMean)) // ReduceMinD INPUT_MAP(ReduceMinD) = {{1, INPUT_DESC(x)}}; @@ -105,4 +106,9 @@ INPUT_ATTR_MAP(ReduceMaxD) = { ATTR_MAP(ReduceMaxD) = 
{{"keep_dims", ATTR_DESC(keep_dims, AnyTraits())}}; OUTPUT_MAP(ReduceMaxD) = {{0, OUTPUT_DESC(y)}}; REG_ADPT_DESC(ReduceMaxD, prim::kPrimReduceMax->name(), ADPT_DESC(ReduceMaxD)) + +INPUT_MAP(ReduceMax) = {{1, INPUT_DESC(x)}, {2, INPUT_DESC(axes)}}; +ATTR_MAP(ReduceMax) = {{"keep_dims", ATTR_DESC(keep_dims, AnyTraits())}}; +OUTPUT_MAP(ReduceMax) = {{0, OUTPUT_DESC(y)}}; +REG_ADPT_DESC(ReduceMax, "ReduceMaxV1", ADPT_DESC(ReduceMax)) } // namespace mindspore::transform diff --git a/mindspore/ccsrc/transform/graph_ir/op_declare/reduce_ops_declare.h b/mindspore/ccsrc/transform/graph_ir/op_declare/reduce_ops_declare.h index dd420a9a2e5..0bf6a655825 100644 --- a/mindspore/ccsrc/transform/graph_ir/op_declare/reduce_ops_declare.h +++ b/mindspore/ccsrc/transform/graph_ir/op_declare/reduce_ops_declare.h @@ -33,6 +33,9 @@ DECLARE_OP_ADAPTER(ReduceMaxD) DECLARE_OP_USE_INPUT_ATTR(ReduceMaxD) DECLARE_OP_USE_OUTPUT(ReduceMaxD) +DECLARE_OP_ADAPTER(ReduceMax) +DECLARE_OP_USE_OUTPUT(ReduceMax) + DECLARE_OP_ADAPTER(ReduceAllD) DECLARE_OP_USE_INPUT_ATTR(ReduceAllD) DECLARE_OP_USE_OUTPUT(ReduceAllD) diff --git a/mindspore/ccsrc/transform/graph_ir/op_declare/rnn_declare.cc b/mindspore/ccsrc/transform/graph_ir/op_declare/rnn_declare.cc index 5429ef6651c..a65b7278af7 100644 --- a/mindspore/ccsrc/transform/graph_ir/op_declare/rnn_declare.cc +++ b/mindspore/ccsrc/transform/graph_ir/op_declare/rnn_declare.cc @@ -16,6 +16,7 @@ #include "transform/graph_ir/op_declare/rnn_declare.h" #include +#include namespace mindspore::transform { // BasicLSTMCell @@ -139,4 +140,29 @@ ATTR_MAP(DynamicGRUV2Grad) = {{"direction", ATTR_DESC(direction, AnyTraits>())}, + {"activation_beta", ATTR_DESC(activation_beta, AnyTraits>())}, + {"activations", ATTR_DESC(activations, AnyTraits>())}, + {"clip", ATTR_DESC(clip, AnyTraits())}, + {"direction", ATTR_DESC(direction, AnyTraits())}, + {"hidden_size", ATTR_DESC(hidden_size, AnyTraits())}, + {"input_forget", ATTR_DESC(input_forget, AnyTraits())}, +}; 
+OUTPUT_MAP(CommonLSTM) = { + {0, OUTPUT_DESC(y)}, + {1, OUTPUT_DESC(y_h)}, + {2, OUTPUT_DESC(y_c)}, +}; +REG_ADPT_DESC(CommonLSTM, "CommonLSTM", ADPT_DESC(CommonLSTM)) } // namespace mindspore::transform diff --git a/mindspore/ccsrc/transform/graph_ir/op_declare/rnn_declare.h b/mindspore/ccsrc/transform/graph_ir/op_declare/rnn_declare.h index c634bb427b3..c815ed24570 100644 --- a/mindspore/ccsrc/transform/graph_ir/op_declare/rnn_declare.h +++ b/mindspore/ccsrc/transform/graph_ir/op_declare/rnn_declare.h @@ -48,5 +48,8 @@ DECLARE_OP_USE_OUTPUT(DynamicGRUV2) DECLARE_OP_ADAPTER(DynamicGRUV2Grad) DECLARE_OP_USE_OUTPUT(DynamicGRUV2Grad) + +DECLARE_OP_ADAPTER(CommonLSTM) +DECLARE_OP_USE_OUTPUT(CommonLSTM) } // namespace mindspore::transform #endif // MINDSPORE_CCSRC_TRANSFORM_GRAPH_IR_OP_DECLARE_RNN_DECLARE_H_ diff --git a/mindspore/ccsrc/transform/graph_ir/op_declare/selection_ops_declare.cc b/mindspore/ccsrc/transform/graph_ir/op_declare/selection_ops_declare.cc index cf5a9cf1b5e..cdeacffa2c9 100644 --- a/mindspore/ccsrc/transform/graph_ir/op_declare/selection_ops_declare.cc +++ b/mindspore/ccsrc/transform/graph_ir/op_declare/selection_ops_declare.cc @@ -30,6 +30,7 @@ REG_ADPT_DESC(CumsumD, kNameCumSum, ADPT_DESC(CumsumD)) INPUT_MAP(GatherV2) = {{1, INPUT_DESC(x)}, {2, INPUT_DESC(indices)}, {3, INPUT_DESC(axis)}}; ATTR_MAP(GatherV2) = EMPTY_ATTR_MAP; OUTPUT_MAP(GatherV2) = {{0, OUTPUT_DESC(y)}}; +REG_ADPT_DESC(GatherV2, prim::kPrimGatherV2->name(), ADPT_DESC(GatherV2)) // CumprodD INPUT_MAP(CumprodD) = {{1, INPUT_DESC(x)}}; @@ -39,6 +40,11 @@ ATTR_MAP(CumprodD) = {{"exclusive", ATTR_DESC(exclusive, AnyTraits())}, OUTPUT_MAP(CumprodD) = {{0, OUTPUT_DESC(y)}}; REG_ADPT_DESC(CumprodD, kNameCumProd, ADPT_DESC(CumprodD)) +INPUT_MAP(Tile) = {{1, INPUT_DESC(x)}, {2, INPUT_DESC(multiples)}}; +ATTR_MAP(Tile) = EMPTY_ATTR_MAP; +OUTPUT_MAP(Tile) = {{0, OUTPUT_DESC(y)}}; +REG_ADPT_DESC(Tile, "TileV1", ADPT_DESC(Tile)) + INPUT_MAP(Slice) = {{1, INPUT_DESC(x)}, {2, 
INPUT_DESC(offsets)}, {3, INPUT_DESC(size)}}; ATTR_MAP(Slice) = EMPTY_ATTR_MAP; OUTPUT_MAP(Slice) = {{0, OUTPUT_DESC(y)}}; @@ -109,6 +115,12 @@ ATTR_MAP(RangeD) = {{"start", ATTR_DESC(start, AnyTraits())}, OUTPUT_MAP(RangeD) = {{0, OUTPUT_DESC(y)}}; REG_ADPT_DESC(RangeD, kNameRange, ADPT_DESC(RangeD)) +// RangeV2 +INPUT_MAP(Range) = {{1, INPUT_DESC(start)}, {2, INPUT_DESC(limit)}, {3, INPUT_DESC(delta)}}; +ATTR_MAP(Range) = EMPTY_ATTR_MAP; +OUTPUT_MAP(Range) = {{0, OUTPUT_DESC(y)}}; +REG_ADPT_DESC(RangeV2, kNameRangeV2, ADPT_DESC(Range)) + // InplaceAddD INPUT_MAP(InplaceAddD) = {{1, INPUT_DESC(x)}, {2, INPUT_DESC(v)}}; ATTR_MAP(InplaceAddD) = {{"indices", ATTR_DESC(indices, AnyTraits>())}}; diff --git a/mindspore/ccsrc/transform/graph_ir/op_declare/selection_ops_declare.h b/mindspore/ccsrc/transform/graph_ir/op_declare/selection_ops_declare.h index d5028ea0af1..f0ba41c5116 100644 --- a/mindspore/ccsrc/transform/graph_ir/op_declare/selection_ops_declare.h +++ b/mindspore/ccsrc/transform/graph_ir/op_declare/selection_ops_declare.h @@ -73,6 +73,9 @@ DECLARE_OP_ADAPTER(CumprodD) DECLARE_OP_USE_INPUT_ATTR(CumprodD) DECLARE_OP_USE_OUTPUT(CumprodD) +DECLARE_OP_ADAPTER(Tile) +DECLARE_OP_USE_OUTPUT(Tile) + DECLARE_OP_ADAPTER(TileD) DECLARE_OP_USE_INPUT_ATTR(TileD) DECLARE_OP_USE_OUTPUT(TileD) @@ -87,6 +90,9 @@ DECLARE_OP_USE_OUTPUT(GatherV2D) DECLARE_OP_ADAPTER(RangeD) DECLARE_OP_USE_OUTPUT(RangeD) +DECLARE_OP_ADAPTER(Range) +DECLARE_OP_USE_OUTPUT(Range) + DECLARE_OP_ADAPTER(InplaceAddD) DECLARE_OP_USE_OUTPUT(InplaceAddD) diff --git a/mindspore/ccsrc/transform/graph_ir/op_declare/string_ops_declare.cc b/mindspore/ccsrc/transform/graph_ir/op_declare/string_ops_declare.cc index b034d3d638f..ec0edd6c3a6 100644 --- a/mindspore/ccsrc/transform/graph_ir/op_declare/string_ops_declare.cc +++ b/mindspore/ccsrc/transform/graph_ir/op_declare/string_ops_declare.cc @@ -16,11 +16,20 @@ #include "transform/graph_ir/op_declare/string_ops_declare.h" #include -#include namespace 
mindspore::transform { +INPUT_MAP(StringUpper) = {{1, INPUT_DESC(input)}}; +ATTR_MAP(StringUpper) = {{"encoding", ATTR_DESC(encoding, AnyTraits())}}; +OUTPUT_MAP(StringUpper) = {{0, OUTPUT_DESC(output)}}; +REG_ADPT_DESC(StringUpper, kNameStringUpper, ADPT_DESC(StringUpper)) + INPUT_MAP(StringLength) = {{1, INPUT_DESC(x)}}; ATTR_MAP(StringLength) = {{"unit", ATTR_DESC(unit, AnyTraits())}}; OUTPUT_MAP(StringLength) = {{0, OUTPUT_DESC(y)}}; REG_ADPT_DESC(StringLength, kNameStringLength, ADPT_DESC(StringLength)) + +INPUT_MAP(DecodeBase64) = {{1, INPUT_DESC(x)}}; +ATTR_MAP(DecodeBase64) = EMPTY_ATTR_MAP; +OUTPUT_MAP(DecodeBase64) = {{0, OUTPUT_DESC(y)}}; +REG_ADPT_DESC(DecodeBase64, kNameDecodeBase64, ADPT_DESC(DecodeBase64)) } // namespace mindspore::transform diff --git a/mindspore/ccsrc/transform/graph_ir/op_declare/string_ops_declare.h b/mindspore/ccsrc/transform/graph_ir/op_declare/string_ops_declare.h index e989a9b4867..6b472cc9424 100644 --- a/mindspore/ccsrc/transform/graph_ir/op_declare/string_ops_declare.h +++ b/mindspore/ccsrc/transform/graph_ir/op_declare/string_ops_declare.h @@ -17,11 +17,19 @@ #ifndef MINDSPORE_CCSRC_TRANSFORM_GRAPH_IR_OP_DECLARE_STRING_OPS_DECLARE_H_ #define MINDSPORE_CCSRC_TRANSFORM_GRAPH_IR_OP_DECLARE_STRING_OPS_DECLARE_H_ +#include +#include #include "transform/graph_ir/op_declare/op_declare_macro.h" #include "ops/string_ops.h" namespace mindspore::transform { +DECLARE_OP_ADAPTER(StringUpper) +DECLARE_OP_USE_OUTPUT(StringUpper) + DECLARE_OP_ADAPTER(StringLength) DECLARE_OP_USE_OUTPUT(StringLength) + +DECLARE_OP_ADAPTER(DecodeBase64) +DECLARE_OP_USE_OUTPUT(DecodeBase64) } // namespace mindspore::transform #endif // MINDSPORE_CCSRC_TRANSFORM_GRAPH_IR_OP_DECLARE_STRING_OPS_DECLARE_H_ diff --git a/mindspore/ccsrc/transform/graph_ir/op_declare/transformation_ops_declare.cc b/mindspore/ccsrc/transform/graph_ir/op_declare/transformation_ops_declare.cc index 538f6d9ca6e..b4085ac8ba3 100644 --- 
a/mindspore/ccsrc/transform/graph_ir/op_declare/transformation_ops_declare.cc +++ b/mindspore/ccsrc/transform/graph_ir/op_declare/transformation_ops_declare.cc @@ -48,6 +48,12 @@ ATTR_MAP(TransposeD) = EMPTY_ATTR_MAP; // Do not set Transpose operator output descriptor REG_ADPT_DESC(TransposeD, prim::kPrimTranspose->name(), ADPT_DESC(TransposeD)) +// Transpose +INPUT_MAP(Transpose) = {{1, INPUT_DESC(x)}, {2, INPUT_DESC(perm)}}; +ATTR_MAP(Transpose) = EMPTY_ATTR_MAP; +// Do not set Transpose operator output descriptor +REG_ADPT_DESC(Transpose, "TransposeV1", ADPT_DESC(Transpose)) + // SpaceToDepth INPUT_MAP(SpaceToDepth) = {{1, INPUT_DESC(x)}}; ATTR_MAP(SpaceToDepth) = {{"block_size", ATTR_DESC(block_size, AnyTraits())}}; diff --git a/mindspore/ccsrc/transform/graph_ir/op_declare/transformation_ops_declare.h b/mindspore/ccsrc/transform/graph_ir/op_declare/transformation_ops_declare.h index 4ab0ffee4ba..c7520eaec2b 100644 --- a/mindspore/ccsrc/transform/graph_ir/op_declare/transformation_ops_declare.h +++ b/mindspore/ccsrc/transform/graph_ir/op_declare/transformation_ops_declare.h @@ -31,6 +31,8 @@ DECLARE_OP_USE_DYN_OUTPUT(Unpack) DECLARE_OP_ADAPTER(TransposeD) DECLARE_OP_USE_INPUT_ATTR(TransposeD) +DECLARE_OP_ADAPTER(Transpose) + DECLARE_OP_ADAPTER(Flatten) DECLARE_OP_USE_OUTPUT(Flatten) diff --git a/mindspore/core/ops/core_ops.h b/mindspore/core/ops/core_ops.h index 840affa0562..bda10c3f71e 100644 --- a/mindspore/core/ops/core_ops.h +++ b/mindspore/core/ops/core_ops.h @@ -568,6 +568,7 @@ GVAR_DEF(PrimitivePtr, kPrimScatterNdMin, std::make_shared("ScatterNd GVAR_DEF(PrimitivePtr, kPrimScatterNdMul, std::make_shared("ScatterNdMul")); GVAR_DEF(PrimitivePtr, kPrimScatterNdDiv, std::make_shared("ScatterNdDiv")); GVAR_DEF(PrimitivePtr, kPrimScatterUpdate, std::make_shared("ScatterUpdate")); +GVAR_DEF(PrimitivePtr, kPrimScatterElements, std::make_shared("ScatterElements")); GVAR_DEF(PrimitivePtr, kPrimScatterAddWithAxis, std::make_shared(kScatterAddWithAxis)); 
GVAR_DEF(PrimitivePtr, kPrimTensorScatterElements, std::make_shared("TensorScatterElements")); GVAR_DEF(PrimitivePtr, kPrimTensorScatterUpdate, std::make_shared("TensorScatterUpdate")); @@ -1443,11 +1444,15 @@ GVAR_DEF(PrimitivePtr, kPrimTensorArrayStack, std::make_shared("Tenso GVAR_DEF(PrimitivePtr, kPrimTensorArray, std::make_shared("TensorArray")); GVAR_DEF(PrimitivePtr, kPrimTensorArrayWrite, std::make_shared("TensorArrayWrite")); GVAR_DEF(PrimitivePtr, kPrimTensorArrayGather, std::make_shared("TensorArrayGather")); +GVAR_DEF(PrimitivePtr, kPrimPartitionedCall, std::make_shared("PartitionedCall")); +GVAR_DEF(PrimitivePtr, kPrimDecodeImage, std::make_shared("DecodeImage")); +GVAR_DEF(PrimitivePtr, kPrimStridedSliceV2, std::make_shared("StridedSliceV2")); GVAR_DEF(PrimitivePtr, kPrimKMeansCentroids, std::make_shared("KMeansCentroids")); GVAR_DEF(PrimitivePtr, kPrimReservoirReplayBufferCreate, std::make_shared("ReservoirReplayBufferCreate")); GVAR_DEF(PrimitivePtr, kPrimReservoirReplayBufferPush, std::make_shared("ReservoirReplayBufferPush")); GVAR_DEF(PrimitivePtr, kPrimReservoirReplayBufferSample, std::make_shared("ReservoirReplayBufferSample")); GVAR_DEF(PrimitivePtr, kPrimReservoirReplayBufferDestroy, std::make_shared("ReservoirReplayBufferDestroy")); +GVAR_DEF(PrimitivePtr, kPrimOCRDetectionPreHandle, std::make_shared("OCRDetectionPreHandle")); // AdamApplyOne GVAR_DEF(PrimitivePtr, kPrimAdamApplyOne, std::make_shared("AdamApplyOne")); diff --git a/mindspore/core/ops/scatter_elements.cc b/mindspore/core/ops/scatter_elements.cc new file mode 100644 index 00000000000..5ec0cecc73e --- /dev/null +++ b/mindspore/core/ops/scatter_elements.cc @@ -0,0 +1,53 @@ +/** + * Copyright 2022 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "ops/scatter_elements.h" +#include +#include +#include "ops/op_utils.h" +#include "utils/check_convert_utils.h" +#include "abstract/ops/primitive_infer_map.h" +#include "mindapi/src/helper.h" + +namespace { +constexpr size_t kScatterElementsArgSize = 3; +} // namespace + +namespace mindspore { +namespace ops { +int64_t ScatterElements::get_axis() const { return axis_; } + +void ScatterElements::set_axis(const int64_t axis) { axis_ = axis; } + +AbstractBasePtr ScatterElementsInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const abstract::AbstractBasePtrList &args_spec_list) { + const std::string op_name = primitive->name(); + CheckRequiredArgsSize(op_name, args_spec_list, kScatterElementsArgSize); + auto x = abstract::CheckArg(op_name, args_spec_list, 0); + MS_EXCEPTION_IF_NULL(x); + MS_EXCEPTION_IF_NULL(x->shape()); + ShapeVector shape = x->shape()->shape(); + ShapeVector min_shape = x->shape()->min_shape(); + ShapeVector max_shape = x->shape()->max_shape(); + abstract::CheckMinMaxShape(shape, &min_shape, &max_shape); + return std::make_shared(x->element(), + std::make_shared(shape, min_shape, max_shape)); +} + +MIND_API_OPERATOR_IMPL(ScatterElements, BaseOperator); +REGISTER_PRIMITIVE_EVAL_IMPL(ScatterElements, prim::kPrimScatterElements, ScatterElementsInfer, nullptr, true); +} // namespace ops +} // namespace mindspore diff --git a/mindspore/core/ops/scatter_elements.h b/mindspore/core/ops/scatter_elements.h new file mode 100644 index 00000000000..fceaf665e74 --- /dev/null +++ 
b/mindspore/core/ops/scatter_elements.h @@ -0,0 +1,48 @@ +/** + * Copyright 2022 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_CORE_OPS_SCATTER_ELEMENTS_H_ +#define MINDSPORE_CORE_OPS_SCATTER_ELEMENTS_H_ +#include +#include + +#include "ops/base_operator.h" +#include "mindapi/base/types.h" + +namespace mindspore { +namespace ops { +constexpr auto kNameScatterElements = "ScatterElements"; +class MIND_API ScatterElements : public BaseOperator { + public: + MIND_API_BASE_MEMBER(ScatterElements); + /// \brief Constructor. + ScatterElements() : BaseOperator(kNameScatterElements) { InitIOName({"indices", "update", "shape"}, {"output"}); } + /// \brief Init. 
+ void Init() const {} + + void set_axis(const int64_t axis); + + int64_t get_axis() const; + + private: + int64_t axis_ = 0; +}; +abstract::AbstractBasePtr ScatterElementsInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args); +} // namespace ops +} // namespace mindspore + +#endif // MINDSPORE_CORE_OPS_SCATTER_ELEMENTS_H_ diff --git a/mindspore/core/ops/tensor_scatter_elements.h b/mindspore/core/ops/tensor_scatter_elements.h index 99e579a9966..ad09bbf863a 100644 --- a/mindspore/core/ops/tensor_scatter_elements.h +++ b/mindspore/core/ops/tensor_scatter_elements.h @@ -25,14 +25,14 @@ namespace mindspore { namespace ops { -constexpr auto kNameScatterElements = "TensorScatterElements"; +constexpr auto kNameTensorScatterElements = "TensorScatterElements"; /// \brief Updates tensor values by using input indices and value. /// Refer to Python API @ref mindspore.ops.TensorScatterElements for more details. class MIND_API TensorScatterElements : public BaseOperator { public: MIND_API_BASE_MEMBER(TensorScatterElements); /// \brief Constructor. - TensorScatterElements() : BaseOperator(kNameScatterElements) { + TensorScatterElements() : BaseOperator(kNameTensorScatterElements) { InitIOName({"input_x", "indices", "update"}, {"output"}); } /// \brief Init. Refer to the parameters of Python API @ref mindspore.ops.TensorScatterElements for the inputs. diff --git a/mindspore/python/mindspore/ops/operations/_inner_ops.py b/mindspore/python/mindspore/ops/operations/_inner_ops.py index 975cd06d1a0..397d7304d0a 100755 --- a/mindspore/python/mindspore/ops/operations/_inner_ops.py +++ b/mindspore/python/mindspore/ops/operations/_inner_ops.py @@ -1521,6 +1521,34 @@ class NonZeroWithValueShape(Primitive): self.init_prim_io_names(inputs=['value', 'index', 'count'], outputs=['out_value', 'out_index']) +class DecodeImage(PrimitiveWithInfer): + """ + Returns image data that parse from string Tensor. 
+ + Inputs: + - **x** (Tensor), a Tensor of type string. 0-D. The JPEG, GIF, PNG, BMP-encoded image. + + Outputs: + A Tensor of type uint8, uint16, float. + + Supported Platforms: + ``Ascend`` + + Examples: + """ + @prim_attr_register + def __init__(self, channels=0, dtype=mstype.uint8, expand_animations=False, _op_max_shape="8192,8192,3", + _op_max_size=[8000000]): + self.init_prim_io_names(inputs=["contents"], outputs=["image"]) + self.res_type = dtype + + def infer_shape(self, x): + return (-1, -1, 3) + + def infer_dtype(self, x): + return self.res_type + + class SliceGetItem(Primitive): """ using SliceGetItem to get slice's attribute of 'start' 'stop' 'step' @@ -1727,6 +1755,33 @@ class ParallelResizeBilinear(PrimitiveWithInfer): 'value': None} + +class PartitionedCall(PrimitiveWithInfer): + """ + Pass the input tensors to the subgraph and return the output tensors. + + Inputs: + - **inputs** (Tuple), the input tensors, which will be passed to subgraph. + + Outputs: + - outputs(Tuple), the output tensor returned by subgraph. + + Supported Platforms: + ``Ascend`` + + Examples: + """ + @prim_attr_register + def __init__(self, graph, executor_type=""): + self.add_prim_attr("executor_type", executor_type) + self.graph = graph + + def infer_shape(self, *inputs): + raise NotImplementedError + + def infer_dtype(self, *inputs): + raise NotImplementedError + + class CellBackwardHook(PrimitiveWithInfer): r""" This operator is used to hook input gradient and output gradient of Cell object. 
diff --git a/mindspore/python/mindspore/ops/operations/_ocr_ops.py b/mindspore/python/mindspore/ops/operations/_ocr_ops.py index f41c25ad3f5..8903efc6388 100644 --- a/mindspore/python/mindspore/ops/operations/_ocr_ops.py +++ b/mindspore/python/mindspore/ops/operations/_ocr_ops.py @@ -35,7 +35,7 @@ class GetShape(PrimitiveWithInfer): self.init_prim_io_names(inputs=["x"], outputs=["y"]) def infer_shape(self, x): - return (x[0],) + return (len(x[0]),) def infer_dtype(self, x): return mstype.int32 diff --git a/mindspore/python/mindspore/ops/operations/array_ops.py b/mindspore/python/mindspore/ops/operations/array_ops.py index 7685ee0f728..a51479ff38f 100755 --- a/mindspore/python/mindspore/ops/operations/array_ops.py +++ b/mindspore/python/mindspore/ops/operations/array_ops.py @@ -900,6 +900,13 @@ class TensorShape(Primitive): self.init_prim_io_names(inputs=['input_x'], outputs=['output']) +class Unsqueeze(PrimitiveWithCheck): + @prim_attr_register + def __init__(self, axis): + self.init_prim_io_names(inputs=['x'], outputs=['y']) + self.axis = axis + + class DynamicShape(Primitive): """ Same as operator TensorShape. DynamicShape will be deprecated in the future. @@ -911,7 +918,7 @@ class DynamicShape(Primitive): @deprecated("1.7", "TensorShape", True) @prim_attr_register - def __init__(self): + def __init__(self, dtype=9): """init Shape""" self.init_prim_io_names(inputs=['tensor'], outputs=['output']) self.add_prim_attr('is_dynamic_shape', True)