From d9be0c102de16f75bde15c084148f2427c32277d Mon Sep 17 00:00:00 2001
From: jinyaohui
Date: Wed, 3 Feb 2021 19:47:53 +0800
Subject: [PATCH] add some ops

---
 mindspore/ccsrc/CMakeLists.txt | 9 +-
 .../graph_kernel/graph_kernel_helper.cc | 2 +-
 .../ccsrc/backend/session/session_basic.cc | 6 +-
 mindspore/core/CMakeLists.txt | 20 +-
 mindspore/core/base/core_ops.h | 129 ++++-
 mindspore/core/c_ops/abs.cc | 19 -
 mindspore/core/c_ops/apply_momentum.cc | 51 --
 mindspore/core/c_ops/audio_spectrogram.cc | 54 --
 mindspore/core/c_ops/batch_norm.cc | 59 ---
 mindspore/core/c_ops/batch_norm_fold.cc | 21 -
 .../core/c_ops/binary_cross_entropy_grad.cc | 31 --
 mindspore/core/c_ops/broadcast.cc | 42 --
 mindspore/core/c_ops/ceil.cc | 21 -
 mindspore/core/c_ops/cos.cc | 21 -
 mindspore/core/c_ops/custom_predict.cc | 44 --
 mindspore/core/c_ops/div.cc | 21 -
 mindspore/core/c_ops/equal.cc | 20 -
 mindspore/core/c_ops/exp.cc | 20 -
 .../c_ops/fake_quant_with_min_max_vars.cc | 44 --
 mindspore/core/c_ops/fft_imag.cc | 22 -
 mindspore/core/c_ops/flatten_grad.cc | 21 -
 mindspore/core/c_ops/hashtable_lookup.cc | 22 -
 mindspore/core/c_ops/less.cc | 21 -
 mindspore/core/c_ops/less_equal.cc | 20 -
 .../c_ops/local_response_normalization.cc | 65 ---
 mindspore/core/c_ops/log.cc | 21 -
 mindspore/core/c_ops/logical_not.cc | 20 -
 mindspore/core/c_ops/logical_or.cc | 20 -
 mindspore/core/c_ops/lstm.cc | 82 ---
 .../core/load_mindir/anf_model_parser.cc | 4 +-
 mindspore/core/ops/abs.cc | 62 +++
 mindspore/core/{c_ops => ops}/abs.h | 16 +-
 mindspore/core/ops/adam.cc | 88 ++++
 mindspore/core/{c_ops => ops}/adam.h | 21 +-
 mindspore/core/ops/add.cc | 56 ++
 mindspore/core/{c_ops => ops}/add.h | 11 +-
 mindspore/core/{c_ops => ops}/add_fold.cc | 4 +-
 mindspore/core/{c_ops => ops}/add_fold.h | 10 +-
 mindspore/core/ops/adder.cc | 108 ++++
 mindspore/core/ops/adder.h | 62 +++
 mindspore/core/ops/addn.cc | 70 +++
 mindspore/core/{c_ops => ops}/addn.h | 15 +-
 .../core/{c_ops/concat.cc => ops/all.cc} | 23 +-
 mindspore/core/ops/all.h | 38 ++
 mindspore/core/ops/apply_momentum.cc | 89 ++++
 .../core/{c_ops => ops}/apply_momentum.h | 26 +-
 mindspore/core/ops/arg_max.cc | 73 +++
 mindspore/core/ops/arg_max.h | 50 ++
 mindspore/core/ops/arg_min.cc | 73 +++
 mindspore/core/{c_ops => ops}/arg_min.h | 25 +-
 mindspore/core/ops/asin.cc | 52 ++
 mindspore/core/ops/asin.h | 42 ++
 mindspore/core/ops/assert.cc | 78 +++
 mindspore/core/ops/assert.h | 44 ++
 mindspore/core/{c_ops => ops}/assign.cc | 12 +-
 mindspore/core/{c_ops => ops}/assign.h | 15 +-
 mindspore/core/ops/assign_add.cc | 53 ++
 mindspore/core/{c_ops => ops}/assign_add.h | 16 +-
 mindspore/core/ops/atan.cc | 50 ++
 mindspore/core/{c_ops => ops}/atan.h | 13 +-
 mindspore/core/ops/audio_spectrogram.cc | 125 +++++
 .../core/{c_ops => ops}/audio_spectrogram.h | 23 +-
 mindspore/core/{c_ops => ops}/avg_pool.cc | 53 +-
 mindspore/core/{c_ops => ops}/avg_pool.h | 23 +-
 mindspore/core/ops/batch_norm.cc | 140 +++++
 mindspore/core/{c_ops => ops}/batch_norm.h | 25 +-
 mindspore/core/ops/batch_norm_fold.cc | 116 ++++
 mindspore/core/ops/batch_norm_fold.h | 54 ++
 mindspore/core/ops/batch_to_space.cc | 81 +++
 mindspore/core/ops/batch_to_space.h | 47 ++
 mindspore/core/ops/batch_to_space_nd.cc | 109 ++++
 mindspore/core/ops/batch_to_space_nd.h | 48 ++
 mindspore/core/{c_ops => ops}/bias_add.cc | 9 +-
 mindspore/core/{c_ops => ops}/bias_add.h | 12 +-
 mindspore/core/ops/binary_cross_entropy.cc | 94 ++++
 .../{c_ops => ops}/binary_cross_entropy.h | 23 +-
 mindspore/core/{c_ops => ops}/black_box.cc | 10 +-
 mindspore/core/{c_ops => ops}/black_box.h |
14 +- mindspore/core/ops/broadcast.cc | 70 +++ mindspore/core/{c_ops => ops}/broadcast.h | 21 +- mindspore/core/ops/broadcast_to.cc | 88 ++++ mindspore/core/{c_ops => ops}/broadcast_to.h | 12 +- mindspore/core/{c_ops => ops}/cast.cc | 6 +- mindspore/core/{c_ops => ops}/cast.h | 12 +- mindspore/core/ops/ceil.cc | 49 ++ mindspore/core/{c_ops => ops}/ceil.h | 15 +- mindspore/core/{c_ops => ops}/clip.cc | 6 +- mindspore/core/{c_ops => ops}/clip.h | 10 +- mindspore/core/ops/concat.cc | 85 +++ mindspore/core/{c_ops => ops}/concat.h | 16 +- mindspore/core/ops/constant.cc | 58 ++ mindspore/core/ops/constant.h | 42 ++ .../core/{c_ops => ops}/constant_of_shape.cc | 29 +- .../core/{c_ops => ops}/constant_of_shape.h | 12 +- .../core/{c_ops => ops}/control_depend.cc | 12 +- .../core/{c_ops => ops}/control_depend.h | 20 +- mindspore/core/{c_ops => ops}/conv2d.cc | 218 ++++---- mindspore/core/{c_ops => ops}/conv2d.h | 45 +- mindspore/core/ops/conv2d_transpose.cc | 199 +++++++ mindspore/core/ops/conv2d_transpose.h | 74 +++ mindspore/core/ops/cos.cc | 56 ++ mindspore/core/{c_ops => ops}/cos.h | 12 +- mindspore/core/ops/crop.cc | 62 +++ mindspore/core/ops/crop.h | 46 ++ mindspore/core/{c_ops => ops}/custom.cc | 6 +- mindspore/core/{c_ops => ops}/custom.h | 10 +- mindspore/core/ops/custom_extract_features.cc | 56 ++ mindspore/core/ops/custom_extract_features.h | 41 ++ mindspore/core/ops/custom_normalize.cc | 69 +++ .../core/{c_ops => ops}/custom_normalize.h | 13 +- mindspore/core/ops/custom_predict.cc | 65 +++ .../core/{c_ops => ops}/custom_predict.h | 24 +- mindspore/core/{c_ops => ops}/depend.cc | 6 +- mindspore/core/{c_ops => ops}/depend.h | 12 +- mindspore/core/ops/depth_to_space.cc | 87 +++ .../core/{c_ops => ops}/depth_to_space.h | 18 +- .../core/{c_ops => ops}/depthwise_conv2d.cc | 50 +- .../core/{c_ops => ops}/depthwise_conv2d.h | 39 +- mindspore/core/ops/detection_post_process.cc | 174 ++++++ mindspore/core/ops/detection_post_process.h | 71 +++ mindspore/core/ops/div.cc | 57 ++ mindspore/core/ops/div.h | 43 ++ mindspore/core/ops/dropout.cc | 70 +++ mindspore/core/{c_ops => ops}/dropout.h | 22 +- .../embedding_lookup.cc => ops/eltwise.cc} | 23 +- mindspore/core/ops/eltwise.h | 38 ++ mindspore/core/ops/elu.cc | 75 +++ mindspore/core/ops/elu.h | 43 ++ mindspore/core/ops/embedding_lookup.cc | 87 +++ .../core/{c_ops => ops}/embedding_lookup.h | 19 +- mindspore/core/ops/equal.cc | 58 ++ mindspore/core/{c_ops => ops}/equal.h | 17 +- mindspore/core/ops/exp.cc | 54 ++ mindspore/core/{c_ops => ops}/exp.h | 19 +- mindspore/core/ops/expand_dims.cc | 61 +++ mindspore/core/{c_ops => ops}/expand_dims.h | 15 +- .../core/ops/fake_quant_with_min_max_vars.cc | 91 ++++ .../fake_quant_with_min_max_vars.h | 20 +- ...ake_quant_with_min_max_vars_per_channel.cc | 77 +++ ...fake_quant_with_min_max_vars_per_channel.h | 49 ++ .../{c_ops/reshape.cc => ops/fft_imag.cc} | 30 +- mindspore/core/{c_ops => ops}/fft_imag.h | 13 +- mindspore/core/ops/fft_real.cc | 43 ++ mindspore/core/ops/fft_real.h | 43 ++ mindspore/core/ops/fill.cc | 46 ++ mindspore/core/{c_ops => ops}/fill.h | 17 +- mindspore/core/ops/flatten.cc | 59 +++ mindspore/core/{c_ops => ops}/flatten.h | 16 +- mindspore/core/ops/floor.cc | 60 +++ mindspore/core/{c_ops => ops}/floor.h | 13 +- mindspore/core/{c_ops => ops}/floor_div.cc | 4 +- mindspore/core/{c_ops => ops}/floor_div.h | 10 +- mindspore/core/{c_ops => ops}/floor_mod.cc | 4 +- mindspore/core/{c_ops => ops}/floor_mod.h | 10 +- mindspore/core/ops/fused_batch_norm.cc | 52 ++ mindspore/core/ops/fused_batch_norm.h | 
49 ++ mindspore/core/ops/fusion/activation.cc | 66 +++ mindspore/core/ops/fusion/activation.h | 45 ++ mindspore/core/ops/fusion/add_fusion.cc | 68 +++ mindspore/core/ops/fusion/add_fusion.h | 45 ++ mindspore/core/ops/fusion/adder_fusion.cc | 50 ++ mindspore/core/ops/fusion/adder_fusion.h | 46 ++ mindspore/core/ops/fusion/arg_max_fusion.cc | 40 ++ mindspore/core/ops/fusion/arg_max_fusion.h | 48 ++ mindspore/core/ops/fusion/arg_min_fusion.cc | 45 ++ mindspore/core/ops/fusion/arg_min_fusion.h | 47 ++ mindspore/core/ops/fusion/avg_pool_fusion.cc | 111 ++++ mindspore/core/ops/fusion/avg_pool_fusion.h | 50 ++ .../fusion/conv2d_backprop_filter_fusion.cc | 66 +++ .../fusion/conv2d_backprop_filter_fusion.h | 50 ++ .../fusion/conv2d_backprop_input_fusion.cc | 62 +++ .../ops/fusion/conv2d_backprop_input_fusion.h | 44 ++ mindspore/core/ops/fusion/conv2d_fusion.cc | 62 +++ mindspore/core/ops/fusion/conv2d_fusion.h | 47 ++ .../ops/fusion/conv2d_transpose_fusion.cc | 69 +++ .../core/ops/fusion/conv2d_transpose_fusion.h | 48 ++ .../ops/fusion/depthwise_conv2d_fusion.cc | 75 +++ .../core/ops/fusion/depthwise_conv2d_fusion.h | 41 ++ mindspore/core/ops/fusion/div_fusion.cc | 37 ++ mindspore/core/ops/fusion/div_fusion.h | 37 ++ .../fusion/embedding_lookup_fusion.cc} | 21 +- .../core/ops/fusion/embedding_lookup_fusion.h | 44 ++ mindspore/core/ops/fusion/exp_fusion.cc | 53 ++ mindspore/core/ops/fusion/exp_fusion.h | 42 ++ mindspore/core/ops/fusion/full_connection.cc | 116 ++++ mindspore/core/ops/fusion/full_connection.h | 51 ++ .../core/ops/fusion/l2_normalize_fusion.cc | 42 ++ .../core/ops/fusion/l2_normalize_fusion.h | 40 ++ .../core/ops/fusion/layer_norm_fusion.cc | 38 ++ mindspore/core/ops/fusion/layer_norm_fusion.h | 46 ++ mindspore/core/ops/fusion/max_pool_fusion.cc | 109 ++++ mindspore/core/ops/fusion/max_pool_fusion.h | 50 ++ mindspore/core/ops/fusion/mul_fusion.cc | 38 ++ mindspore/core/ops/fusion/mul_fusion.h | 38 ++ mindspore/core/ops/fusion/pad_fusion.cc | 52 ++ mindspore/core/ops/fusion/pad_fusion.h | 40 ++ .../fusion/partial_fusion.cc} | 26 +- mindspore/core/ops/fusion/partial_fusion.h | 38 ++ mindspore/core/ops/fusion/pow_fusion.cc | 66 +++ mindspore/core/ops/fusion/pow_fusion.h | 40 ++ mindspore/core/ops/fusion/prelu_fusion.cc | 48 ++ mindspore/core/ops/fusion/prelu_fusion.h | 42 ++ mindspore/core/ops/fusion/reduce_fusion.cc | 74 +++ mindspore/core/ops/fusion/reduce_fusion.h | 52 ++ mindspore/core/ops/fusion/scale_fusion.cc | 40 ++ mindspore/core/ops/fusion/scale_fusion.h | 37 ++ mindspore/core/ops/fusion/slice_fusion.cc | 70 +++ mindspore/core/ops/fusion/slice_fusion.h | 45 ++ mindspore/core/ops/fusion/sub_fusion.cc | 37 ++ mindspore/core/ops/fusion/sub_fusion.h | 37 ++ .../fusion/tile_fusion.cc} | 21 +- mindspore/core/ops/fusion/tile_fusion.h | 39 ++ .../fusion/topk_fusion.cc} | 26 +- mindspore/core/ops/fusion/topk_fusion.h | 41 ++ mindspore/core/ops/gather.cc | 52 ++ mindspore/core/{c_ops => ops}/gather.h | 13 +- mindspore/core/ops/gather_nd.cc | 73 +++ mindspore/core/{c_ops => ops}/gather_nd.h | 15 +- mindspore/core/ops/gelu.cc | 58 ++ mindspore/core/ops/gelu.h | 42 ++ mindspore/core/ops/grad/activation_grad.cc | 53 ++ mindspore/core/ops/grad/activation_grad.h | 45 ++ mindspore/core/ops/grad/add_grad.cc | 31 ++ mindspore/core/ops/grad/add_grad.h | 40 ++ mindspore/core/ops/grad/avg_pool_grad.cc | 38 ++ mindspore/core/ops/grad/avg_pool_grad.h | 45 ++ mindspore/core/ops/grad/batch_norm_grad.cc | 73 +++ mindspore/core/ops/grad/batch_norm_grad.h | 46 ++ mindspore/core/ops/grad/bias_grad.cc | 52 
++ .../core/{c_ops => ops/grad}/bias_grad.h | 17 +- .../ops/grad/binary_cross_entropy_grad.cc | 80 +++ .../grad}/binary_cross_entropy_grad.h | 21 +- mindspore/core/ops/grad/bn_grad.cc | 45 ++ mindspore/core/ops/grad/bn_grad.h | 41 ++ .../core/ops/grad/conv2d_backprop_filter.cc | 157 ++++++ .../core/ops/grad/conv2d_backprop_filter.h | 73 +++ .../core/ops/grad/conv2d_backprop_input.cc | 145 +++++ .../core/ops/grad/conv2d_backprop_input.h | 66 +++ .../core/ops/grad/de_conv2d_grad_filter.cc | 138 +++++ .../core/ops/grad/de_conv2d_grad_filter.h | 67 +++ .../gather_nd.cc => ops/grad/div_grad.cc} | 7 +- mindspore/core/ops/grad/div_grad.h | 36 ++ mindspore/core/ops/grad/dropout_grad.cc | 70 +++ mindspore/core/ops/grad/dropout_grad.h | 45 ++ mindspore/core/ops/grad/flatten_grad.cc | 41 ++ .../core/{c_ops => ops/grad}/flatten_grad.h | 12 +- .../core/ops/grad/group_conv2d_grad_input.cc | 167 ++++++ .../core/ops/grad/group_conv2d_grad_input.h | 74 +++ .../core/{c_ops => ops/grad}/log_grad.cc | 8 +- mindspore/core/{c_ops => ops/grad}/log_grad.h | 10 +- mindspore/core/ops/grad/max_pool_grad.cc | 69 +++ mindspore/core/ops/grad/max_pool_grad.h | 50 ++ .../adam.cc => ops/grad/maximum_grad.cc} | 33 +- mindspore/core/ops/grad/maximum_grad.h | 40 ++ mindspore/core/ops/grad/minimum_grad.cc | 43 ++ mindspore/core/ops/grad/minimum_grad.h | 40 ++ mindspore/core/ops/grad/mul_grad.cc | 25 + mindspore/core/ops/grad/mul_grad.h | 39 ++ mindspore/core/ops/grad/neg_grad.cc | 23 + mindspore/core/ops/grad/neg_grad.h | 44 ++ mindspore/core/ops/grad/pool_grad.cc | 89 ++++ mindspore/core/ops/grad/pool_grad.h | 53 ++ mindspore/core/ops/grad/pooling_grad.cc | 105 ++++ mindspore/core/ops/grad/pooling_grad.h | 62 +++ mindspore/core/ops/grad/power_grad.cc | 55 ++ mindspore/core/ops/grad/power_grad.h | 46 ++ .../sigmoid_cross_entropy_with_logits_grad.cc | 66 +++ .../sigmoid_cross_entropy_with_logits_grad.h | 46 ++ .../core/ops/grad/smooth_l1_loss_grad.cc | 69 +++ mindspore/core/ops/grad/smooth_l1_loss_grad.h | 44 ++ mindspore/core/ops/grad/sub_grad.cc | 25 + mindspore/core/ops/grad/sub_grad.h | 39 ++ mindspore/core/{c_ops => ops}/greater.cc | 4 +- mindspore/core/{c_ops => ops}/greater.h | 10 +- .../core/{c_ops => ops}/greater_equal.cc | 9 +- mindspore/core/{c_ops => ops}/greater_equal.h | 12 +- mindspore/core/ops/hashtable_lookup.cc | 56 ++ .../core/{c_ops => ops}/hashtable_lookup.h | 13 +- mindspore/core/{c_ops => ops}/identity.cc | 8 +- mindspore/core/{c_ops => ops}/identity.h | 13 +- .../core/{c_ops => ops}/instance_norm.cc | 10 +- mindspore/core/{c_ops => ops}/instance_norm.h | 14 +- mindspore/core/ops/l2_normalize.cc | 65 +++ mindspore/core/{c_ops => ops}/l2_normalize.h | 28 +- mindspore/core/{c_ops => ops}/layer_norm.cc | 20 +- mindspore/core/{c_ops => ops}/layer_norm.h | 27 +- mindspore/core/ops/leaky_relu.cc | 57 ++ mindspore/core/ops/leaky_relu.h | 47 ++ mindspore/core/ops/less.cc | 56 ++ mindspore/core/{c_ops => ops}/less.h | 14 +- mindspore/core/ops/less_equal.cc | 57 ++ mindspore/core/{c_ops => ops}/less_equal.h | 17 +- .../core/ops/local_response_normalization.cc | 98 ++++ .../local_response_normalization.h | 24 +- mindspore/core/ops/log.cc | 47 ++ mindspore/core/{c_ops => ops}/log.h | 14 +- mindspore/core/ops/logical_and.cc | 61 +++ mindspore/core/{c_ops => ops}/logical_and.h | 13 +- mindspore/core/ops/logical_not.cc | 57 ++ mindspore/core/{c_ops => ops}/logical_not.h | 16 +- mindspore/core/ops/logical_or.cc | 62 +++ mindspore/core/{c_ops => ops}/logical_or.h | 15 +- mindspore/core/ops/logical_xor.cc | 23 + 
mindspore/core/ops/logical_xor.h | 36 ++ mindspore/core/{c_ops => ops}/loop.cc | 12 +- mindspore/core/{c_ops => ops}/loop.h | 14 +- .../core/{c_ops => ops}/lp_normalization.cc | 12 +- .../core/{c_ops => ops}/lp_normalization.h | 16 +- mindspore/core/{c_ops => ops}/lrn.cc | 46 +- mindspore/core/{c_ops => ops}/lrn.h | 24 +- mindspore/core/ops/lsh_projection.cc | 71 +++ mindspore/core/ops/lsh_projection.h | 46 ++ mindspore/core/ops/lstm.cc | 176 +++++++ mindspore/core/{c_ops => ops}/lstm.h | 34 +- mindspore/core/ops/make_tuple.cc | 31 ++ mindspore/core/ops/make_tuple.h | 40 ++ mindspore/core/ops/mat_mul.cc | 26 + mindspore/core/ops/mat_mul.h | 44 ++ mindspore/core/ops/matrix_diag.cc | 86 +++ mindspore/core/ops/matrix_diag.h | 43 ++ mindspore/core/ops/max_pool.cc | 152 ++++++ mindspore/core/ops/max_pool.h | 61 +++ mindspore/core/ops/maximum.cc | 54 ++ mindspore/core/ops/maximum.h | 40 ++ mindspore/core/ops/merge.cc | 59 +++ mindspore/core/ops/merge.h | 42 ++ mindspore/core/ops/mfcc.cc | 99 ++++ mindspore/core/ops/mfcc.h | 50 ++ mindspore/core/ops/minimum.cc | 58 ++ mindspore/core/ops/minimum.h | 44 ++ mindspore/core/ops/mod.cc | 24 + mindspore/core/{c_ops/div.h => ops/mod.h} | 20 +- mindspore/core/ops/mul.cc | 31 ++ mindspore/core/ops/mul.h | 41 ++ .../core/{c_ops/addn.cc => ops/mul_fold.cc} | 6 +- mindspore/core/ops/mul_fold.h | 44 ++ mindspore/core/ops/neg.cc | 40 ++ mindspore/core/ops/neg.h | 42 ++ mindspore/core/ops/net_output.cc | 31 ++ mindspore/core/ops/net_output.h | 40 ++ mindspore/core/ops/non_max_suppression.cc | 45 ++ mindspore/core/ops/non_max_suppression.h | 49 ++ .../core/{c_ops/asin.cc => ops/not_equal.cc} | 6 +- mindspore/core/ops/not_equal.h | 36 ++ mindspore/core/ops/one_hot.cc | 78 +++ mindspore/core/ops/one_hot.h | 45 ++ mindspore/core/ops/ones_like.cc | 57 ++ mindspore/core/ops/ones_like.h | 41 ++ mindspore/core/ops/onnx_int8_dequantize.cc | 25 + .../onnx_int8_dequantize.h} | 24 +- mindspore/core/ops/onnx_int8_quantize.cc | 31 ++ mindspore/core/ops/onnx_int8_quantize.h | 40 ++ mindspore/core/{c_ops => ops}/op_utils.cc | 4 +- mindspore/core/{c_ops => ops}/op_utils.h | 81 ++- mindspore/core/ops/pack.cc | 80 +++ mindspore/core/ops/pack.h | 48 ++ mindspore/core/ops/pad.cc | 75 +++ mindspore/core/ops/pad.h | 46 ++ mindspore/core/ops/partial.cc | 24 + mindspore/core/ops/partial.h | 36 ++ .../{c_ops/bias_grad.cc => ops/permute.cc} | 18 +- mindspore/core/ops/permute.h | 43 ++ mindspore/core/ops/pow.cc | 53 ++ mindspore/core/ops/pow.h | 44 ++ mindspore/core/ops/prelu.cc | 66 +++ mindspore/core/ops/prelu.h | 44 ++ mindspore/core/{c_ops => ops}/primitive_c.cc | 7 +- mindspore/core/{c_ops => ops}/primitive_c.h | 8 +- mindspore/core/ops/prior_box.cc | 152 ++++++ mindspore/core/ops/prior_box.h | 69 +++ mindspore/core/ops/proposal.cc | 96 ++++ mindspore/core/ops/proposal.h | 58 ++ mindspore/core/ops/quant_dtype_cast.cc | 53 ++ mindspore/core/ops/quant_dtype_cast.h | 51 ++ mindspore/core/ops/range.cc | 113 ++++ mindspore/core/ops/range.h | 52 ++ mindspore/core/ops/rank.cc | 41 ++ mindspore/core/ops/rank.h | 42 ++ mindspore/core/ops/real_div.cc | 57 ++ mindspore/core/ops/real_div.h | 42 ++ mindspore/core/ops/reciprocal.cc | 50 ++ mindspore/core/ops/reciprocal.h | 42 ++ mindspore/core/ops/reduce.cc | 114 ++++ mindspore/core/ops/reduce.h | 46 ++ mindspore/core/ops/reduce_all.cc | 31 ++ mindspore/core/ops/reduce_all.h | 39 ++ mindspore/core/ops/reduce_any.cc | 30 ++ mindspore/core/ops/reduce_any.h | 39 ++ mindspore/core/ops/reduce_asum.cc | 26 + mindspore/core/ops/reduce_asum.h | 40 ++ 
.../{c_ops/gather.cc => ops/reduce_max.cc} | 8 +- mindspore/core/ops/reduce_max.h | 40 ++ mindspore/core/ops/reduce_mean.cc | 30 ++ mindspore/core/ops/reduce_mean.h | 39 ++ .../{c_ops/floor.cc => ops/reduce_min.cc} | 6 +- mindspore/core/ops/reduce_min.h | 39 ++ mindspore/core/ops/reduce_prod.cc | 30 ++ mindspore/core/ops/reduce_prod.h | 39 ++ .../logical_and.cc => ops/reduce_sum.cc} | 8 +- mindspore/core/ops/reduce_sum.h | 40 ++ mindspore/core/ops/reduce_sum_square.cc | 26 + mindspore/core/ops/reduce_sum_square.h | 40 ++ .../custom_normalize.cc => ops/relu.cc} | 6 +- mindspore/core/ops/relu.h | 36 ++ mindspore/core/{c_ops => ops}/relu6.cc | 10 +- mindspore/core/ops/relu6.h | 42 ++ mindspore/core/ops/reshape.cc | 104 ++++ mindspore/core/{c_ops => ops}/reshape.h | 10 +- mindspore/core/ops/resize.cc | 130 +++++ mindspore/core/ops/resize.h | 65 +++ mindspore/core/ops/resize_bilinear.cc | 76 +++ mindspore/core/ops/resize_bilinear.h | 47 ++ mindspore/core/ops/resize_nearest_neighbor.cc | 47 ++ mindspore/core/ops/resize_nearest_neighbor.h | 45 ++ .../core/{c_ops/fill.cc => ops/return.cc} | 7 +- mindspore/core/ops/return.h | 40 ++ mindspore/core/ops/reverse_sequence.cc | 78 +++ mindspore/core/ops/reverse_sequence.h | 45 ++ mindspore/core/ops/reverse_v2.cc | 72 +++ mindspore/core/ops/reverse_v2.h | 46 ++ mindspore/core/ops/rfft.cc | 63 +++ mindspore/core/ops/rfft.h | 43 ++ mindspore/core/ops/roi_pooling.cc | 84 +++ mindspore/core/ops/roi_pooling.h | 49 ++ mindspore/core/ops/round.cc | 50 ++ mindspore/core/ops/round.h | 43 ++ mindspore/core/ops/rsqrt.cc | 59 +++ mindspore/core/ops/rsqrt.h | 43 ++ mindspore/core/ops/scale.cc | 28 + mindspore/core/ops/scale.h | 44 ++ mindspore/core/ops/scatter_nd.cc | 63 +++ mindspore/core/ops/scatter_nd.h | 42 ++ .../core/{c_ops/atan.cc => ops/select.cc} | 6 +- mindspore/core/ops/select.h | 44 ++ mindspore/core/ops/sgd.cc | 52 ++ mindspore/core/ops/sgd.h | 46 ++ mindspore/core/ops/shape.cc | 44 ++ mindspore/core/{c_ops/relu6.h => ops/shape.h} | 26 +- .../core/{c_ops/flatten.cc => ops/sigmoid.cc} | 7 +- mindspore/core/ops/sigmoid.h | 36 ++ .../ops/sigmoid_cross_entropy_with_logits.cc | 61 +++ .../ops/sigmoid_cross_entropy_with_logits.h | 45 ++ mindspore/core/ops/sin.cc | 56 ++ mindspore/core/ops/sin.h | 43 ++ mindspore/core/ops/skip_gram.cc | 77 +++ mindspore/core/ops/skip_gram.h | 53 ++ mindspore/core/ops/slice.cc | 31 ++ mindspore/core/ops/slice.h | 40 ++ mindspore/core/ops/smooth_l1_loss.cc | 62 +++ mindspore/core/ops/smooth_l1_loss.h | 43 ++ mindspore/core/{c_ops => ops}/softmax.cc | 34 +- mindspore/core/{c_ops => ops}/softmax.h | 26 +- .../ops/softmax_cross_entropy_with_logits.cc | 66 +++ .../ops/softmax_cross_entropy_with_logits.h | 43 ++ mindspore/core/ops/space_to_batch.cc | 97 ++++ mindspore/core/ops/space_to_batch.h | 48 ++ mindspore/core/ops/space_to_batch_nd.cc | 108 ++++ mindspore/core/ops/space_to_batch_nd.h | 48 ++ .../space_to_depth.cc} | 38 +- mindspore/core/ops/space_to_depth.h | 46 ++ .../core/ops/sparse_softmax_cross_entropy.cc | 64 +++ .../core/ops/sparse_softmax_cross_entropy.h | 43 ++ mindspore/core/ops/sparse_to_dense.cc | 51 ++ mindspore/core/ops/sparse_to_dense.h | 41 ++ mindspore/core/ops/split.cc | 52 ++ mindspore/core/ops/split.h | 48 ++ mindspore/core/ops/sqrt.cc | 26 + mindspore/core/ops/sqrt.h | 36 ++ mindspore/core/ops/square.cc | 26 + mindspore/core/ops/square.h | 36 ++ mindspore/core/ops/squared_difference.cc | 57 ++ mindspore/core/ops/squared_difference.h | 41 ++ mindspore/core/{c_ops => ops}/squeeze.cc | 10 +- mindspore/core/{c_ops 
=> ops}/squeeze.h | 12 +- mindspore/core/ops/stack.cc | 82 +++ mindspore/core/ops/stack.h | 48 ++ mindspore/core/ops/strided_slice.cc | 288 ++++++++++ mindspore/core/ops/strided_slice.h | 57 ++ mindspore/core/ops/sub.cc | 58 ++ mindspore/core/ops/sub.h | 45 ++ mindspore/core/ops/switch.cc | 26 + mindspore/core/ops/switch.h | 36 ++ mindspore/core/ops/tan.cc | 56 ++ mindspore/core/ops/tan.h | 43 ++ .../{c_ops/expand_dims.cc => ops/tanh.cc} | 7 +- mindspore/core/{c_ops/asin.h => ops/tanh.h} | 20 +- mindspore/core/ops/tensor_list_from_tensor.cc | 91 ++++ mindspore/core/ops/tensor_list_from_tensor.h | 45 ++ mindspore/core/ops/tensor_list_get_item.cc | 36 ++ mindspore/core/ops/tensor_list_get_item.h | 39 ++ mindspore/core/ops/tensor_list_reserve.cc | 46 ++ mindspore/core/ops/tensor_list_reserve.h | 41 ++ mindspore/core/ops/tensor_list_set_item.cc | 36 ++ mindspore/core/ops/tensor_list_set_item.h | 39 ++ mindspore/core/ops/tensor_list_stack.cc | 76 +++ mindspore/core/ops/tensor_list_stack.h | 46 ++ mindspore/core/ops/tile.cc | 77 +++ mindspore/core/ops/tile.h | 45 ++ .../to_format.cc} | 30 +- mindspore/core/ops/to_format.h | 44 ++ mindspore/core/ops/topk.cc | 59 +++ mindspore/core/ops/topk.h | 46 ++ mindspore/core/ops/transpose.cc | 26 + mindspore/core/ops/transpose.h | 36 ++ mindspore/core/ops/tuple_get_item.cc | 23 + mindspore/core/ops/tuple_get_item.h | 40 ++ .../{c_ops/assign_add.cc => ops/unique.cc} | 6 +- mindspore/core/ops/unique.h | 36 ++ mindspore/core/ops/unpack.cc | 64 +++ mindspore/core/ops/unpack.h | 49 ++ mindspore/core/ops/unsorted_segment_sum.cc | 86 +++ mindspore/core/ops/unsorted_segment_sum.h | 47 ++ mindspore/core/ops/unsqueeze.cc | 75 +++ mindspore/core/ops/unsqueeze.h | 45 ++ mindspore/core/ops/unstack.cc | 63 +++ mindspore/core/ops/unstack.h | 50 ++ mindspore/core/ops/where.cc | 80 +++ mindspore/core/ops/where.h | 43 ++ mindspore/core/ops/while.cc | 65 +++ mindspore/core/ops/while.h | 47 ++ mindspore/core/ops/zeros_like.cc | 66 +++ mindspore/core/ops/zeros_like.h | 42 ++ mindspore/core/utils/check_convert_utils.cc | 496 ++++++++++++++---- mindspore/core/utils/check_convert_utils.h | 242 ++++++++- mindspore/lite/src/ops/ops_def.cc | 8 +- mindspore/lite/src/ops/schema_def.h | 2 +- .../parser/caffe/caffe_node_parser.h | 2 +- tests/ut/cpp/CMakeLists.txt | 6 +- tests/ut/cpp/ops/test_ops_add.cc | 61 +++ tests/ut/cpp/ops/test_ops_addn.cc | 97 ++++ tests/ut/cpp/ops/test_ops_argmax.cc | 89 ++++ tests/ut/cpp/ops/test_ops_assert.cc | 108 ++++ .../ut/cpp/ops/test_ops_audio_spectrogram.cc | 69 +++ tests/ut/cpp/ops/test_ops_avg_pool_grad.cc | 64 +++ tests/ut/cpp/ops/test_ops_batch_norm.cc | 95 ++++ tests/ut/cpp/ops/test_ops_batch_norm_fold.cc | 73 +++ tests/ut/cpp/ops/test_ops_batch_norm_grad.cc | 98 ++++ tests/ut/cpp/ops/test_ops_batchtospace.cc | 66 +++ .../ops/test_ops_binary_cross_entropy_grad.cc | 98 ++++ tests/ut/cpp/ops/test_ops_ceil.cc | 61 +++ tests/ut/cpp/ops/test_ops_concat.cc | 101 ++++ tests/ut/cpp/ops/test_ops_constant.cc | 62 +++ .../test_ops_conv2d.cc} | 29 +- tests/ut/cpp/ops/test_ops_cos.cc | 63 +++ tests/ut/cpp/ops/test_ops_crop.cc | 78 +++ tests/ut/cpp/ops/test_ops_custom_predict.cc | 74 +++ tests/ut/cpp/ops/test_ops_depthtospace.cc | 63 +++ .../ops/test_ops_detection_post_process.cc | 89 ++++ tests/ut/cpp/ops/test_ops_div.cc | 63 +++ tests/ut/cpp/ops/test_ops_dropout_grad.cc | 63 +++ tests/ut/cpp/ops/test_ops_elu.cc | 62 +++ tests/ut/cpp/ops/test_ops_embeddinglookup.cc | 67 +++ tests/ut/cpp/ops/test_ops_equal.cc | 64 +++ tests/ut/cpp/ops/test_ops_exp.cc | 62 +++ 
...ake_quant_with_min_max_vars_per_channel.cc | 72 +++ tests/ut/cpp/ops/test_ops_fftimag.cc | 59 +++ tests/ut/cpp/ops/test_ops_fftreal.cc | 61 +++ tests/ut/cpp/ops/test_ops_fill.cc | 66 +++ tests/ut/cpp/ops/test_ops_flatten.cc | 60 +++ tests/ut/cpp/ops/test_ops_flattengrad.cc | 63 +++ tests/ut/cpp/ops/test_ops_floor.cc | 61 +++ tests/ut/cpp/ops/test_ops_full_connection.cc | 125 +++++ tests/ut/cpp/ops/test_ops_gather.cc | 83 +++ tests/ut/cpp/ops/test_ops_hashtable_lookup.cc | 75 +++ tests/ut/cpp/ops/test_ops_l2normalize.cc | 63 +++ tests/ut/cpp/ops/test_ops_leakyrelu.cc | 61 +++ tests/ut/cpp/ops/test_ops_less.cc | 63 +++ .../test_ops_localresponsenormalization.cc | 62 +++ tests/ut/cpp/ops/test_ops_log.cc | 59 +++ tests/ut/cpp/ops/test_ops_logical_not.cc | 77 +++ tests/ut/cpp/ops/test_ops_lsh_projection.cc | 125 +++++ tests/ut/cpp/ops/test_ops_matrix_diag.cc | 65 +++ tests/ut/cpp/ops/test_ops_max_pool_grad.cc | 66 +++ tests/ut/cpp/ops/test_ops_maximum.cc | 61 +++ tests/ut/cpp/ops/test_ops_merge.cc | 74 +++ tests/ut/cpp/ops/test_ops_mfcc.cc | 64 +++ tests/ut/cpp/ops/test_ops_minimum.cc | 64 +++ tests/ut/cpp/ops/test_ops_mul.cc | 61 +++ tests/ut/cpp/ops/test_ops_neg.cc | 62 +++ .../cpp/ops/test_ops_non_max_suppression.cc | 62 +++ tests/ut/cpp/ops/test_ops_one_hot.cc | 70 +++ tests/ut/cpp/ops/test_ops_pack.cc | 67 +++ tests/ut/cpp/ops/test_ops_pooling_grad.cc | 74 +++ tests/ut/cpp/ops/test_ops_pow.cc | 63 +++ tests/ut/cpp/ops/test_ops_prelu.cc | 93 ++++ tests/ut/cpp/ops/test_ops_prior_box.cc | 76 +++ tests/ut/cpp/ops/test_ops_quantd_type_cast.cc | 65 +++ tests/ut/cpp/ops/test_ops_range.cc | 124 +++++ tests/ut/cpp/ops/test_ops_rank.cc | 62 +++ tests/ut/cpp/ops/test_ops_realdiv.cc | 64 +++ tests/ut/cpp/ops/test_ops_reshape.cc | 90 ++++ tests/ut/cpp/ops/test_ops_reverse_v2.cc | 64 +++ tests/ut/cpp/ops/test_ops_rfft.cc | 63 +++ tests/ut/cpp/ops/test_ops_round.cc | 61 +++ tests/ut/cpp/ops/test_ops_sin.cc | 59 +++ tests/ut/cpp/ops/test_ops_slice_fusion.cc | 69 +++ tests/ut/cpp/ops/test_ops_softmax.cc | 124 +++++ tests/ut/cpp/ops/test_ops_sqrt.cc | 59 +++ tests/ut/cpp/ops/test_ops_square.cc | 60 +++ .../ut/cpp/ops/test_ops_squareddifference.cc | 61 +++ tests/ut/cpp/ops/test_ops_strided_slice.cc | 204 +++++++ tests/ut/cpp/ops/test_ops_sub.cc | 61 +++ tests/ut/cpp/ops/test_ops_topk.cc | 84 +++ tests/ut/cpp/ops/test_ops_unpack.cc | 73 +++ .../cpp/ops/test_ops_unsorted_segment_sum.cc | 65 +++ tests/ut/cpp/ops/test_ops_unsqueeze.cc | 112 ++++ tests/ut/cpp/ops/test_ops_unstack.cc | 73 +++ tests/ut/cpp/ops/test_ops_where.cc | 95 ++++ 608 files changed, 28986 insertions(+), 1834 deletions(-) delete mode 100644 mindspore/core/c_ops/abs.cc delete mode 100644 mindspore/core/c_ops/apply_momentum.cc delete mode 100644 mindspore/core/c_ops/audio_spectrogram.cc delete mode 100644 mindspore/core/c_ops/batch_norm.cc delete mode 100644 mindspore/core/c_ops/batch_norm_fold.cc delete mode 100644 mindspore/core/c_ops/binary_cross_entropy_grad.cc delete mode 100644 mindspore/core/c_ops/broadcast.cc delete mode 100644 mindspore/core/c_ops/ceil.cc delete mode 100644 mindspore/core/c_ops/cos.cc delete mode 100644 mindspore/core/c_ops/custom_predict.cc delete mode 100644 mindspore/core/c_ops/div.cc delete mode 100644 mindspore/core/c_ops/equal.cc delete mode 100644 mindspore/core/c_ops/exp.cc delete mode 100644 mindspore/core/c_ops/fake_quant_with_min_max_vars.cc delete mode 100644 mindspore/core/c_ops/fft_imag.cc delete mode 100644 mindspore/core/c_ops/flatten_grad.cc delete mode 100644 
mindspore/core/c_ops/hashtable_lookup.cc delete mode 100644 mindspore/core/c_ops/less.cc delete mode 100644 mindspore/core/c_ops/less_equal.cc delete mode 100644 mindspore/core/c_ops/local_response_normalization.cc delete mode 100644 mindspore/core/c_ops/log.cc delete mode 100644 mindspore/core/c_ops/logical_not.cc delete mode 100644 mindspore/core/c_ops/logical_or.cc delete mode 100644 mindspore/core/c_ops/lstm.cc create mode 100644 mindspore/core/ops/abs.cc rename mindspore/core/{c_ops => ops}/abs.h (69%) create mode 100644 mindspore/core/ops/adam.cc rename mindspore/core/{c_ops => ops}/adam.h (63%) create mode 100644 mindspore/core/ops/add.cc rename mindspore/core/{c_ops => ops}/add.h (81%) rename mindspore/core/{c_ops => ops}/add_fold.cc (91%) rename mindspore/core/{c_ops => ops}/add_fold.h (84%) create mode 100644 mindspore/core/ops/adder.cc create mode 100644 mindspore/core/ops/adder.h create mode 100644 mindspore/core/ops/addn.cc rename mindspore/core/{c_ops => ops}/addn.h (69%) rename mindspore/core/{c_ops/concat.cc => ops/all.cc} (65%) create mode 100644 mindspore/core/ops/all.h create mode 100644 mindspore/core/ops/apply_momentum.cc rename mindspore/core/{c_ops => ops}/apply_momentum.h (59%) create mode 100644 mindspore/core/ops/arg_max.cc create mode 100644 mindspore/core/ops/arg_max.h create mode 100644 mindspore/core/ops/arg_min.cc rename mindspore/core/{c_ops => ops}/arg_min.h (68%) create mode 100644 mindspore/core/ops/asin.cc create mode 100644 mindspore/core/ops/asin.h create mode 100644 mindspore/core/ops/assert.cc create mode 100644 mindspore/core/ops/assert.h rename mindspore/core/{c_ops => ops}/assign.cc (78%) rename mindspore/core/{c_ops => ops}/assign.h (78%) create mode 100644 mindspore/core/ops/assign_add.cc rename mindspore/core/{c_ops => ops}/assign_add.h (68%) create mode 100644 mindspore/core/ops/atan.cc rename mindspore/core/{c_ops => ops}/atan.h (72%) create mode 100644 mindspore/core/ops/audio_spectrogram.cc rename mindspore/core/{c_ops => ops}/audio_spectrogram.h (60%) rename mindspore/core/{c_ops => ops}/avg_pool.cc (78%) rename mindspore/core/{c_ops => ops}/avg_pool.h (76%) create mode 100644 mindspore/core/ops/batch_norm.cc rename mindspore/core/{c_ops => ops}/batch_norm.h (71%) create mode 100644 mindspore/core/ops/batch_norm_fold.cc create mode 100644 mindspore/core/ops/batch_norm_fold.h create mode 100644 mindspore/core/ops/batch_to_space.cc create mode 100644 mindspore/core/ops/batch_to_space.h create mode 100644 mindspore/core/ops/batch_to_space_nd.cc create mode 100644 mindspore/core/ops/batch_to_space_nd.h rename mindspore/core/{c_ops => ops}/bias_add.cc (88%) rename mindspore/core/{c_ops => ops}/bias_add.h (84%) create mode 100644 mindspore/core/ops/binary_cross_entropy.cc rename mindspore/core/{c_ops => ops}/binary_cross_entropy.h (59%) rename mindspore/core/{c_ops => ops}/black_box.cc (83%) rename mindspore/core/{c_ops => ops}/black_box.h (79%) create mode 100644 mindspore/core/ops/broadcast.cc rename mindspore/core/{c_ops => ops}/broadcast.h (62%) create mode 100644 mindspore/core/ops/broadcast_to.cc rename mindspore/core/{c_ops => ops}/broadcast_to.h (86%) rename mindspore/core/{c_ops => ops}/cast.cc (88%) rename mindspore/core/{c_ops => ops}/cast.h (85%) create mode 100644 mindspore/core/ops/ceil.cc rename mindspore/core/{c_ops => ops}/ceil.h (81%) rename mindspore/core/{c_ops => ops}/clip.cc (93%) rename mindspore/core/{c_ops => ops}/clip.h (86%) create mode 100644 mindspore/core/ops/concat.cc rename mindspore/core/{c_ops => ops}/concat.h 
(81%) create mode 100644 mindspore/core/ops/constant.cc create mode 100644 mindspore/core/ops/constant.h rename mindspore/core/{c_ops => ops}/constant_of_shape.cc (51%) rename mindspore/core/{c_ops => ops}/constant_of_shape.h (76%) rename mindspore/core/{c_ops => ops}/control_depend.cc (69%) rename mindspore/core/{c_ops => ops}/control_depend.h (68%) rename mindspore/core/{c_ops => ops}/conv2d.cc (66%) rename mindspore/core/{c_ops => ops}/conv2d.h (69%) create mode 100644 mindspore/core/ops/conv2d_transpose.cc create mode 100644 mindspore/core/ops/conv2d_transpose.h create mode 100644 mindspore/core/ops/cos.cc rename mindspore/core/{c_ops => ops}/cos.h (85%) create mode 100644 mindspore/core/ops/crop.cc create mode 100644 mindspore/core/ops/crop.h rename mindspore/core/{c_ops => ops}/custom.cc (92%) rename mindspore/core/{c_ops => ops}/custom.h (86%) create mode 100644 mindspore/core/ops/custom_extract_features.cc create mode 100644 mindspore/core/ops/custom_extract_features.h create mode 100644 mindspore/core/ops/custom_normalize.cc rename mindspore/core/{c_ops => ops}/custom_normalize.h (72%) create mode 100644 mindspore/core/ops/custom_predict.cc rename mindspore/core/{c_ops => ops}/custom_predict.h (62%) rename mindspore/core/{c_ops => ops}/depend.cc (88%) rename mindspore/core/{c_ops => ops}/depend.h (84%) create mode 100644 mindspore/core/ops/depth_to_space.cc rename mindspore/core/{c_ops => ops}/depth_to_space.h (67%) rename mindspore/core/{c_ops => ops}/depthwise_conv2d.cc (84%) rename mindspore/core/{c_ops => ops}/depthwise_conv2d.h (73%) create mode 100644 mindspore/core/ops/detection_post_process.cc create mode 100644 mindspore/core/ops/detection_post_process.h create mode 100644 mindspore/core/ops/div.cc create mode 100644 mindspore/core/ops/div.h create mode 100644 mindspore/core/ops/dropout.cc rename mindspore/core/{c_ops => ops}/dropout.h (62%) rename mindspore/core/{c_ops/embedding_lookup.cc => ops/eltwise.cc} (61%) create mode 100644 mindspore/core/ops/eltwise.h create mode 100644 mindspore/core/ops/elu.cc create mode 100644 mindspore/core/ops/elu.h create mode 100644 mindspore/core/ops/embedding_lookup.cc rename mindspore/core/{c_ops => ops}/embedding_lookup.h (64%) create mode 100644 mindspore/core/ops/equal.cc rename mindspore/core/{c_ops => ops}/equal.h (69%) create mode 100644 mindspore/core/ops/exp.cc rename mindspore/core/{c_ops => ops}/exp.h (64%) create mode 100644 mindspore/core/ops/expand_dims.cc rename mindspore/core/{c_ops => ops}/expand_dims.h (68%) create mode 100644 mindspore/core/ops/fake_quant_with_min_max_vars.cc rename mindspore/core/{c_ops => ops}/fake_quant_with_min_max_vars.h (65%) create mode 100644 mindspore/core/ops/fake_quant_with_min_max_vars_per_channel.cc create mode 100644 mindspore/core/ops/fake_quant_with_min_max_vars_per_channel.h rename mindspore/core/{c_ops/reshape.cc => ops/fft_imag.cc} (64%) rename mindspore/core/{c_ops => ops}/fft_imag.h (71%) create mode 100644 mindspore/core/ops/fft_real.cc create mode 100644 mindspore/core/ops/fft_real.h create mode 100644 mindspore/core/ops/fill.cc rename mindspore/core/{c_ops => ops}/fill.h (68%) create mode 100644 mindspore/core/ops/flatten.cc rename mindspore/core/{c_ops => ops}/flatten.h (68%) create mode 100644 mindspore/core/ops/floor.cc rename mindspore/core/{c_ops => ops}/floor.h (72%) rename mindspore/core/{c_ops => ops}/floor_div.cc (91%) rename mindspore/core/{c_ops => ops}/floor_div.h (84%) rename mindspore/core/{c_ops => ops}/floor_mod.cc (91%) rename mindspore/core/{c_ops => 
ops}/floor_mod.h (84%) create mode 100644 mindspore/core/ops/fused_batch_norm.cc create mode 100644 mindspore/core/ops/fused_batch_norm.h create mode 100644 mindspore/core/ops/fusion/activation.cc create mode 100644 mindspore/core/ops/fusion/activation.h create mode 100644 mindspore/core/ops/fusion/add_fusion.cc create mode 100644 mindspore/core/ops/fusion/add_fusion.h create mode 100644 mindspore/core/ops/fusion/adder_fusion.cc create mode 100644 mindspore/core/ops/fusion/adder_fusion.h create mode 100644 mindspore/core/ops/fusion/arg_max_fusion.cc create mode 100644 mindspore/core/ops/fusion/arg_max_fusion.h create mode 100644 mindspore/core/ops/fusion/arg_min_fusion.cc create mode 100644 mindspore/core/ops/fusion/arg_min_fusion.h create mode 100644 mindspore/core/ops/fusion/avg_pool_fusion.cc create mode 100644 mindspore/core/ops/fusion/avg_pool_fusion.h create mode 100644 mindspore/core/ops/fusion/conv2d_backprop_filter_fusion.cc create mode 100644 mindspore/core/ops/fusion/conv2d_backprop_filter_fusion.h create mode 100644 mindspore/core/ops/fusion/conv2d_backprop_input_fusion.cc create mode 100644 mindspore/core/ops/fusion/conv2d_backprop_input_fusion.h create mode 100644 mindspore/core/ops/fusion/conv2d_fusion.cc create mode 100644 mindspore/core/ops/fusion/conv2d_fusion.h create mode 100644 mindspore/core/ops/fusion/conv2d_transpose_fusion.cc create mode 100644 mindspore/core/ops/fusion/conv2d_transpose_fusion.h create mode 100644 mindspore/core/ops/fusion/depthwise_conv2d_fusion.cc create mode 100644 mindspore/core/ops/fusion/depthwise_conv2d_fusion.h create mode 100644 mindspore/core/ops/fusion/div_fusion.cc create mode 100644 mindspore/core/ops/fusion/div_fusion.h rename mindspore/core/{c_ops/dropout.cc => ops/fusion/embedding_lookup_fusion.cc} (58%) create mode 100644 mindspore/core/ops/fusion/embedding_lookup_fusion.h create mode 100644 mindspore/core/ops/fusion/exp_fusion.cc create mode 100644 mindspore/core/ops/fusion/exp_fusion.h create mode 100644 mindspore/core/ops/fusion/full_connection.cc create mode 100644 mindspore/core/ops/fusion/full_connection.h create mode 100644 mindspore/core/ops/fusion/l2_normalize_fusion.cc create mode 100644 mindspore/core/ops/fusion/l2_normalize_fusion.h create mode 100644 mindspore/core/ops/fusion/layer_norm_fusion.cc create mode 100644 mindspore/core/ops/fusion/layer_norm_fusion.h create mode 100644 mindspore/core/ops/fusion/max_pool_fusion.cc create mode 100644 mindspore/core/ops/fusion/max_pool_fusion.h create mode 100644 mindspore/core/ops/fusion/mul_fusion.cc create mode 100644 mindspore/core/ops/fusion/mul_fusion.h create mode 100644 mindspore/core/ops/fusion/pad_fusion.cc create mode 100644 mindspore/core/ops/fusion/pad_fusion.h rename mindspore/core/{c_ops/arg_min.cc => ops/fusion/partial_fusion.cc} (57%) create mode 100644 mindspore/core/ops/fusion/partial_fusion.h create mode 100644 mindspore/core/ops/fusion/pow_fusion.cc create mode 100644 mindspore/core/ops/fusion/pow_fusion.h create mode 100644 mindspore/core/ops/fusion/prelu_fusion.cc create mode 100644 mindspore/core/ops/fusion/prelu_fusion.h create mode 100644 mindspore/core/ops/fusion/reduce_fusion.cc create mode 100644 mindspore/core/ops/fusion/reduce_fusion.h create mode 100644 mindspore/core/ops/fusion/scale_fusion.cc create mode 100644 mindspore/core/ops/fusion/scale_fusion.h create mode 100644 mindspore/core/ops/fusion/slice_fusion.cc create mode 100644 mindspore/core/ops/fusion/slice_fusion.h create mode 100644 mindspore/core/ops/fusion/sub_fusion.cc create mode 
100644 mindspore/core/ops/fusion/sub_fusion.h rename mindspore/core/{c_ops/broadcast_to.cc => ops/fusion/tile_fusion.cc} (57%) create mode 100644 mindspore/core/ops/fusion/tile_fusion.h rename mindspore/core/{c_ops/l2_normalize.cc => ops/fusion/topk_fusion.cc} (53%) create mode 100644 mindspore/core/ops/fusion/topk_fusion.h create mode 100644 mindspore/core/ops/gather.cc rename mindspore/core/{c_ops => ops}/gather.h (72%) create mode 100644 mindspore/core/ops/gather_nd.cc rename mindspore/core/{c_ops => ops}/gather_nd.h (69%) create mode 100644 mindspore/core/ops/gelu.cc create mode 100644 mindspore/core/ops/gelu.h create mode 100644 mindspore/core/ops/grad/activation_grad.cc create mode 100644 mindspore/core/ops/grad/activation_grad.h create mode 100644 mindspore/core/ops/grad/add_grad.cc create mode 100644 mindspore/core/ops/grad/add_grad.h create mode 100644 mindspore/core/ops/grad/avg_pool_grad.cc create mode 100644 mindspore/core/ops/grad/avg_pool_grad.h create mode 100644 mindspore/core/ops/grad/batch_norm_grad.cc create mode 100644 mindspore/core/ops/grad/batch_norm_grad.h create mode 100644 mindspore/core/ops/grad/bias_grad.cc rename mindspore/core/{c_ops => ops/grad}/bias_grad.h (70%) create mode 100644 mindspore/core/ops/grad/binary_cross_entropy_grad.cc rename mindspore/core/{c_ops => ops/grad}/binary_cross_entropy_grad.h (72%) create mode 100644 mindspore/core/ops/grad/bn_grad.cc create mode 100644 mindspore/core/ops/grad/bn_grad.h create mode 100644 mindspore/core/ops/grad/conv2d_backprop_filter.cc create mode 100644 mindspore/core/ops/grad/conv2d_backprop_filter.h create mode 100644 mindspore/core/ops/grad/conv2d_backprop_input.cc create mode 100644 mindspore/core/ops/grad/conv2d_backprop_input.h create mode 100644 mindspore/core/ops/grad/de_conv2d_grad_filter.cc create mode 100644 mindspore/core/ops/grad/de_conv2d_grad_filter.h rename mindspore/core/{c_ops/gather_nd.cc => ops/grad/div_grad.cc} (83%) create mode 100644 mindspore/core/ops/grad/div_grad.h create mode 100644 mindspore/core/ops/grad/dropout_grad.cc create mode 100644 mindspore/core/ops/grad/dropout_grad.h create mode 100644 mindspore/core/ops/grad/flatten_grad.cc rename mindspore/core/{c_ops => ops/grad}/flatten_grad.h (85%) create mode 100644 mindspore/core/ops/grad/group_conv2d_grad_input.cc create mode 100644 mindspore/core/ops/grad/group_conv2d_grad_input.h rename mindspore/core/{c_ops => ops/grad}/log_grad.cc (87%) rename mindspore/core/{c_ops => ops/grad}/log_grad.h (84%) create mode 100644 mindspore/core/ops/grad/max_pool_grad.cc create mode 100644 mindspore/core/ops/grad/max_pool_grad.h rename mindspore/core/{c_ops/adam.cc => ops/grad/maximum_grad.cc} (53%) create mode 100644 mindspore/core/ops/grad/maximum_grad.h create mode 100644 mindspore/core/ops/grad/minimum_grad.cc create mode 100644 mindspore/core/ops/grad/minimum_grad.h create mode 100644 mindspore/core/ops/grad/mul_grad.cc create mode 100644 mindspore/core/ops/grad/mul_grad.h create mode 100644 mindspore/core/ops/grad/neg_grad.cc create mode 100644 mindspore/core/ops/grad/neg_grad.h create mode 100644 mindspore/core/ops/grad/pool_grad.cc create mode 100644 mindspore/core/ops/grad/pool_grad.h create mode 100644 mindspore/core/ops/grad/pooling_grad.cc create mode 100644 mindspore/core/ops/grad/pooling_grad.h create mode 100644 mindspore/core/ops/grad/power_grad.cc create mode 100644 mindspore/core/ops/grad/power_grad.h create mode 100644 mindspore/core/ops/grad/sigmoid_cross_entropy_with_logits_grad.cc create mode 100644 
mindspore/core/ops/grad/sigmoid_cross_entropy_with_logits_grad.h create mode 100644 mindspore/core/ops/grad/smooth_l1_loss_grad.cc create mode 100644 mindspore/core/ops/grad/smooth_l1_loss_grad.h create mode 100644 mindspore/core/ops/grad/sub_grad.cc create mode 100644 mindspore/core/ops/grad/sub_grad.h rename mindspore/core/{c_ops => ops}/greater.cc (92%) rename mindspore/core/{c_ops => ops}/greater.h (85%) rename mindspore/core/{c_ops => ops}/greater_equal.cc (84%) rename mindspore/core/{c_ops => ops}/greater_equal.h (84%) create mode 100644 mindspore/core/ops/hashtable_lookup.cc rename mindspore/core/{c_ops => ops}/hashtable_lookup.h (72%) rename mindspore/core/{c_ops => ops}/identity.cc (84%) rename mindspore/core/{c_ops => ops}/identity.h (77%) rename mindspore/core/{c_ops => ops}/instance_norm.cc (78%) rename mindspore/core/{c_ops => ops}/instance_norm.h (78%) create mode 100644 mindspore/core/ops/l2_normalize.cc rename mindspore/core/{c_ops => ops}/l2_normalize.h (64%) rename mindspore/core/{c_ops => ops}/layer_norm.cc (68%) rename mindspore/core/{c_ops => ops}/layer_norm.h (58%) create mode 100644 mindspore/core/ops/leaky_relu.cc create mode 100644 mindspore/core/ops/leaky_relu.h create mode 100644 mindspore/core/ops/less.cc rename mindspore/core/{c_ops => ops}/less.h (82%) create mode 100644 mindspore/core/ops/less_equal.cc rename mindspore/core/{c_ops => ops}/less_equal.h (68%) create mode 100644 mindspore/core/ops/local_response_normalization.cc rename mindspore/core/{c_ops => ops}/local_response_normalization.h (60%) create mode 100644 mindspore/core/ops/log.cc rename mindspore/core/{c_ops => ops}/log.h (82%) create mode 100644 mindspore/core/ops/logical_and.cc rename mindspore/core/{c_ops => ops}/logical_and.h (71%) create mode 100644 mindspore/core/ops/logical_not.cc rename mindspore/core/{c_ops => ops}/logical_not.h (68%) create mode 100644 mindspore/core/ops/logical_or.cc rename mindspore/core/{c_ops => ops}/logical_or.h (68%) create mode 100644 mindspore/core/ops/logical_xor.cc create mode 100644 mindspore/core/ops/logical_xor.h rename mindspore/core/{c_ops => ops}/loop.cc (74%) rename mindspore/core/{c_ops => ops}/loop.h (78%) rename mindspore/core/{c_ops => ops}/lp_normalization.cc (75%) rename mindspore/core/{c_ops => ops}/lp_normalization.h (77%) rename mindspore/core/{c_ops => ops}/lrn.cc (50%) rename mindspore/core/{c_ops => ops}/lrn.h (66%) create mode 100644 mindspore/core/ops/lsh_projection.cc create mode 100644 mindspore/core/ops/lsh_projection.h create mode 100644 mindspore/core/ops/lstm.cc rename mindspore/core/{c_ops => ops}/lstm.h (56%) create mode 100644 mindspore/core/ops/make_tuple.cc create mode 100644 mindspore/core/ops/make_tuple.h create mode 100644 mindspore/core/ops/mat_mul.cc create mode 100644 mindspore/core/ops/mat_mul.h create mode 100644 mindspore/core/ops/matrix_diag.cc create mode 100644 mindspore/core/ops/matrix_diag.h create mode 100644 mindspore/core/ops/max_pool.cc create mode 100644 mindspore/core/ops/max_pool.h create mode 100644 mindspore/core/ops/maximum.cc create mode 100644 mindspore/core/ops/maximum.h create mode 100644 mindspore/core/ops/merge.cc create mode 100644 mindspore/core/ops/merge.h create mode 100644 mindspore/core/ops/mfcc.cc create mode 100644 mindspore/core/ops/mfcc.h create mode 100644 mindspore/core/ops/minimum.cc create mode 100644 mindspore/core/ops/minimum.h create mode 100644 mindspore/core/ops/mod.cc rename mindspore/core/{c_ops/div.h => ops/mod.h} (68%) create mode 100644 mindspore/core/ops/mul.cc create mode 
100644 mindspore/core/ops/mul.h rename mindspore/core/{c_ops/addn.cc => ops/mul_fold.cc} (86%) create mode 100644 mindspore/core/ops/mul_fold.h create mode 100644 mindspore/core/ops/neg.cc create mode 100644 mindspore/core/ops/neg.h create mode 100644 mindspore/core/ops/net_output.cc create mode 100644 mindspore/core/ops/net_output.h create mode 100644 mindspore/core/ops/non_max_suppression.cc create mode 100644 mindspore/core/ops/non_max_suppression.h rename mindspore/core/{c_ops/asin.cc => ops/not_equal.cc} (85%) create mode 100644 mindspore/core/ops/not_equal.h create mode 100644 mindspore/core/ops/one_hot.cc create mode 100644 mindspore/core/ops/one_hot.h create mode 100644 mindspore/core/ops/ones_like.cc create mode 100644 mindspore/core/ops/ones_like.h create mode 100644 mindspore/core/ops/onnx_int8_dequantize.cc rename mindspore/core/{c_ops/batch_norm_fold.h => ops/onnx_int8_dequantize.h} (58%) create mode 100644 mindspore/core/ops/onnx_int8_quantize.cc create mode 100644 mindspore/core/ops/onnx_int8_quantize.h rename mindspore/core/{c_ops => ops}/op_utils.cc (97%) rename mindspore/core/{c_ops => ops}/op_utils.h (67%) create mode 100644 mindspore/core/ops/pack.cc create mode 100644 mindspore/core/ops/pack.h create mode 100644 mindspore/core/ops/pad.cc create mode 100644 mindspore/core/ops/pad.h create mode 100644 mindspore/core/ops/partial.cc create mode 100644 mindspore/core/ops/partial.h rename mindspore/core/{c_ops/bias_grad.cc => ops/permute.cc} (68%) create mode 100644 mindspore/core/ops/permute.h create mode 100644 mindspore/core/ops/pow.cc create mode 100644 mindspore/core/ops/pow.h create mode 100644 mindspore/core/ops/prelu.cc create mode 100644 mindspore/core/ops/prelu.h rename mindspore/core/{c_ops => ops}/primitive_c.cc (94%) rename mindspore/core/{c_ops => ops}/primitive_c.h (92%) create mode 100644 mindspore/core/ops/prior_box.cc create mode 100644 mindspore/core/ops/prior_box.h create mode 100644 mindspore/core/ops/proposal.cc create mode 100644 mindspore/core/ops/proposal.h create mode 100644 mindspore/core/ops/quant_dtype_cast.cc create mode 100644 mindspore/core/ops/quant_dtype_cast.h create mode 100644 mindspore/core/ops/range.cc create mode 100644 mindspore/core/ops/range.h create mode 100644 mindspore/core/ops/rank.cc create mode 100644 mindspore/core/ops/rank.h create mode 100644 mindspore/core/ops/real_div.cc create mode 100644 mindspore/core/ops/real_div.h create mode 100644 mindspore/core/ops/reciprocal.cc create mode 100644 mindspore/core/ops/reciprocal.h create mode 100644 mindspore/core/ops/reduce.cc create mode 100644 mindspore/core/ops/reduce.h create mode 100644 mindspore/core/ops/reduce_all.cc create mode 100644 mindspore/core/ops/reduce_all.h create mode 100644 mindspore/core/ops/reduce_any.cc create mode 100644 mindspore/core/ops/reduce_any.h create mode 100644 mindspore/core/ops/reduce_asum.cc create mode 100644 mindspore/core/ops/reduce_asum.h rename mindspore/core/{c_ops/gather.cc => ops/reduce_max.cc} (82%) create mode 100644 mindspore/core/ops/reduce_max.h create mode 100644 mindspore/core/ops/reduce_mean.cc create mode 100644 mindspore/core/ops/reduce_mean.h rename mindspore/core/{c_ops/floor.cc => ops/reduce_min.cc} (85%) create mode 100644 mindspore/core/ops/reduce_min.h create mode 100644 mindspore/core/ops/reduce_prod.cc create mode 100644 mindspore/core/ops/reduce_prod.h rename mindspore/core/{c_ops/logical_and.cc => ops/reduce_sum.cc} (82%) create mode 100644 mindspore/core/ops/reduce_sum.h create mode 100644 
mindspore/core/ops/reduce_sum_square.cc create mode 100644 mindspore/core/ops/reduce_sum_square.h rename mindspore/core/{c_ops/custom_normalize.cc => ops/relu.cc} (88%) create mode 100644 mindspore/core/ops/relu.h rename mindspore/core/{c_ops => ops}/relu6.cc (89%) create mode 100644 mindspore/core/ops/relu6.h create mode 100644 mindspore/core/ops/reshape.cc rename mindspore/core/{c_ops => ops}/reshape.h (87%) create mode 100644 mindspore/core/ops/resize.cc create mode 100644 mindspore/core/ops/resize.h create mode 100644 mindspore/core/ops/resize_bilinear.cc create mode 100644 mindspore/core/ops/resize_bilinear.h create mode 100644 mindspore/core/ops/resize_nearest_neighbor.cc create mode 100644 mindspore/core/ops/resize_nearest_neighbor.h rename mindspore/core/{c_ops/fill.cc => ops/return.cc} (82%) create mode 100644 mindspore/core/ops/return.h create mode 100644 mindspore/core/ops/reverse_sequence.cc create mode 100644 mindspore/core/ops/reverse_sequence.h create mode 100644 mindspore/core/ops/reverse_v2.cc create mode 100644 mindspore/core/ops/reverse_v2.h create mode 100644 mindspore/core/ops/rfft.cc create mode 100644 mindspore/core/ops/rfft.h create mode 100644 mindspore/core/ops/roi_pooling.cc create mode 100644 mindspore/core/ops/roi_pooling.h create mode 100644 mindspore/core/ops/round.cc create mode 100644 mindspore/core/ops/round.h create mode 100644 mindspore/core/ops/rsqrt.cc create mode 100644 mindspore/core/ops/rsqrt.h create mode 100644 mindspore/core/ops/scale.cc create mode 100644 mindspore/core/ops/scale.h create mode 100644 mindspore/core/ops/scatter_nd.cc create mode 100644 mindspore/core/ops/scatter_nd.h rename mindspore/core/{c_ops/atan.cc => ops/select.cc} (86%) create mode 100644 mindspore/core/ops/select.h create mode 100644 mindspore/core/ops/sgd.cc create mode 100644 mindspore/core/ops/sgd.h create mode 100644 mindspore/core/ops/shape.cc rename mindspore/core/{c_ops/relu6.h => ops/shape.h} (67%) rename mindspore/core/{c_ops/flatten.cc => ops/sigmoid.cc} (82%) create mode 100644 mindspore/core/ops/sigmoid.h create mode 100644 mindspore/core/ops/sigmoid_cross_entropy_with_logits.cc create mode 100644 mindspore/core/ops/sigmoid_cross_entropy_with_logits.h create mode 100644 mindspore/core/ops/sin.cc create mode 100644 mindspore/core/ops/sin.h create mode 100644 mindspore/core/ops/skip_gram.cc create mode 100644 mindspore/core/ops/skip_gram.h create mode 100644 mindspore/core/ops/slice.cc create mode 100644 mindspore/core/ops/slice.h create mode 100644 mindspore/core/ops/smooth_l1_loss.cc create mode 100644 mindspore/core/ops/smooth_l1_loss.h rename mindspore/core/{c_ops => ops}/softmax.cc (73%) rename mindspore/core/{c_ops => ops}/softmax.h (65%) create mode 100644 mindspore/core/ops/softmax_cross_entropy_with_logits.cc create mode 100644 mindspore/core/ops/softmax_cross_entropy_with_logits.h create mode 100644 mindspore/core/ops/space_to_batch.cc create mode 100644 mindspore/core/ops/space_to_batch.h create mode 100644 mindspore/core/ops/space_to_batch_nd.cc create mode 100644 mindspore/core/ops/space_to_batch_nd.h rename mindspore/core/{c_ops/depth_to_space.cc => ops/space_to_depth.cc} (62%) create mode 100644 mindspore/core/ops/space_to_depth.h create mode 100644 mindspore/core/ops/sparse_softmax_cross_entropy.cc create mode 100644 mindspore/core/ops/sparse_softmax_cross_entropy.h create mode 100644 mindspore/core/ops/sparse_to_dense.cc create mode 100644 mindspore/core/ops/sparse_to_dense.h create mode 100644 mindspore/core/ops/split.cc create mode 100644 
mindspore/core/ops/split.h create mode 100644 mindspore/core/ops/sqrt.cc create mode 100644 mindspore/core/ops/sqrt.h create mode 100644 mindspore/core/ops/square.cc create mode 100644 mindspore/core/ops/square.h create mode 100644 mindspore/core/ops/squared_difference.cc create mode 100644 mindspore/core/ops/squared_difference.h rename mindspore/core/{c_ops => ops}/squeeze.cc (91%) rename mindspore/core/{c_ops => ops}/squeeze.h (87%) create mode 100644 mindspore/core/ops/stack.cc create mode 100644 mindspore/core/ops/stack.h create mode 100644 mindspore/core/ops/strided_slice.cc create mode 100644 mindspore/core/ops/strided_slice.h create mode 100644 mindspore/core/ops/sub.cc create mode 100644 mindspore/core/ops/sub.h create mode 100644 mindspore/core/ops/switch.cc create mode 100644 mindspore/core/ops/switch.h create mode 100644 mindspore/core/ops/tan.cc create mode 100644 mindspore/core/ops/tan.h rename mindspore/core/{c_ops/expand_dims.cc => ops/tanh.cc} (83%) rename mindspore/core/{c_ops/asin.h => ops/tanh.h} (69%) create mode 100644 mindspore/core/ops/tensor_list_from_tensor.cc create mode 100644 mindspore/core/ops/tensor_list_from_tensor.h create mode 100644 mindspore/core/ops/tensor_list_get_item.cc create mode 100644 mindspore/core/ops/tensor_list_get_item.h create mode 100644 mindspore/core/ops/tensor_list_reserve.cc create mode 100644 mindspore/core/ops/tensor_list_reserve.h create mode 100644 mindspore/core/ops/tensor_list_set_item.cc create mode 100644 mindspore/core/ops/tensor_list_set_item.h create mode 100644 mindspore/core/ops/tensor_list_stack.cc create mode 100644 mindspore/core/ops/tensor_list_stack.h create mode 100644 mindspore/core/ops/tile.cc create mode 100644 mindspore/core/ops/tile.h rename mindspore/core/{c_ops/binary_cross_entropy.cc => ops/to_format.cc} (55%) create mode 100644 mindspore/core/ops/to_format.h create mode 100644 mindspore/core/ops/topk.cc create mode 100644 mindspore/core/ops/topk.h create mode 100644 mindspore/core/ops/transpose.cc create mode 100644 mindspore/core/ops/transpose.h create mode 100644 mindspore/core/ops/tuple_get_item.cc create mode 100644 mindspore/core/ops/tuple_get_item.h rename mindspore/core/{c_ops/assign_add.cc => ops/unique.cc} (86%) create mode 100644 mindspore/core/ops/unique.h create mode 100644 mindspore/core/ops/unpack.cc create mode 100644 mindspore/core/ops/unpack.h create mode 100644 mindspore/core/ops/unsorted_segment_sum.cc create mode 100644 mindspore/core/ops/unsorted_segment_sum.h create mode 100644 mindspore/core/ops/unsqueeze.cc create mode 100644 mindspore/core/ops/unsqueeze.h create mode 100644 mindspore/core/ops/unstack.cc create mode 100644 mindspore/core/ops/unstack.h create mode 100644 mindspore/core/ops/where.cc create mode 100644 mindspore/core/ops/where.h create mode 100644 mindspore/core/ops/while.cc create mode 100644 mindspore/core/ops/while.h create mode 100644 mindspore/core/ops/zeros_like.cc create mode 100644 mindspore/core/ops/zeros_like.h create mode 100644 tests/ut/cpp/ops/test_ops_add.cc create mode 100644 tests/ut/cpp/ops/test_ops_addn.cc create mode 100644 tests/ut/cpp/ops/test_ops_argmax.cc create mode 100644 tests/ut/cpp/ops/test_ops_assert.cc create mode 100644 tests/ut/cpp/ops/test_ops_audio_spectrogram.cc create mode 100644 tests/ut/cpp/ops/test_ops_avg_pool_grad.cc create mode 100644 tests/ut/cpp/ops/test_ops_batch_norm.cc create mode 100644 tests/ut/cpp/ops/test_ops_batch_norm_fold.cc create mode 100644 tests/ut/cpp/ops/test_ops_batch_norm_grad.cc create mode 100644 
tests/ut/cpp/ops/test_ops_batchtospace.cc create mode 100644 tests/ut/cpp/ops/test_ops_binary_cross_entropy_grad.cc create mode 100644 tests/ut/cpp/ops/test_ops_ceil.cc create mode 100644 tests/ut/cpp/ops/test_ops_concat.cc create mode 100644 tests/ut/cpp/ops/test_ops_constant.cc rename tests/ut/cpp/{c_ops/test_c_ops_conv2d.cc => ops/test_ops_conv2d.cc} (73%) create mode 100644 tests/ut/cpp/ops/test_ops_cos.cc create mode 100644 tests/ut/cpp/ops/test_ops_crop.cc create mode 100644 tests/ut/cpp/ops/test_ops_custom_predict.cc create mode 100644 tests/ut/cpp/ops/test_ops_depthtospace.cc create mode 100644 tests/ut/cpp/ops/test_ops_detection_post_process.cc create mode 100644 tests/ut/cpp/ops/test_ops_div.cc create mode 100644 tests/ut/cpp/ops/test_ops_dropout_grad.cc create mode 100644 tests/ut/cpp/ops/test_ops_elu.cc create mode 100644 tests/ut/cpp/ops/test_ops_embeddinglookup.cc create mode 100644 tests/ut/cpp/ops/test_ops_equal.cc create mode 100644 tests/ut/cpp/ops/test_ops_exp.cc create mode 100644 tests/ut/cpp/ops/test_ops_fake_quant_with_min_max_vars_per_channel.cc create mode 100644 tests/ut/cpp/ops/test_ops_fftimag.cc create mode 100644 tests/ut/cpp/ops/test_ops_fftreal.cc create mode 100644 tests/ut/cpp/ops/test_ops_fill.cc create mode 100644 tests/ut/cpp/ops/test_ops_flatten.cc create mode 100644 tests/ut/cpp/ops/test_ops_flattengrad.cc create mode 100644 tests/ut/cpp/ops/test_ops_floor.cc create mode 100644 tests/ut/cpp/ops/test_ops_full_connection.cc create mode 100644 tests/ut/cpp/ops/test_ops_gather.cc create mode 100644 tests/ut/cpp/ops/test_ops_hashtable_lookup.cc create mode 100644 tests/ut/cpp/ops/test_ops_l2normalize.cc create mode 100644 tests/ut/cpp/ops/test_ops_leakyrelu.cc create mode 100644 tests/ut/cpp/ops/test_ops_less.cc create mode 100644 tests/ut/cpp/ops/test_ops_localresponsenormalization.cc create mode 100644 tests/ut/cpp/ops/test_ops_log.cc create mode 100644 tests/ut/cpp/ops/test_ops_logical_not.cc create mode 100644 tests/ut/cpp/ops/test_ops_lsh_projection.cc create mode 100644 tests/ut/cpp/ops/test_ops_matrix_diag.cc create mode 100644 tests/ut/cpp/ops/test_ops_max_pool_grad.cc create mode 100644 tests/ut/cpp/ops/test_ops_maximum.cc create mode 100644 tests/ut/cpp/ops/test_ops_merge.cc create mode 100644 tests/ut/cpp/ops/test_ops_mfcc.cc create mode 100644 tests/ut/cpp/ops/test_ops_minimum.cc create mode 100644 tests/ut/cpp/ops/test_ops_mul.cc create mode 100644 tests/ut/cpp/ops/test_ops_neg.cc create mode 100644 tests/ut/cpp/ops/test_ops_non_max_suppression.cc create mode 100644 tests/ut/cpp/ops/test_ops_one_hot.cc create mode 100644 tests/ut/cpp/ops/test_ops_pack.cc create mode 100644 tests/ut/cpp/ops/test_ops_pooling_grad.cc create mode 100644 tests/ut/cpp/ops/test_ops_pow.cc create mode 100644 tests/ut/cpp/ops/test_ops_prelu.cc create mode 100644 tests/ut/cpp/ops/test_ops_prior_box.cc create mode 100644 tests/ut/cpp/ops/test_ops_quantd_type_cast.cc create mode 100644 tests/ut/cpp/ops/test_ops_range.cc create mode 100644 tests/ut/cpp/ops/test_ops_rank.cc create mode 100644 tests/ut/cpp/ops/test_ops_realdiv.cc create mode 100644 tests/ut/cpp/ops/test_ops_reshape.cc create mode 100644 tests/ut/cpp/ops/test_ops_reverse_v2.cc create mode 100644 tests/ut/cpp/ops/test_ops_rfft.cc create mode 100644 tests/ut/cpp/ops/test_ops_round.cc create mode 100644 tests/ut/cpp/ops/test_ops_sin.cc create mode 100644 tests/ut/cpp/ops/test_ops_slice_fusion.cc create mode 100644 tests/ut/cpp/ops/test_ops_softmax.cc create mode 100644 tests/ut/cpp/ops/test_ops_sqrt.cc create 
mode 100644 tests/ut/cpp/ops/test_ops_square.cc create mode 100644 tests/ut/cpp/ops/test_ops_squareddifference.cc create mode 100644 tests/ut/cpp/ops/test_ops_strided_slice.cc create mode 100644 tests/ut/cpp/ops/test_ops_sub.cc create mode 100644 tests/ut/cpp/ops/test_ops_topk.cc create mode 100644 tests/ut/cpp/ops/test_ops_unpack.cc create mode 100644 tests/ut/cpp/ops/test_ops_unsorted_segment_sum.cc create mode 100644 tests/ut/cpp/ops/test_ops_unsqueeze.cc create mode 100644 tests/ut/cpp/ops/test_ops_unstack.cc create mode 100644 tests/ut/cpp/ops/test_ops_where.cc diff --git a/mindspore/ccsrc/CMakeLists.txt b/mindspore/ccsrc/CMakeLists.txt index cc062c9c097..a45720dcf7d 100644 --- a/mindspore/ccsrc/CMakeLists.txt +++ b/mindspore/ccsrc/CMakeLists.txt @@ -333,14 +333,13 @@ if(CMAKE_SYSTEM_NAME MATCHES "Windows") target_link_libraries(mindspore mindspore::pybind11_module) target_link_libraries(mindspore mindspore_gvar) target_link_libraries(_c_expression PRIVATE -Wl,--whole-archive mindspore -Wl,--no-whole-archive) -elseif(CMAKE_SYSTEM_NAME MATCHES "Darwin") +elseif (CMAKE_SYSTEM_NAME MATCHES "Darwin") target_link_libraries(mindspore mindspore::pybind11_module) target_link_libraries(mindspore mindspore_gvar) target_link_libraries(_c_expression PRIVATE -Wl,-force_load mindspore -Wl,-noall_load) -else() - if(ENABLE_CPU AND (ENABLE_D OR ENABLE_GPU)) - target_link_libraries(mindspore mindspore::pslite proto_input mindspore::protobuf - mindspore::event mindspore::event_pthreads ${zeromq_DIRPATH}/zmq_install/lib/libzmq.a) +else () + if (ENABLE_CPU AND (ENABLE_D OR ENABLE_GPU)) + target_link_libraries(mindspore mindspore::pslite proto_input mindspore::protobuf mindspore::event mindspore::event_pthreads ${zeromq_DIRPATH}/zmq_install/lib/libzmq.a) target_link_libraries(mindspore -Wl,--no-as-needed mindspore::event_core ps_cache) if(${ENABLE_IBVERBS} STREQUAL "ON") target_link_libraries(mindspore ibverbs rdmacm) diff --git a/mindspore/ccsrc/backend/optimizer/graph_kernel/graph_kernel_helper.cc b/mindspore/ccsrc/backend/optimizer/graph_kernel/graph_kernel_helper.cc index 6e2a53a9544..8d57e5bea57 100644 --- a/mindspore/ccsrc/backend/optimizer/graph_kernel/graph_kernel_helper.cc +++ b/mindspore/ccsrc/backend/optimizer/graph_kernel/graph_kernel_helper.cc @@ -717,7 +717,7 @@ std::unordered_set GetExpandOps() { prim::kPrimMinimumGrad, prim::kPrimGkDropout, prim::kPrimDropoutGrad, - prim::kPrimSoftMax, + prim::kPrimSoftmax, prim::kPrimLayerNorm, prim::kPrimLayerNormGrad, #endif diff --git a/mindspore/ccsrc/backend/session/session_basic.cc b/mindspore/ccsrc/backend/session/session_basic.cc index 8be585634b5..a5b7d35581a 100644 --- a/mindspore/ccsrc/backend/session/session_basic.cc +++ b/mindspore/ccsrc/backend/session/session_basic.cc @@ -20,7 +20,7 @@ #include #include -#include "c_ops/primitive_c.h" +#include "ops/primitive_c.h" #include "ir/manager.h" #include "abstract/utils.h" #include "backend/kernel_compiler/common_utils.h" @@ -1750,8 +1750,8 @@ void SessionBasic::RunInfer(NotNull func_graph, const std::vector< input_abstracts.emplace_back(abstract); } auto prim = AnfAlgo::GetCNodePrimitive(node); - if (prim->isa()) { - auto prim_c = prim->cast>(); + if (prim->isa()) { + auto prim_c = prim->cast>(); MS_EXCEPTION_IF_NULL(prim_c); auto abstract = prim_c->Infer(input_abstracts); node->set_abstract(abstract); diff --git a/mindspore/core/CMakeLists.txt b/mindspore/core/CMakeLists.txt index 8a95bc7d5b6..1d3ecdf7179 100644 --- a/mindspore/core/CMakeLists.txt +++ b/mindspore/core/CMakeLists.txt @@ -8,16 +8,16 
@@ endif() message("************ build core ***************") file(GLOB_RECURSE CORE_SRC_LIST RELATIVE ${CMAKE_CURRENT_SOURCE_DIR} - "abstract/*.cc" - "base/*.cc" - "c_ops/*.cc" - "ir/*.cc" - "utils/*.cc" - "load_mindir/*.cc" -) + "abstract/*.cc" + "base/*.cc" + "ops/*.cc" + "ir/*.cc" + "utils/*.cc" + "load_mindir/*.cc" + ) if(CMAKE_SYSTEM_NAME MATCHES "Windows") - set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-attributes -DHAVE_SNPRINTF") - add_compile_definitions(BUILDING_DLL) + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-attributes -DHAVE_SNPRINTF") + add_compile_definitions(BUILDING_DLL) elseif(CMAKE_SYSTEM_NAME MATCHES "Darwin") set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} \ -Wuser-defined-warnings -Winconsistent-missing-override -Wno-delete-non-abstract-non-virtual-dtor") @@ -28,5 +28,5 @@ add_library(mindspore_core STATIC ${CORE_SRC_LIST}) target_link_libraries(mindspore_core PRIVATE mindspore_gvar) if(USE_GLOG) - target_link_libraries(mindspore_core PRIVATE mindspore::glog) + target_link_libraries(mindspore_core PRIVATE mindspore::glog) endif() diff --git a/mindspore/core/base/core_ops.h b/mindspore/core/base/core_ops.h index 36dd4597ca1..34ea77ddd83 100644 --- a/mindspore/core/base/core_ops.h +++ b/mindspore/core/base/core_ops.h @@ -91,7 +91,9 @@ inline const PrimitivePtr kPrimLabelSwitch = std::make_shared("LabelS inline const PrimitivePtr kPrimLabelSet = std::make_shared("LabelSet"); // Arrays +inline const PrimitivePtr kPrimBroadcastTo = std::make_shared("BroadcastTo"); inline const PrimitivePtr kPrimScalarToArray = std::make_shared("scalar_to_array"); +inline const PrimitivePtr kPrimTopK = std::make_shared("TopK"); inline const PrimitivePtr kPrimArrayToScalar = std::make_shared("array_to_scalar"); inline const PrimitivePtr kPrimBroadcastShape = std::make_shared("broadcast_shape"); inline const PrimitivePtr kPrimArrayMap = std::make_shared("array_map"); @@ -99,17 +101,25 @@ inline const PrimitivePtr kPrimArrayReduce = std::make_shared("array_ inline const PrimitivePtr kPrimCast = std::make_shared("Cast"); inline const PrimitivePtr kPrimConcat = std::make_shared("Concat"); inline const PrimitivePtr kPrimSqueeze = std::make_shared("Squeeze"); +inline const PrimitivePtr kPrimUnsqueeze = std::make_shared("Unsqueeze"); inline const PrimitivePtr kPrimTranspose = std::make_shared("Transpose"); +inline const PrimitivePtr kPrimGatherV2 = std::make_shared("GatherV2"); inline const PrimitivePtr kPrimGatherD = std::make_shared("GatherD"); -inline const PrimitivePtr kPrimGather = std::make_shared(kGather); +inline const PrimitivePtr kPrimGather = std::make_shared("Gather"); +inline const PrimitivePtr kPrimGatherND = std::make_shared("GatherND"); inline const PrimitivePtr kPrimSparseGatherV2 = std::make_shared("SparseGatherV2"); +inline const PrimitivePtr kPrimSparseToDense = std::make_shared("SparseToDense"); inline const PrimitivePtr kPrimShape = std::make_shared("Shape"); +inline const PrimitivePtr kPrimStridedSlice = std::make_shared("StridedSlice"); inline const PrimitivePtr kPrimDynamicShape = std::make_shared("DynamicShape"); inline const PrimitivePtr kPrimEmbeddingLookup = std::make_shared("EmbeddingLookup"); inline const PrimitivePtr kPrimEmbeddingLookupCommGrad = std::make_shared("EmbeddingLookupCommGrad"); inline const PrimitivePtr kPrimSize = std::make_shared("Size"); inline const PrimitivePtr kPrimArgMax = std::make_shared("Argmax"); +inline const PrimitivePtr kPrimArgMin = std::make_shared("Argmin"); inline const PrimitivePtr kPrimPack = std::make_shared("Pack"); +inline const 
PrimitivePtr kPrimUnpack = std::make_shared("Unpack"); +inline const PrimitivePtr kPrimUnstack = std::make_shared("Unstack"); inline const PrimitivePtr kPrimUnsortedSegmentMax = std::make_shared("UnsortedSegmentMax"); inline const PrimitivePtr kPrimUnsortedSegmentSum = std::make_shared("UnsortedSegmentSum"); inline const PrimitivePtr kPrimUnsortedSegmentMin = std::make_shared("UnsortedSegmentMin"); @@ -123,6 +133,7 @@ inline const PrimitivePtr kPrimCacheSwapTable = std::make_shared("Cac inline const PrimitivePtr kPrimDynamicAssign = std::make_shared("DynamicAssign"); inline const PrimitivePtr kPrimPadAndShift = std::make_shared("PadAndShift"); inline const PrimitivePtr kPrimSlice = std::make_shared("Slice"); +inline const PrimitivePtr kPrimSliceFusion = std::make_shared("SliceFusion"); inline const PrimitivePtr kPrimTile = std::make_shared("Tile"); inline const PrimitivePtr kPrimAddN = std::make_shared("AddN"); inline const PrimitivePtr kPrimAccumulateNV2 = std::make_shared("AccumulateNV2"); @@ -145,16 +156,36 @@ inline const PrimitivePtr kPrimSequenceMask = std::make_shared("Seque inline const PrimitivePtr kPrimRange = std::make_shared("Range"); inline const PrimitivePtr kPrimSpaceToBatchND = std::make_shared("SpaceToBatchND"); inline const PrimitivePtr kPrimBatchToSpaceND = std::make_shared("BatchToSpaceND"); +inline const PrimitivePtr kPrimDepthToSpace = std::make_shared("DepthToSpace"); +inline const PrimitivePtr kPrimBatchToSpace = std::make_shared("BatchToSpace"); +inline const PrimitivePtr kPrimSpaceToBatch = std::make_shared("SpaceToBatch"); +inline const PrimitivePtr kPrimScatterNd = std::make_shared("ScatterNd"); +inline const PrimitivePtr kPrimConstantOfShape = std::make_shared("ConstantOfShape"); +inline const PrimitivePtr kPrimSquaredDifference = std::make_shared("SquaredDifference"); +inline const PrimitivePtr kPrimReverseV2 = std::make_shared("ReverseV2"); +inline const PrimitivePtr kPrimReverseSequence = std::make_shared("ReverseSequence"); +inline const PrimitivePtr kPrimRank = std::make_shared("Rank"); +inline const PrimitivePtr kPrimResizeBilinear = std::make_shared("ResizeBilinear"); // NN +inline const PrimitivePtr kPrimAdam = std::make_shared("Adam"); +inline const PrimitivePtr kPrimAudioSpectrogram = std::make_shared("AudioSpectrogram"); inline const PrimitivePtr kPrimFlatten = std::make_shared("Flatten"); -inline const PrimitivePtr kPrimSoftMax = std::make_shared("Softmax"); +inline const PrimitivePtr kPrimCrop = std::make_shared("Crop"); +inline const PrimitivePtr kPrimFlattenGrad = std::make_shared("FlattenGrad"); +inline const PrimitivePtr kPrimSoftmax = std::make_shared("Softmax"); +inline const PrimitivePtr kPrimSparseSoftmaxCrossEntropy = std::make_shared("SparseSoftmaxCrossEntropy"); inline const PrimitivePtr kPrimLogSoftmax = std::make_shared("LogSoftmax"); inline const PrimitivePtr kPrimLogSoftmaxGrad = std::make_shared("LogSoftmaxGrad"); +inline const PrimitivePtr kPrimLstm = std::make_shared("Lstm"); +inline const PrimitivePtr kPrimTan = std::make_shared("Tan"); +inline const PrimitivePtr kPrimAtan = std::make_shared("Atan"); +inline const PrimitivePtr kPrimAsin = std::make_shared("Asin"); inline const PrimitivePtr kPrimTanh = std::make_shared("Tanh"); inline const PrimitivePtr kPrimTanhGrad = std::make_shared("TanhGrad"); inline const PrimitivePtr kPrimPooling = std::make_shared("Pooling"); inline const PrimitivePtr kPrimPoolingGrad = std::make_shared("PoolingGrad"); +inline const PrimitivePtr kPrimROIPooling = std::make_shared("ROIPooling"); inline 
const PrimitivePtr kPrimMaxPool = std::make_shared("MaxPool"); inline const PrimitivePtr kPrimMaxPoolGrad = std::make_shared("MaxPoolGrad"); inline const PrimitivePtr kPrimMaxPoolWithArgmax = std::make_shared("MaxPoolWithArgmax"); @@ -168,6 +199,9 @@ inline const PrimitivePtr kPrimFusedSparseAdam = std::make_shared("Fu inline const PrimitivePtr kPrimFusedBatchNorm = std::make_shared("FusedBatchNorm"); inline const PrimitivePtr kPrimFusedBatchNormEx = std::make_shared("FusedBatchNormEx"); inline const PrimitivePtr kPrimConv2D = std::make_shared("Conv2D"); +inline const PrimitivePtr kPrimFullConnection = std::make_shared("FullConnection"); +inline const PrimitivePtr kPrimConv2DTranspose = std::make_shared("Conv2DTranspose"); +inline const PrimitivePtr kPrimGroupConv2DGradInput = std::make_shared("GroupConv2DGradInput"); inline const PrimitivePtr kPrimFusedBatchNormGrad = std::make_shared("FusedBatchNormGrad"); inline const PrimitivePtr kPrimFusedBatchNormGradEx = std::make_shared("FusedBatchNormGradEx"); inline const PrimitivePtr kPrimBatchNorm = std::make_shared("BatchNorm"); @@ -179,21 +213,34 @@ inline const PrimitivePtr kPrimConv2DBackpropInput = std::make_shared inline const PrimitivePtr kPrimConv2DBackpropFilter = std::make_shared("Conv2DBackpropFilter"); inline const PrimitivePtr kPrimConv3DBackpropInput = std::make_shared("Conv3DBackpropInput"); inline const PrimitivePtr kPrimConv3DBackpropFilter = std::make_shared("Conv3DBackpropFilter"); +inline const PrimitivePtr kPrimCustomNormalize = std::make_shared("CustomNormalize"); inline const PrimitivePtr kPrimDepthwiseConv2dNative = std::make_shared("DepthwiseConv2dNative"); inline const PrimitivePtr kPrimCTCGreedyDecoder = std::make_shared("CTCGreedyDecoder"); inline const PrimitivePtr kPrimDepthwiseConv2dNativeBackpropFilter = std::make_shared("DepthwiseConv2dNativeBackpropFilter"); inline const PrimitivePtr kPrimDepthwiseConv2dNativeBackpropInput = std::make_shared("DepthwiseConv2dNativeBackpropInput"); +inline const PrimitivePtr kPrimDetectionPostProcess = std::make_shared("DetectionPostProcess"); inline const PrimitivePtr kPrimBiasAdd = std::make_shared("BiasAdd"); +inline const PrimitivePtr kPrimBiasGrad = std::make_shared("BiasGrad"); inline const PrimitivePtr kPrimBiasAddGrad = std::make_shared("BiasAddGrad"); +inline const PrimitivePtr kPrimBiasSubGrad = std::make_shared("BiasSubGrad"); +inline const PrimitivePtr kPrimBinaryCrossEntropy = std::make_shared("BinaryCrossEntropy"); +inline const PrimitivePtr kPrimBinaryCrossEntropyGrad = std::make_shared("BinaryCrossEntropyGrad"); +inline const PrimitivePtr kPrimSmoothL1Loss = std::make_shared("SmoothL1Loss"); +inline const PrimitivePtr kPrimSmoothL1LossGrad = std::make_shared("SmoothL1LossGrad"); inline const PrimitivePtr kPrimSoftmaxCrossEntropyWithLogits = std::make_shared("SoftmaxCrossEntropyWithLogits"); +inline const PrimitivePtr kPrimSigmoidCrossEntropyWithLogits = + std::make_shared("SigmoidCrossEntropyWithLogits"); +inline const PrimitivePtr kPrimSigmoidCrossEntropyWithLogitsGrad = + std::make_shared("SigmoidCrossEntropyWithLogitsGrad"); inline const PrimitivePtr kPrimSparseSoftmaxCrossEntropyWithLogits = std::make_shared("SparseSoftmaxCrossEntropyWithLogits"); inline const PrimitivePtr kPrimMomentum = std::make_shared("Momentum"); inline const PrimitivePtr kPrimApplyMomentum = std::make_shared("ApplyMomentum"); inline const PrimitivePtr kPrimLayerNorm = std::make_shared("LayerNorm"); +inline const PrimitivePtr kPrimLrn = std::make_shared("Lrn"); inline const PrimitivePtr 
kPrimLayerNormGrad = std::make_shared("LayerNormGrad"); inline const PrimitivePtr kPrimLayerNormXBackprop = std::make_shared("LayerNormXBackprop"); inline const PrimitivePtr kPrimLayerNormBetaGammaBackprop = std::make_shared("LayerNormBetaGammaBackprop"); @@ -204,18 +251,22 @@ inline const PrimitivePtr kPrimDropout = std::make_shared("Dropout"); inline const PrimitivePtr kPrimUniformReal = std::make_shared("UniformReal"); inline const PrimitivePtr kPrimCudnnUniformReal = std::make_shared("CudnnUniformReal"); inline const PrimitivePtr kPrimOneHot = std::make_shared("OneHot"); +inline const PrimitivePtr kPrimGeLU = std::make_shared("Gelu"); inline const PrimitivePtr kPrimGelu = std::make_shared("Gelu"); inline const PrimitivePtr kPrimGeluGrad = std::make_shared("GeluGrad"); inline const PrimitivePtr kPrimFastGelu = std::make_shared("FastGelu"); inline const PrimitivePtr kPrimFastGeluGrad = std::make_shared("FastGeluGrad"); inline const PrimitivePtr kPrimRelu = std::make_shared("ReLU"); +inline const PrimitivePtr kPrimElu = std::make_shared("Elu"); inline const PrimitivePtr kPrimRelu6 = std::make_shared("ReLU6"); inline const PrimitivePtr kPrimReluV2 = std::make_shared("ReLUV2"); +inline const PrimitivePtr kPrimPRelu = std::make_shared("PReLU"); inline const PrimitivePtr kPrimZerosLike = std::make_shared("ZerosLike"); inline const PrimitivePtr kPrimOnesLike = std::make_shared("OnesLike"); inline const PrimitivePtr kPrimBpropCut = std::make_shared("bprop_cut"); inline const PrimitivePtr kPrimFakeQuantPerLayer = std::make_shared("FakeQuantPerLayer"); inline const PrimitivePtr kPrimFakeQuantPerChannel = std::make_shared("FakeQuantPerChannel"); +inline const PrimitivePtr kPrimFakeQuantWithMinMaxVars = std::make_shared("FakeQuantWithMinMaxVars"); inline const PrimitivePtr kPrimApplyRMSProp = std::make_shared("ApplyRMSProp"); inline const PrimitivePtr kPrimSparseApplyFtrl = std::make_shared("SparseApplyFtrl"); inline const PrimitivePtr kPrimSparseApplyProximalAdagrad = std::make_shared("SparseApplyProximalAdagrad"); @@ -224,6 +275,8 @@ inline const PrimitivePtr kPrimFusedAdamWeightDecay = std::make_shared("SGD"); inline const PrimitivePtr kPrimClipByNormNoDivSum = std::make_shared("ClipByNormNoDivSum"); inline const PrimitivePtr kPrimTensorMove = std::make_shared("TensorMove"); +inline const PrimitivePtr kPrimL2Normalize = std::make_shared("L2Normalize"); +inline const PrimitivePtr kPrimCustomExtractFeatures = std::make_shared("CustomExtractFeatures"); // Comm ops inline const PrimitivePtr kPrimMirror = std::make_shared("_MirrorOperator"); @@ -239,6 +292,12 @@ inline const PrimitivePtr kPrimAllGather = std::make_shared("AllGathe inline const PrimitivePtr kPrimReduceScatter = std::make_shared("ReduceScatter"); inline const PrimitivePtr kPrimMemCpyAsync = std::make_shared("memcpy_async"); inline const PrimitivePtr kPrimFill = std::make_shared("Fill"); +// Quant ops +inline const PrimitivePtr kPrimBatchNormFold = std::make_shared("BatchNormFold"); +inline const PrimitivePtr kPrimFakeQuantWithMinMaxVarsPerChannel = + std::make_shared("FakeQuantWithMinMaxVarsPerChannel"); +// Control ops +inline const PrimitivePtr kPrimMerge = std::make_shared("Merge"); // RowTensor inline const PrimitivePtr kPrimMakeRowTensor = std::make_shared("MakeRowTensor"); inline const PrimitivePtr kPrimRowTensorGetValues = std::make_shared("RowTensorGetValues"); @@ -251,12 +310,22 @@ inline const PrimitivePtr kPrimSparseTensorGetValues = std::make_shared("SparseTensorGetIndices"); inline const PrimitivePtr 
kPrimSparseTensorGetDenseShape = std::make_shared("SparseTensorGetDenseShape"); +// TensorList +inline const PrimitivePtr kPrimTensorListFromTensor = std::make_shared("TensorListFromTensor"); +inline const PrimitivePtr kPrimTensorListReserve = std::make_shared("TensorListReserve"); +inline const PrimitivePtr kPrimTensorListStack = std::make_shared("TensorListStack"); +inline const PrimitivePtr kPrimTensorListSetItem = std::make_shared("TensorListSetItem"); + // Maths +inline const PrimitivePtr kPrimCeil = std::make_shared("Ceil"); +inline const PrimitivePtr kPrimTensorAdd = std::make_shared("TensorAdd"); inline const PrimitivePtr kPrimAdd = std::make_shared("Add"); inline const PrimitivePtr kPrimMatMul = std::make_shared("MatMul"); +inline const PrimitivePtr kPrimMatrixDiag = std::make_shared("MatrixDiag"); inline const PrimitivePtr kPrimBatchMatMul = std::make_shared("BatchMatMul"); inline const PrimitivePtr kPrimMaximumGrad = std::make_shared("MaximumGrad"); inline const PrimitivePtr kPrimMinimumGrad = std::make_shared("MinimumGrad"); +inline const PrimitivePtr kPrimReduce = std::make_shared("Reduce"); inline const PrimitivePtr kPrimReduceMean = std::make_shared("ReduceMean"); inline const PrimitivePtr kPrimReduceSum = std::make_shared("ReduceSum"); inline const PrimitivePtr kPrimReduceAll = std::make_shared("ReduceAll"); @@ -264,6 +333,8 @@ inline const PrimitivePtr kPrimReduceAny = std::make_shared("ReduceAn inline const PrimitivePtr kPrimReduceMax = std::make_shared("ReduceMax"); inline const PrimitivePtr kPrimReduceMin = std::make_shared("ReduceMin"); inline const PrimitivePtr kPrimNeg = std::make_shared("Neg"); +inline const PrimitivePtr kPrimSin = std::make_shared("Sin"); +inline const PrimitivePtr kPrimCos = std::make_shared("Cos"); inline const PrimitivePtr kPrimSub = std::make_shared("Sub"); inline const PrimitivePtr kPrimMul = std::make_shared("Mul"); inline const PrimitivePtr kPrimDiv = std::make_shared("Div"); @@ -279,6 +350,7 @@ inline const PrimitivePtr kPrimSubscalar = std::make_shared("Subscala inline const PrimitivePtr kPrimInplaceAdd = std::make_shared("InplaceAdd"); inline const PrimitivePtr kPrimInplaceSub = std::make_shared("InplaceSub"); inline const PrimitivePtr kPrimPow = std::make_shared("Pow"); +inline const PrimitivePtr kPrimPower = std::make_shared("Power"); inline const PrimitivePtr kPrimRealDiv = std::make_shared("RealDiv"); inline const PrimitivePtr kPrimFloorDiv = std::make_shared("FloorDiv"); inline const PrimitivePtr kPrimSqrt = std::make_shared("Sqrt"); @@ -292,12 +364,13 @@ inline const PrimitivePtr kPrimLog = std::make_shared("Log"); inline const PrimitivePtr kPrimRsqrt = std::make_shared("Rsqrt"); inline const PrimitivePtr kPrimSplitV = std::make_shared("SplitV"); inline const PrimitivePtr kPrimLinSpace = std::make_shared("LinSpace"); +inline const PrimitivePtr kPrimNonMaxSuppression = std::make_shared("NonMaxSuppression"); inline const PrimitivePtr kPrimSign = std::make_shared("Sign"); -inline const PrimitivePtr kPrimSquaredDifference = std::make_shared("SquaredDifference"); -inline const PrimitivePtr kPrimAsin = std::make_shared("Asin"); inline const PrimitivePtr kPrimACos = std::make_shared("ACos"); inline const PrimitivePtr kPrimAsinGrad = std::make_shared("AsinGrad"); inline const PrimitivePtr kPrimACosGrad = std::make_shared("ACosGrad"); +inline const PrimitivePtr kPrimFloorMod = std::make_shared("FloorMod"); +inline const PrimitivePtr kPrimWhere = std::make_shared("Where"); // Statements inline const PrimitivePtr kPrimReturn = 
std::make_shared("return"); @@ -323,6 +396,7 @@ inline const PrimitivePtr kPrimGenerateShapeIndex = std::make_shared( inline const PrimitivePtr kPrimGenerateInverseIndex = std::make_shared("generate_inverse_index"); // Debug ops +inline const PrimitivePtr kPrimAssert = std::make_shared("Assert"); inline const PrimitivePtr kPrimScalarSummary = std::make_shared("ScalarSummary"); inline const PrimitivePtr kPrimImageSummary = std::make_shared("ImageSummary"); inline const PrimitivePtr kPrimTensorSummary = std::make_shared("TensorSummary"); @@ -349,6 +423,13 @@ inline const PrimitivePtr kPrimInDict = std::make_shared("in_dict"); inline const PrimitivePtr kPrimNotInDict = std::make_shared("not_in_dict"); inline const PrimitivePtr kPrimIsConsant = std::make_shared("is_constant"); inline const PrimitivePtr kPrimEquivFormat = std::make_shared("EquivFormat"); +inline const PrimitivePtr kPrimLshProjection = std::make_shared("LshProjection"); +inline const PrimitivePtr kPrimHashtableLookup = std::make_shared("HashtableLookup"); +inline const PrimitivePtr kPrimCustomPredict = std::make_shared("CustomPredict"); +inline const PrimitivePtr kPrimStack = std::make_shared("Stack"); +inline const PrimitivePtr kPrimPriorBox = std::make_shared("PriorBox"); +inline const PrimitivePtr kPrimQuantDTypeCast = std::make_shared("QuantDTypeCast"); +inline const PrimitivePtr kPrimWhile = std::make_shared("While"); // Structures inline const PrimitivePtr kPrimMakeList = std::make_shared("make_list"); @@ -371,7 +452,7 @@ inline const PrimitivePtr kPrimGetRefKey = std::make_shared("get_ref_ inline const PrimitivePtr kPrimMakeRef = std::make_shared("make_ref"); inline const PrimitivePtr kPrimGetRefValue = std::make_shared("get_ref_value"); -// Other primitive not used by backend but used in core; +// Other primitve not used by backend but used in core; inline const PrimitivePtr kPrimStateSetItem = std::make_shared("state_setitem"); inline const PrimitivePtr kPrimJ = std::make_shared("J"); @@ -382,6 +463,44 @@ inline const PrimitivePtr kPrimMakeDict = std::make_shared("make_dict // GraphKernel ops inline const PrimitivePtr kPrimInplaceAssign = std::make_shared("InplaceAssign"); +// Only used in lite +inline const PrimitivePtr kPrimLeakyRelu = std::make_shared("LeakyRelu"); +inline const PrimitivePtr kPrimConstant = std::make_shared("Constant"); +inline const PrimitivePtr kPrimLocalResponseNormalization = std::make_shared("LocalResponseNormalization"); +inline const PrimitivePtr kPrimFftReal = std::make_shared("FftReal"); +inline const PrimitivePtr kPrimMfcc = std::make_shared("Mfcc"); +inline const PrimitivePtr kPrimRfft = std::make_shared("Rfft"); +inline const PrimitivePtr kPrimFftImag = std::make_shared("FftImag"); +inline const PrimitivePtr kPrimSkipGram = std::make_shared("SkipGram"); +inline const PrimitivePtr kPrimConv2DFusion = std::make_shared("Conv2DFusion"); +inline const PrimitivePtr kPrimConv2dTransposeFusion = std::make_shared("Conv2dTransposeFusion"); +inline const PrimitivePtr kPrimDepthWiseConv2DFusion = std::make_shared("DepthWiseConv2DFusion"); +inline const PrimitivePtr kPrimAddFusion = std::make_shared("AddFusion"); +inline const PrimitivePtr kPrimScaleFusion = std::make_shared("ScaleFusion"); +inline const PrimitivePtr kPrimSubFusion = std::make_shared("SubFusion"); +inline const PrimitivePtr kPrimMulFusion = std::make_shared("MulFusion"); +inline const PrimitivePtr kPrimSigmoid = std::make_shared("Sigmoid"); +inline const PrimitivePtr kPrimClip = std::make_shared("Clip"); +inline const PrimitivePtr 
kPrimHardTanh = std::make_shared("HardTanh"); +inline const PrimitivePtr kPrimDepthWiseConv2DTransposeFusion = + std::make_shared("DepthWiseConv2DTransposeFusion"); +inline const PrimitivePtr kPrimArgMinFusion = std::make_shared("ArgMinFusion"); +inline const PrimitivePtr kPrimArgMaxFusion = std::make_shared("ArgMaxFusion"); +inline const PrimitivePtr kPrimSpaceToDepth = std::make_shared("SpaceToDepth"); +inline const PrimitivePtr kPrimPadFusion = std::make_shared("PadFusion"); +inline const PrimitivePtr kPrimPowFusion = std::make_shared("PowFusion"); +inline const PrimitivePtr kPrimResize = std::make_shared("Resize"); +inline const PrimitivePtr kPrimConv2dTranspose = std::make_shared("Conv2dTranspose"); +inline const PrimitivePtr kPrimArgMinWithValue = std::make_shared("ArgMinWithValue"); +inline const PrimitivePtr kPrimIf = std::make_shared("If"); +inline const PrimitivePtr kPrimAvgPoolFusion = std::make_shared("AvgPoolFusion"); +inline const PrimitivePtr kPrimMaxPoolFusion = std::make_shared("MaxPoolFusion"); +inline const PrimitivePtr kPrimActivation = std::make_shared("Activation"); +inline const PrimitivePtr kPrimTopKFusion = std::make_shared("TopKFusion"); +inline const PrimitivePtr kPrimTileFusion = std::make_shared("TileFusion"); +inline const PrimitivePtr kPrimReduceFusion = std::make_shared("ReduceFusion"); +inline const PrimitivePtr kPrimLayerNormFusion = std::make_shared("LayerNormFusion"); + class DoSignaturePrimitive : public Primitive { public: explicit DoSignaturePrimitive(const std::string &name, const ValuePtr &function) diff --git a/mindspore/core/c_ops/abs.cc b/mindspore/core/c_ops/abs.cc deleted file mode 100644 index df65c135ddd..00000000000 --- a/mindspore/core/c_ops/abs.cc +++ /dev/null @@ -1,19 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -#include "c_ops/abs.h" -namespace mindspore { -REGISTER_PRIMITIVE_C(kNameAbs, Abs); -} // namespace mindspore diff --git a/mindspore/core/c_ops/apply_momentum.cc b/mindspore/core/c_ops/apply_momentum.cc deleted file mode 100644 index 5b5da739bec..00000000000 --- a/mindspore/core/c_ops/apply_momentum.cc +++ /dev/null @@ -1,51 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include "c_ops/apply_momentum.h" -#include "c_ops/op_utils.h" -#include "utils/check_convert_utils.h" - -namespace mindspore { -void ApplyMomentum::Init(bool use_nesterov, bool use_locking, float gradient_scale) { - this->set_use_nesterov(use_nesterov); - this->set_use_locking(use_locking); - this->set_gradient_scale(gradient_scale); -} - -void ApplyMomentum::set_use_nesterov(bool use_nesterov) { this->AddAttr(kUseNesterov, MakeValue(use_nesterov)); } - -void ApplyMomentum::set_use_locking(bool use_locking) { this->AddAttr(kUseLocking, MakeValue(use_locking)); } - -void ApplyMomentum::set_gradient_scale(float gradient_scale) { - this->AddAttr(kGradientScale, MakeValue(gradient_scale)); -} - -bool ApplyMomentum::get_use_nesterov() const { - auto value_ptr = GetAttr(kUseNesterov); - return GetValue(value_ptr); -} - -bool ApplyMomentum::get_use_locking() const { - auto value_ptr = GetAttr(kUseLocking); - return GetValue(value_ptr); -} - -float ApplyMomentum::get_gradient_scale() { - auto value_ptr = GetAttr(kGradientScale); - return GetValue(value_ptr); -} -REGISTER_PRIMITIVE_C(kNameApplyMomentum, ApplyMomentum); -} // namespace mindspore diff --git a/mindspore/core/c_ops/audio_spectrogram.cc b/mindspore/core/c_ops/audio_spectrogram.cc deleted file mode 100644 index f9a30c58c3b..00000000000 --- a/mindspore/core/c_ops/audio_spectrogram.cc +++ /dev/null @@ -1,54 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include "c_ops/audio_spectrogram.h" -#include -#include -#include -#include -#include -#include "c_ops/op_utils.h" -#include "utils/check_convert_utils.h" -#include "abstract/primitive_infer_map.h" - -namespace mindspore { - -void AudioSpectrogram::set_window_size(const int64_t &window_size) { - this->AddAttr(kWindowSize, MakeValue(window_size)); -} -int64_t AudioSpectrogram::get_window_size() const { - auto value_ptr = GetAttr(kWindowSize); - return GetValue(value_ptr); -} - -void AudioSpectrogram::set_stride(const int64_t &stride) { this->AddAttr(kStride, MakeValue(stride)); } -int64_t AudioSpectrogram::get_stride() const { - auto value_ptr = GetAttr(kStride); - return GetValue(value_ptr); -} - -void AudioSpectrogram::set_mag_square(const bool &mag_square) { this->AddAttr(kMagSquare, MakeValue(mag_square)); } -bool AudioSpectrogram::get_mag_square() const { - auto value_ptr = GetAttr(kMagSquare); - return GetValue(value_ptr); -} -void AudioSpectrogram::Init(const int64_t &window_size, const int64_t &stride, const bool &mag_square) { - this->set_window_size(window_size); - this->set_stride(stride); - this->set_mag_square(mag_square); -} -REGISTER_PRIMITIVE_C(kNameAudioSpectrogram, AudioSpectrogram); -} // namespace mindspore diff --git a/mindspore/core/c_ops/batch_norm.cc b/mindspore/core/c_ops/batch_norm.cc deleted file mode 100644 index de912feb421..00000000000 --- a/mindspore/core/c_ops/batch_norm.cc +++ /dev/null @@ -1,59 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#include -#include -#include -#include -#include -#include "c_ops/batch_norm.h" -#include "abstract/primitive_infer_map.h" -#include "utils/check_convert_utils.h" - -namespace mindspore { -void BatchNorm::Init(bool is_training, float epsilon, const Format &format) { - set_is_training(is_training); - set_epsilon(epsilon); - set_format(format); -} - -void BatchNorm::set_is_training(bool is_training) { this->AddAttr(kIsTraining, MakeValue(is_training)); } - -void BatchNorm::set_epsilon(float epsilon) { - CheckAndConvertUtils::CheckInRange(kEpsilon, epsilon, kIncludeBoth, {0.0, 1.0}, this->name()); - this->AddAttr(kEpsilon, MakeValue(epsilon)); -} - -void BatchNorm::set_format(const Format &format) { - int64_t f = format; - this->AddAttr(kFormat, MakeValue(f)); -} - -bool BatchNorm::get_is_trainging() { - auto value_ptr = GetAttr(kIsTraining); - return GetValue(value_ptr); -} - -float BatchNorm::get_epsilon() { - auto value_ptr = GetAttr(kEpsilon); - return GetValue(value_ptr); -} - -Format BatchNorm::get_format() const { - auto value_ptr = GetAttr(kFormat); - return Format(GetValue(value_ptr)); -} -REGISTER_PRIMITIVE_C(kNameBatchNorm, BatchNorm); -} // namespace mindspore diff --git a/mindspore/core/c_ops/batch_norm_fold.cc b/mindspore/core/c_ops/batch_norm_fold.cc deleted file mode 100644 index df6483f50c5..00000000000 --- a/mindspore/core/c_ops/batch_norm_fold.cc +++ /dev/null @@ -1,21 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "c_ops/batch_norm_fold.h" - -namespace mindspore { -REGISTER_PRIMITIVE_C(kNameBatchNormFold, BatchNormFold); -} // namespace mindspore diff --git a/mindspore/core/c_ops/binary_cross_entropy_grad.cc b/mindspore/core/c_ops/binary_cross_entropy_grad.cc deleted file mode 100644 index 54a641a9e93..00000000000 --- a/mindspore/core/c_ops/binary_cross_entropy_grad.cc +++ /dev/null @@ -1,31 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include "c_ops/binary_cross_entropy_grad.h" - -namespace mindspore { -void BinaryCrossEntropyGrad::Init(const std::string &reduction) { set_reduction(reduction); } - -void BinaryCrossEntropyGrad::set_reduction(const std::string &reduction) { - CheckAndConvertUtils::CheckString(kReduction, reduction, {"none", "mean", "sum"}, name()); - this->AddAttr(kReduction, MakeValue(reduction)); -} -std::string BinaryCrossEntropyGrad::get_reduction() const { - auto value_ptr = GetAttr(kReduction); - return GetValue(value_ptr); -} -REGISTER_PRIMITIVE_C(kNameBinaryCrossEntropyGrad, BinaryCrossEntropyGrad); -} // namespace mindspore diff --git a/mindspore/core/c_ops/broadcast.cc b/mindspore/core/c_ops/broadcast.cc deleted file mode 100644 index b113c9ea2f2..00000000000 --- a/mindspore/core/c_ops/broadcast.cc +++ /dev/null @@ -1,42 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "c_ops/broadcast.h" -#include "c_ops/op_utils.h" -#include "utils/check_convert_utils.h" - -namespace mindspore { -void Broadcast::Init(int64_t root_rank, const std::string &group) { - this->set_root_rank(root_rank); - this->set_group(group); -} -void Broadcast::set_root_rank(int64_t root_rank) { this->AddAttr(kKeepProb, MakeValue(root_rank)); } - -void Broadcast::set_group(const std::string &group) { - CheckAndConvertUtils::CheckString(kGroup, group, {"hccl_world_group", "hccl_world_group"}, this->name()); - this->AddAttr(kGroup, MakeValue(group)); -} -int64_t Broadcast::get_root_rank() { - auto value_ptr = this->GetAttr(kRootRank); - return GetValue(value_ptr); -} - -std::string Broadcast::get_group() const { - auto value_ptr = this->GetAttr(kGroup); - return GetValue(value_ptr); -} -REGISTER_PRIMITIVE_C(kNameBroadcast, Broadcast); -} // namespace mindspore diff --git a/mindspore/core/c_ops/ceil.cc b/mindspore/core/c_ops/ceil.cc deleted file mode 100644 index 3932883f829..00000000000 --- a/mindspore/core/c_ops/ceil.cc +++ /dev/null @@ -1,21 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include "c_ops/ceil.h" - -namespace mindspore { -REGISTER_PRIMITIVE_C(kNameCeil, Ceil); -} diff --git a/mindspore/core/c_ops/cos.cc b/mindspore/core/c_ops/cos.cc deleted file mode 100644 index 8dd5db035ea..00000000000 --- a/mindspore/core/c_ops/cos.cc +++ /dev/null @@ -1,21 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "c_ops/cos.h" - -namespace mindspore { -REGISTER_PRIMITIVE_C(kNameCos, Cos); -} diff --git a/mindspore/core/c_ops/custom_predict.cc b/mindspore/core/c_ops/custom_predict.cc deleted file mode 100644 index 0873abf0299..00000000000 --- a/mindspore/core/c_ops/custom_predict.cc +++ /dev/null @@ -1,44 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "c_ops/custom_predict.h" -#include "c_ops/op_utils.h" -#include "utils/check_convert_utils.h" -#include "abstract/primitive_infer_map.h" - -namespace mindspore { -void CustomPredict::Init(int64_t outputNum, float weight_threshold) { - this->set_outputNum(outputNum); - this->set_weight_threshold(weight_threshold); -} - -void CustomPredict::set_outputNum(int64_t outputNum) { this->AddAttr(kOutputNum, MakeValue(outputNum)); } - -int64_t CustomPredict::get_outputNum() const { - auto value_ptr = this->GetAttr(kOutputNum); - return GetValue(value_ptr); -} - -void CustomPredict::set_weight_threshold(float weight_threshold) { - this->AddAttr(kWeightThreshold, MakeValue(weight_threshold)); -} - -float CustomPredict::get_weight_threshold() const { - auto value_ptr = this->GetAttr(kWeightThreshold); - return GetValue(value_ptr); -} -REGISTER_PRIMITIVE_C(kNameCustomPredict, CustomPredict); -} // namespace mindspore diff --git a/mindspore/core/c_ops/div.cc b/mindspore/core/c_ops/div.cc deleted file mode 100644 index 8e5356b6cb1..00000000000 --- a/mindspore/core/c_ops/div.cc +++ /dev/null @@ -1,21 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "c_ops/div.h" - -namespace mindspore { -REGISTER_PRIMITIVE_C(kNameDiv, Div); -} // namespace mindspore diff --git a/mindspore/core/c_ops/equal.cc b/mindspore/core/c_ops/equal.cc deleted file mode 100644 index c6cd04fb673..00000000000 --- a/mindspore/core/c_ops/equal.cc +++ /dev/null @@ -1,20 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -#include "c_ops/equal.h" - -namespace mindspore { -REGISTER_PRIMITIVE_C(kNameEqual, Equal); -} // namespace mindspore diff --git a/mindspore/core/c_ops/exp.cc b/mindspore/core/c_ops/exp.cc deleted file mode 100644 index 15076dcf947..00000000000 --- a/mindspore/core/c_ops/exp.cc +++ /dev/null @@ -1,20 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -#include "c_ops/exp.h" - -namespace mindspore { -REGISTER_PRIMITIVE_C(kNameExp, Exp); -} // namespace mindspore diff --git a/mindspore/core/c_ops/fake_quant_with_min_max_vars.cc b/mindspore/core/c_ops/fake_quant_with_min_max_vars.cc deleted file mode 100644 index 2e8b029b0a1..00000000000 --- a/mindspore/core/c_ops/fake_quant_with_min_max_vars.cc +++ /dev/null @@ -1,44 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include "c_ops/fake_quant_with_min_max_vars.h" -#include "c_ops/op_utils.h" -#include "utils/check_convert_utils.h" -#include "abstract/primitive_infer_map.h" - -namespace mindspore { -void FakeQuantWithMinMaxVars::Init(const bool &narrow_range, int64_t num_bits) { - this->set_narrow_range(narrow_range); - this->set_num_bits(num_bits); -} - -void FakeQuantWithMinMaxVars::set_narrow_range(const bool &narrow_range) { - this->AddAttr(kNarrowRange, MakeValue(narrow_range)); -} - -bool FakeQuantWithMinMaxVars::get_narrow_range() const { - auto value_ptr = this->GetAttr(kNarrowRange); - return GetValue(value_ptr); -} - -void FakeQuantWithMinMaxVars::set_num_bits(int64_t num_bits) { this->AddAttr(kNumBits, MakeValue(num_bits)); } - -int64_t FakeQuantWithMinMaxVars::get_num_bits() const { - auto value_ptr = this->GetAttr(kNumBits); - return GetValue(value_ptr); -} -REGISTER_PRIMITIVE_C(kNameFakeQuantWithMinMaxVars, FakeQuantWithMinMaxVars); -} // namespace mindspore diff --git a/mindspore/core/c_ops/fft_imag.cc b/mindspore/core/c_ops/fft_imag.cc deleted file mode 100644 index ef63040e83b..00000000000 --- a/mindspore/core/c_ops/fft_imag.cc +++ /dev/null @@ -1,22 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "c_ops/fft_imag.h" -#include - -namespace mindspore { -REGISTER_PRIMITIVE_C(kNameFftImag, FftImag); -} diff --git a/mindspore/core/c_ops/flatten_grad.cc b/mindspore/core/c_ops/flatten_grad.cc deleted file mode 100644 index 24c7c932829..00000000000 --- a/mindspore/core/c_ops/flatten_grad.cc +++ /dev/null @@ -1,21 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "c_ops/flatten_grad.h" - -namespace mindspore { -REGISTER_PRIMITIVE_C(kNameFlattenGrad, FlattenGrad); -} diff --git a/mindspore/core/c_ops/hashtable_lookup.cc b/mindspore/core/c_ops/hashtable_lookup.cc deleted file mode 100644 index 145b4564199..00000000000 --- a/mindspore/core/c_ops/hashtable_lookup.cc +++ /dev/null @@ -1,22 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "c_ops/hashtable_lookup.h" -#include "utils/check_convert_utils.h" - -namespace mindspore { -REGISTER_PRIMITIVE_C(kNameHashtableLookup, HashtableLookup); -} // namespace mindspore diff --git a/mindspore/core/c_ops/less.cc b/mindspore/core/c_ops/less.cc deleted file mode 100644 index f7e89ee28f6..00000000000 --- a/mindspore/core/c_ops/less.cc +++ /dev/null @@ -1,21 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "c_ops/less.h" - -namespace mindspore { -REGISTER_PRIMITIVE_C(kNameLess, Less); -} diff --git a/mindspore/core/c_ops/less_equal.cc b/mindspore/core/c_ops/less_equal.cc deleted file mode 100644 index 6efc85d9bd3..00000000000 --- a/mindspore/core/c_ops/less_equal.cc +++ /dev/null @@ -1,20 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -#include "c_ops/less_equal.h" - -namespace mindspore { -REGISTER_PRIMITIVE_C(kNameLessEqual, LessEqual); -} // namespace mindspore diff --git a/mindspore/core/c_ops/local_response_normalization.cc b/mindspore/core/c_ops/local_response_normalization.cc deleted file mode 100644 index 2ade03e0832..00000000000 --- a/mindspore/core/c_ops/local_response_normalization.cc +++ /dev/null @@ -1,65 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include "c_ops/local_response_normalization.h" -#include -#include -#include -#include -#include -#include "c_ops/op_utils.h" -#include "utils/check_convert_utils.h" -#include "abstract/primitive_infer_map.h" - -namespace mindspore { -void LocalResponseNormalization::set_depth_radius(const int64_t &depth_radius) { - this->AddAttr(kDepthRadius, MakeValue(depth_radius)); -} - -int64_t LocalResponseNormalization::get_depth_radius() const { - auto value_ptr = GetAttr(kDepthRadius); - return GetValue(value_ptr); -} - -void LocalResponseNormalization::set_bias(const float &bias) { this->AddAttr(kBias, MakeValue(bias)); } - -float LocalResponseNormalization::get_bias() const { - auto value_ptr = GetAttr(kBias); - return GetValue(value_ptr); -} - -void LocalResponseNormalization::set_alpha(const float &alpha) { this->AddAttr(kAlpha, MakeValue(alpha)); } - -float LocalResponseNormalization::get_alpha() const { - auto value_ptr = GetAttr(kAlpha); - return GetValue(value_ptr); -} - -void LocalResponseNormalization::set_beta(const float &beta) { this->AddAttr(kBeta, MakeValue(beta)); } - -float LocalResponseNormalization::get_beta() const { - auto value_ptr = GetAttr(kBeta); - return GetValue(value_ptr); -} -void LocalResponseNormalization::Init(const int64_t &depth_radius, const float &bias, const float &alpha, - const float &beta) { - this->set_depth_radius(depth_radius); - this->set_bias(bias); - this->set_alpha(alpha); - this->set_beta(beta); -} -REGISTER_PRIMITIVE_C(kNameLocalResponseNormalization, LocalResponseNormalization); -} // namespace mindspore diff --git a/mindspore/core/c_ops/log.cc b/mindspore/core/c_ops/log.cc deleted file mode 100644 index f3703c5cf8d..00000000000 --- a/mindspore/core/c_ops/log.cc +++ /dev/null @@ -1,21 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "c_ops/log.h" - -namespace mindspore { -REGISTER_PRIMITIVE_C(kNameLog, Log); -} diff --git a/mindspore/core/c_ops/logical_not.cc b/mindspore/core/c_ops/logical_not.cc deleted file mode 100644 index f701cc1aaec..00000000000 --- a/mindspore/core/c_ops/logical_not.cc +++ /dev/null @@ -1,20 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#include "c_ops/logical_not.h" - -namespace mindspore { -REGISTER_PRIMITIVE_C(kNameLogicalNot, LogicalNot); -} // namespace mindspore diff --git a/mindspore/core/c_ops/logical_or.cc b/mindspore/core/c_ops/logical_or.cc deleted file mode 100644 index 1b9131ff082..00000000000 --- a/mindspore/core/c_ops/logical_or.cc +++ /dev/null @@ -1,20 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -#include "c_ops/logical_or.h" - -namespace mindspore { -REGISTER_PRIMITIVE_C(kNameLogicalOr, LogicalOr); -} // namespace mindspore diff --git a/mindspore/core/c_ops/lstm.cc b/mindspore/core/c_ops/lstm.cc deleted file mode 100644 index 077470cf4bb..00000000000 --- a/mindspore/core/c_ops/lstm.cc +++ /dev/null @@ -1,82 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include "c_ops/lstm.h" - -namespace mindspore { -void LSTM::set_input_size(const int64_t &input_size) { - CheckAndConvertUtils::CheckInteger(kInput_size, input_size, kGreaterThan, 0, this->name()); - AddAttr(kInput_size, MakeValue(input_size)); -} -int64_t LSTM::get_input_size() const { - auto value_ptr = this->GetAttr(kInput_size); - return GetValue(value_ptr); -} -void LSTM::set_hidden_size(const int64_t &hidden_size) { - CheckAndConvertUtils::CheckInteger(kHidden_size, hidden_size, kGreaterThan, 0, this->name()); - AddAttr(kHidden_size, MakeValue(hidden_size)); -} -int64_t LSTM::get_hidden_size() const { - auto value_ptr = this->GetAttr(kHidden_size); - return GetValue(value_ptr); -} -void LSTM::set_num_layers(const int64_t &num_layers) { - CheckAndConvertUtils::CheckInteger(kNum_layers, num_layers, kGreaterThan, 0, this->name()); - AddAttr(kNum_layers, MakeValue(kNum_layers)); -} -int64_t LSTM::get_num_layers() const { - auto value_ptr = this->GetAttr(kNum_layers); - return GetValue(value_ptr); -} -void LSTM::set_has_bias(const bool &has_bias) { AddAttr(kHasBias, MakeValue(has_bias)); } -bool LSTM::get_has_bias() const { - auto value_ptr = this->GetAttr(kHasBias); - return GetValue(value_ptr); -} -void LSTM::set_dropout(const float &dropout) { - CheckAndConvertUtils::CheckInRange(kDropout, dropout, kIncludeBoth, {0, 1}, this->name()); - AddAttr(kDropout, MakeValue(dropout)); -} -float LSTM::get_dropout() const { - auto value_ptr = this->GetAttr(kDropout); - return GetValue(value_ptr); -} -void LSTM::set_bidirectional(const bool &bidirectional) { AddAttr(kBidirectional, MakeValue(bidirectional)); } -bool LSTM::get_bidirectional() const { - auto value_ptr = this->GetAttr(kBidirectional); - return GetValue(value_ptr); -} -void LSTM::set_num_directions(const int64_t &num_directions) { AddAttr(kNumDirections, MakeValue(num_directions)); } -int64_t LSTM::get_num_directions() const { - auto value_ptr = this->GetAttr(kNumDirections); - return GetValue(value_ptr); -} -void LSTM::Init(const int64_t &input_size, const int64_t &hidden_size, const int64_t &num_layers, const bool &has_bias, - const float &dropout, const bool &bidirectional) { - this->set_input_size(input_size); - this->set_hidden_size(hidden_size); - this->set_num_layers(num_layers); - this->set_has_bias(has_bias); - this->set_dropout(dropout); - this->set_bidirectional(bidirectional); - if (bidirectional) { - this->set_num_directions(2); - } else { - this->set_num_directions(1); - } -} -REGISTER_PRIMITIVE_C(kNameLSTM, LSTM); -} // namespace mindspore diff --git a/mindspore/core/load_mindir/anf_model_parser.cc b/mindspore/core/load_mindir/anf_model_parser.cc index 2210881494b..8df89433817 100644 --- a/mindspore/core/load_mindir/anf_model_parser.cc +++ b/mindspore/core/load_mindir/anf_model_parser.cc @@ -25,7 +25,7 @@ #include #include "ir/tensor.h" #include "ir/param_info.h" -#include "c_ops/primitive_c.h" +#include "ops/primitive_c.h" #include "abstract/abstract_value.h" #include "utils/log_adapter.h" #include "utils/shape_utils.h" @@ -676,7 +676,7 @@ CNodePtr MSANFModelParser::BuildCNodeForFuncGraph(const FuncGraphPtr &outputFunc const std::string &node_type = node_proto.op_type(); std::shared_ptr prim; - auto op_primc_fns = OpPrimCRegister::GetInstance().GetPrimCMap(); + auto op_primc_fns = ops::OpPrimCRegister::GetInstance().GetPrimCMap(); if (op_primc_fns.find(node_type) != op_primc_fns.end()) { prim = op_primc_fns[node_type](); } else { diff --git a/mindspore/core/ops/abs.cc b/mindspore/core/ops/abs.cc new file mode 
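// Illustration only, not part of the patch: a minimal sketch of how the creator
// map consumed by BuildCNodeForFuncGraph above is meant to be used once ops are
// registered with REGISTER_PRIMITIVE_C. The helper name and the assumption that
// a registered creator returns std::shared_ptr<ops::PrimitiveC> are mine, not
// taken from this change.
#include <memory>
#include <string>
#include "ops/primitive_c.h"

std::shared_ptr<mindspore::ops::PrimitiveC> MakePrimCByName(const std::string &node_type) {
  auto op_primc_fns = mindspore::ops::OpPrimCRegister::GetInstance().GetPrimCMap();
  auto iter = op_primc_fns.find(node_type);
  if (iter == op_primc_fns.end()) {
    return nullptr;  // the parser falls back to a plain Primitive in this case
  }
  return iter->second();  // e.g. node_type == "Abs" yields a fresh ops::Abs
}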
100644 index 00000000000..1e5b04a242e --- /dev/null +++ b/mindspore/core/ops/abs.cc @@ -0,0 +1,62 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "ops/abs.h" +#include +#include +#include +#include +#include +#include +#include "ops/op_utils.h" +#include "utils/check_convert_utils.h" +#include "abstract/primitive_infer_map.h" + +namespace mindspore { +namespace ops { +namespace { +abstract::ShapePtr InferShape(const PrimitivePtr &primitive, const std::vector &input_args) { + MS_EXCEPTION_IF_NULL(primitive); + auto abs_prim = primitive->cast(); + MS_EXCEPTION_IF_NULL(abs_prim); + auto prim_name = abs_prim->name(); + for (const auto &item : input_args) { + MS_EXCEPTION_IF_NULL(item); + } + auto in_shape = CheckAndConvertUtils::ConvertShapePtrToShape("x_shape", input_args[0]->GetShapeTrack(), prim_name); + return std::make_shared(in_shape); +} + +TypePtr InferType(const PrimitivePtr &prim, const std::vector &input_args) { + if (std::any_of(input_args.begin(), input_args.end(), [](AbstractBasePtr a) { return a == nullptr; })) { + MS_LOG(EXCEPTION) << "nullptr"; + } + std::map types; + types.emplace("input_x", input_args[0]->BuildType()); + auto infer_type = CheckAndConvertUtils::CheckTensorTypeSame(types, common_valid_types, prim->name()); + return TypeIdToType(infer_type); +} +} // namespace + +AbstractBasePtr AbsInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args) { + return std::make_shared(InferType(primitive, input_args), + InferShape(primitive, input_args)->shape()); +} +REGISTER_PRIMITIVE_EVAL_IMPL(Abs, prim::kPrimAbs, AbsInfer); +REGISTER_PRIMITIVE_C(kNameAbs, Abs); +} // namespace ops +} // namespace mindspore diff --git a/mindspore/core/c_ops/abs.h b/mindspore/core/ops/abs.h similarity index 69% rename from mindspore/core/c_ops/abs.h rename to mindspore/core/ops/abs.h index 1c9e72e41b4..1dae00eb525 100644 --- a/mindspore/core/c_ops/abs.h +++ b/mindspore/core/ops/abs.h @@ -14,13 +14,17 @@ * limitations under the License. 
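// Illustration only, not part of the patch: a rough sketch of calling the new
// Abs infer entry point directly. `x_abstract` stands for an already-built
// abstract::AbstractBasePtr describing the input tensor; how it is built is
// assumed and out of scope here.
#include <memory>
#include <vector>
#include "ops/abs.h"

mindspore::abstract::AbstractBasePtr InferAbsSketch(const mindspore::abstract::AbstractBasePtr &x_abstract) {
  auto prim = std::make_shared<mindspore::ops::Abs>();  // registered under "Abs"
  // The output keeps the input shape; the dtype must be one of common_valid_types.
  return mindspore::ops::AbsInfer(nullptr, prim, {x_abstract});
}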
*/ -#ifndef MINDSPORE_CORE_C_OPS_ABS_H_ -#define MINDSPORE_CORE_C_OPS_ABS_H_ -#include "c_ops/primitive_c.h" +#ifndef MINDSPORE_CORE_OPS_ABS_H_ +#define MINDSPORE_CORE_OPS_ABS_H_ +#include +#include + +#include "ops/primitive_c.h" #include "abstract/abstract_value.h" #include "utils/check_convert_utils.h" namespace mindspore { +namespace ops { constexpr auto kNameAbs = "Abs"; class Abs : public PrimitiveC { public: @@ -29,6 +33,10 @@ class Abs : public PrimitiveC { MS_DECLARE_PARENT(Abs, PrimitiveC); void Init() {} }; +AbstractBasePtr AbsInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args); +using PrimAbsPtr = std::shared_ptr; +} // namespace ops } // namespace mindspore -#endif // MINDSPORE_CORE_C_OPS_ABS_H_ +#endif // MINDSPORE_CORE_OPS_ABS_H_ diff --git a/mindspore/core/ops/adam.cc b/mindspore/core/ops/adam.cc new file mode 100644 index 00000000000..909c10a75b1 --- /dev/null +++ b/mindspore/core/ops/adam.cc @@ -0,0 +1,88 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "ops/adam.h" +#include "ops/op_utils.h" +#include "utils/check_convert_utils.h" + +namespace mindspore { +namespace ops { +namespace { +abstract::AbstractBasePtr AdamInfer(const PrimitivePtr &primitive, const std::vector &input_args) { + MS_EXCEPTION_IF_NULL(primitive); + auto Adam_prim = primitive->cast(); + MS_EXCEPTION_IF_NULL(Adam_prim); + auto prim_name = Adam_prim->name(); + + // infer shape + auto var_shape = CheckAndConvertUtils::ConvertShapePtrToShape("var_shape", input_args[0]->GetShapeTrack(), prim_name); + auto m_shape = CheckAndConvertUtils::ConvertShapePtrToShape("m_shape", input_args[1]->GetShapeTrack(), prim_name); + auto v_shape = CheckAndConvertUtils::ConvertShapePtrToShape("v_shape", input_args[2]->GetShapeTrack(), prim_name); + auto grad_shape = + CheckAndConvertUtils::ConvertShapePtrToShape("grad_shape", input_args[9]->GetShapeTrack(), prim_name); + CheckAndConvertUtils::Check("var_shape", var_shape, kEqual, "m_shape", m_shape, prim_name); + CheckAndConvertUtils::Check("var_shape", var_shape, kEqual, "v_shape", v_shape, prim_name); + CheckAndConvertUtils::Check("var_shape", var_shape, kEqual, "grad_shape", grad_shape, prim_name); + + // infer type + auto var_type = input_args[0]->BuildType(); + auto m_type = input_args[1]->BuildType(); + auto v_type = input_args[2]->BuildType(); + auto grad_type = input_args[9]->BuildType(); + CheckAndConvertUtils::CheckTensorTypeValid("var_type", var_type, common_valid_types, prim_name); + CheckAndConvertUtils::CheckTensorTypeValid("m_type", m_type, common_valid_types, prim_name); + CheckAndConvertUtils::CheckTensorTypeValid("v_type", v_type, common_valid_types, prim_name); + CheckAndConvertUtils::CheckTensorTypeValid("grad_type", grad_type, common_valid_types, prim_name); + + auto infer_var_type = var_type->cast()->element(); + auto infer_m_type = m_type->cast()->element(); + auto infer_v_type = v_type->cast()->element(); + // auto 
infer_grad_type = grad_type->cast()->element(); + auto output0 = std::make_shared(infer_var_type, var_shape); + auto output1 = std::make_shared(infer_m_type, m_shape); + auto output2 = std::make_shared(infer_v_type, v_shape); + AbstractBasePtrList output = {output0, output1, output2}; + return std::make_shared(output); +} +} // namespace +void Adam::Init(const bool use_locking, const bool use_nesterov) { + this->set_use_locking(use_locking); + this->set_use_nesterov(use_nesterov); +} + +void Adam::set_use_locking(const bool use_locking) { this->AddAttr(kUseLocking, MakeValue(use_locking)); } + +void Adam::set_use_nesterov(const bool use_nesterov) { this->AddAttr(kUseNesterov, MakeValue(use_nesterov)); } + +bool Adam::get_use_locking() const { + auto value_ptr = GetAttr(kUseLocking); + return GetValue(value_ptr); +} + +bool Adam::get_use_nesterov() const { + auto value_ptr = GetAttr(kUseNesterov); + return GetValue(value_ptr); +} + +AbstractBasePtr AdamInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args) { + return std::make_shared(AdamInfer(primitive, input_args)); +} + +REGISTER_PRIMITIVE_EVAL_IMPL(Adam, prim::kPrimAdam, AdamInfer); +REGISTER_PRIMITIVE_C(kNameAdam, Adam); +} // namespace ops +} // namespace mindspore diff --git a/mindspore/core/c_ops/adam.h b/mindspore/core/ops/adam.h similarity index 63% rename from mindspore/core/c_ops/adam.h rename to mindspore/core/ops/adam.h index 89d25d167cc..1767b7e342c 100644 --- a/mindspore/core/c_ops/adam.h +++ b/mindspore/core/ops/adam.h @@ -14,29 +14,34 @@ * limitations under the License. */ -#ifndef MINDSPORE_CORE_C_OPS_ADAM_H_ -#define MINDSPORE_CORE_C_OPS_ADAM_H_ +#ifndef MINDSPORE_CORE_OPS_ADAM_H_ +#define MINDSPORE_CORE_OPS_ADAM_H_ #include #include #include #include -#include "c_ops/primitive_c.h" +#include "ops/primitive_c.h" #include "abstract/abstract_value.h" #include "utils/check_convert_utils.h" namespace mindspore { +namespace ops { constexpr auto kNameAdam = "Adam"; class Adam : public PrimitiveC { public: Adam() : PrimitiveC(kNameAdam) {} ~Adam() = default; MS_DECLARE_PARENT(Adam, PrimitiveC); - void Init(const bool &use_locking = false, const bool &use_nesteroy = false); - void set_use_locking(const bool &use_locking); - void set_use_nesteroy(const bool &use_nesteroy); + void Init(const bool use_locking = false, const bool use_nesterov = false); + void set_use_locking(const bool use_locking); + void set_use_nesterov(const bool use_nesterov); bool get_use_locking() const; - bool get_use_nesteroy() const; + bool get_use_nesterov() const; }; +AbstractBasePtr AdamInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args); +using PrimAdamPtr = std::shared_ptr; +} // namespace ops } // namespace mindspore -#endif // MINDSPORE_CORE_C_OPS_ADAM_H_ +#endif // MINDSPORE_CORE_OPS_ADAM_H_ diff --git a/mindspore/core/ops/add.cc b/mindspore/core/ops/add.cc new file mode 100644 index 00000000000..e091d90d231 --- /dev/null +++ b/mindspore/core/ops/add.cc @@ -0,0 +1,56 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
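// Illustration only, not part of the patch: configuring the renamed use_nesterov
// flag through the accessors defined above. The flag values are arbitrary
// example choices.
#include "ops/adam.h"

void ConfigureAdamSketch() {
  mindspore::ops::Adam adam;
  adam.Init(/*use_locking=*/false, /*use_nesterov=*/true);
  bool nesterov = adam.get_use_nesterov();  // true, read back from the kUseNesterov attr
  bool locking = adam.get_use_locking();    // false
  (void)nesterov;
  (void)locking;
}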
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "ops/add.h" +#include +#include +#include +#include "ops/op_utils.h" +#include "utils/check_convert_utils.h" +#include "abstract/primitive_infer_map.h" + +namespace mindspore { +namespace ops { +namespace { +abstract::ShapePtr InferShape(const PrimitivePtr &primitive, const std::vector &input_args) { + MS_EXCEPTION_IF_NULL(primitive); + auto add_prim = primitive->cast(); + MS_EXCEPTION_IF_NULL(add_prim); + auto prim_name = add_prim->name(); + return BroadCastInferShape(prim_name, input_args); +} + +TypePtr InferType(const PrimitivePtr &prim, const std::vector &input_args) { + if (std::any_of(input_args.begin(), input_args.end(), [](const AbstractBasePtr &a) { return a == nullptr; })) { + MS_LOG(EXCEPTION) << "nullptr"; + } + std::map types; + types.emplace("x", input_args[0]->BuildType()); + types.emplace("y", input_args[1]->BuildType()); + auto infer_type = CheckAndConvertUtils::CheckTensorTypeSame(types, common_valid_types, prim->name()); + return TypeIdToType(infer_type); +} +} // namespace + +AbstractBasePtr AddInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args) { + return std::make_shared(InferType(primitive, input_args), + InferShape(primitive, input_args)->shape()); +} +REGISTER_PRIMITIVE_EVAL_IMPL(Add, prim::kPrimAdd, AddInfer); +REGISTER_PRIMITIVE_C(kNameAdd, Add); +} // namespace ops +} // namespace mindspore diff --git a/mindspore/core/c_ops/add.h b/mindspore/core/ops/add.h similarity index 81% rename from mindspore/core/c_ops/add.h rename to mindspore/core/ops/add.h index be4528a5f98..ac622a20e7a 100644 --- a/mindspore/core/c_ops/add.h +++ b/mindspore/core/ops/add.h @@ -14,21 +14,23 @@ * limitations under the License. */ -#ifndef MINDSPORE_CORE_C_OPS_ADD_H_ -#define MINDSPORE_CORE_C_OPS_ADD_H_ +#ifndef MINDSPORE_CORE_OPS_ADD_H_ +#define MINDSPORE_CORE_OPS_ADD_H_ #include #include #include #include -#include "c_ops/primitive_c.h" +#include "ops/primitive_c.h" #include "abstract/abstract_value.h" #include "utils/check_convert_utils.h" namespace mindspore { +namespace ops { constexpr auto kNameAdd = "Add"; class Add : public PrimitiveC { public: Add() : PrimitiveC(kNameAdd) { InitIOName({"x", "y"}, {"output"}); } + explicit Add(const std::string k_name) : PrimitiveC(k_name) { InitIOName({"x", "y"}, {"output"}); } ~Add() = default; MS_DECLARE_PARENT(Add, PrimitiveC); void Init() {} @@ -37,6 +39,7 @@ class Add : public PrimitiveC { AbstractBasePtr AddInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, const std::vector &input_args); using PrimAddPtr = std::shared_ptr; +} // namespace ops } // namespace mindspore -#endif // MINDSPORE_CORE_C_OPS_ADD_H_ +#endif // MINDSPORE_CORE_OPS_ADD_H_ diff --git a/mindspore/core/c_ops/add_fold.cc b/mindspore/core/ops/add_fold.cc similarity index 91% rename from mindspore/core/c_ops/add_fold.cc rename to mindspore/core/ops/add_fold.cc index e24fff7d036..a6759aca962 100644 --- a/mindspore/core/c_ops/add_fold.cc +++ b/mindspore/core/ops/add_fold.cc @@ -14,8 +14,10 @@ * limitations under the License. 
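// Illustration only, not part of the patch: the two Add constructors shown
// above. "MyAddAlias" is an arbitrary example name, not one used anywhere in
// this change.
#include "ops/add.h"

void BuildAddSketch() {
  mindspore::ops::Add add;                        // primitive name "Add", IO {"x","y"} -> {"output"}
  mindspore::ops::Add renamed_add("MyAddAlias");  // same op constructed under a caller-chosen name
  add.Init();          // Add carries no attributes
  renamed_add.Init();
}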
*/ -#include "c_ops/add_fold.h" +#include "ops/add_fold.h" namespace mindspore { +namespace ops { REGISTER_PRIMITIVE_C(kNameAddFold, AddFold); +} // namespace ops } // namespace mindspore diff --git a/mindspore/core/c_ops/add_fold.h b/mindspore/core/ops/add_fold.h similarity index 84% rename from mindspore/core/c_ops/add_fold.h rename to mindspore/core/ops/add_fold.h index 8f0f5c9397b..fc61acb6aaf 100644 --- a/mindspore/core/c_ops/add_fold.h +++ b/mindspore/core/ops/add_fold.h @@ -14,17 +14,18 @@ * limitations under the License. */ -#ifndef MINDSPORE_CORE_C_OPS_ADDFOLD_H_ -#define MINDSPORE_CORE_C_OPS_ADDFOLD_H_ +#ifndef MINDSPORE_CORE_OPS_ADD_FOLD_H_ +#define MINDSPORE_CORE_OPS_ADD_FOLD_H_ #include #include #include #include -#include "c_ops/primitive_c.h" +#include "ops/primitive_c.h" #include "abstract/abstract_value.h" #include "utils/check_convert_utils.h" namespace mindspore { +namespace ops { constexpr auto kNameAddFold = "AddFold"; class AddFold : public PrimitiveC { public: @@ -33,6 +34,7 @@ class AddFold : public PrimitiveC { MS_DECLARE_PARENT(AddFold, PrimitiveC); void Init() {} }; +} // namespace ops } // namespace mindspore -#endif // MINDSPORE_CORE_C_OPS_ADDFOLD_H_ +#endif // MINDSPORE_CORE_OPS_ADD_FOLD_H_ diff --git a/mindspore/core/ops/adder.cc b/mindspore/core/ops/adder.cc new file mode 100644 index 00000000000..9e14aa3aeb7 --- /dev/null +++ b/mindspore/core/ops/adder.cc @@ -0,0 +1,108 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "ops/adder.h" +#include "ops/op_utils.h" + +namespace mindspore { +namespace ops { +void Adder::Init(const int64_t in_channel, const int64_t out_channel, const std::vector &kernel_size, + const PadMode &pad_mode, const std::vector &stride, const std::vector &pad_list, + const std::vector &dilation, const int64_t group, const Format &format) { + set_in_channel(in_channel); + set_out_channel(out_channel); + set_kernel_size(kernel_size); + set_pad_mode(pad_mode); + set_stride(stride); + set_pad_list(pad_list); + set_dilation(dilation); + set_group(group); + set_format(format); +} + +void Adder::set_in_channel(const int64_t in_channel) { this->AddAttr(kInChannel, MakeValue(in_channel)); } + +int64_t Adder::get_in_channel() const { + auto value_ptr = GetAttr(kInChannel); + return GetValue(value_ptr); +} + +void Adder::set_out_channel(const int64_t out_channel) { this->AddAttr(kOutChannel, MakeValue(out_channel)); } + +int64_t Adder::get_out_channel() const { + auto value_ptr = GetAttr(kOutChannel); + return GetValue(value_ptr); +} + +void Adder::set_kernel_size(const std::vector &kernel_size) { + this->AddAttr(kKernelSize, MakeValue(kernel_size)); +} + +std::vector Adder::get_kernel_size() const { + auto value_ptr = GetAttr(kKernelSize); + return GetValue>(value_ptr); +} + +void Adder::set_pad_mode(const PadMode &pad_mode) { + int64_t swi = pad_mode; + this->AddAttr(kPadMode, MakeValue(swi)); +} + +PadMode Adder::get_pad_mode() const { + auto value_ptr = GetAttr(kPadMode); + return PadMode(GetValue(value_ptr)); +} + +void Adder::set_stride(const std::vector &stride) { this->AddAttr(kStride, MakeValue(stride)); } + +std::vector Adder::get_stride() const { + auto value_ptr = GetAttr(kStride); + return GetValue>(value_ptr); +} + +void Adder::set_pad_list(const std::vector &pad_list) { this->AddAttr(kPadList, MakeValue(pad_list)); } + +std::vector Adder::get_pad_list() const { + auto value_ptr = GetAttr(kPadList); + return GetValue>(value_ptr); +} + +void Adder::set_dilation(const std::vector &dilation) { this->AddAttr(kDilation, MakeValue(dilation)); } + +std::vector Adder::get_dilation() const { + auto value_ptr = GetAttr(kDilation); + return GetValue>(value_ptr); +} + +void Adder::set_group(const int64_t group) { this->AddAttr(kGroup, MakeValue(group)); } + +int64_t Adder::get_group() const { + auto value_ptr = GetAttr(kGroup); + return GetValue(value_ptr); +} + +void Adder::set_format(const Format &format) { + int64_t swi = format; + this->AddAttr(kFormat, MakeValue(swi)); +} + +Format Adder::get_format() const { + auto value_ptr = GetAttr(kFormat); + return Format(GetValue(value_ptr)); +} +REGISTER_PRIMITIVE_C(kNameAdder, Adder); +} // namespace ops +} // namespace mindspore diff --git a/mindspore/core/ops/adder.h b/mindspore/core/ops/adder.h new file mode 100644 index 00000000000..4fe36cb5967 --- /dev/null +++ b/mindspore/core/ops/adder.h @@ -0,0 +1,62 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef MINDSPORE_CORE_OPS_ADDER_H_ +#define MINDSPORE_CORE_OPS_ADDER_H_ + +#include +#include +#include +#include +#include "ops/primitive_c.h" +#include "abstract/abstract_value.h" +#include "utils/check_convert_utils.h" + +namespace mindspore { +namespace ops { +constexpr auto kNameAdder = "Adder"; +class Adder : public PrimitiveC { + public: + explicit Adder(const std::string &k_name = kNameAdder) : PrimitiveC(k_name) {} + ~Adder() = default; + MS_DECLARE_PARENT(Adder, PrimitiveC); + void Init(const int64_t in_channel, const int64_t out_channel, const std::vector &kernel_size, + const PadMode &pad_mode, const std::vector &stride, const std::vector &pad_list, + const std::vector &dilation, const int64_t group, const Format &format); + void set_in_channel(const int64_t in_channel); + void set_out_channel(const int64_t out_channel); + void set_kernel_size(const std::vector &kernel_size); + void set_pad_mode(const PadMode &pad_mode); + void set_stride(const std::vector &stride); + void set_pad_list(const std::vector &pad_list); + void set_dilation(const std::vector &dilation); + void set_group(const int64_t group); + void set_format(const Format &format); + + int64_t get_in_channel() const; + int64_t get_out_channel() const; + std::vector get_kernel_size() const; + PadMode get_pad_mode() const; + std::vector get_stride() const; + std::vector get_pad_list() const; + std::vector get_dilation() const; + int64_t get_group() const; + Format get_format() const; +}; +} // namespace ops +} // namespace mindspore + +#endif // MINDSPORE_CORE_OPS_ADDER_H_ diff --git a/mindspore/core/ops/addn.cc b/mindspore/core/ops/addn.cc new file mode 100644 index 00000000000..1933e3b0fed --- /dev/null +++ b/mindspore/core/ops/addn.cc @@ -0,0 +1,70 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
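// Illustration only, not part of the patch: populating the Adder attributes via
// Init and reading one back. All numeric values are arbitrary examples, and the
// spellings mindspore::PadMode::SAME / mindspore::Format::NCHW are assumed enum
// members and namespaces, not something this change defines.
#include <vector>
#include "ops/adder.h"

void ConfigureAdderSketch() {
  mindspore::ops::Adder adder;
  adder.Init(/*in_channel=*/3, /*out_channel=*/16, /*kernel_size=*/{3, 3},
             /*pad_mode=*/mindspore::PadMode::SAME, /*stride=*/{1, 1},
             /*pad_list=*/{0, 0, 0, 0}, /*dilation=*/{1, 1}, /*group=*/1,
             /*format=*/mindspore::Format::NCHW);
  std::vector<int64_t> kernel = adder.get_kernel_size();  // {3, 3}
  (void)kernel;
}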
+ */ + +#include +#include +#include +#include +#include +#include "ops/addn.h" +#include "ops/op_utils.h" + +namespace mindspore { +namespace ops { +AbstractBasePtr AddNInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args) { + MS_EXCEPTION_IF_NULL(primitive); + auto prim_name = primitive->name(); + CheckAndConvertUtils::CheckInteger("input number", input_args.size(), kEqual, 1, prim_name); + for (const auto &item : input_args) { + MS_EXCEPTION_IF_NULL(item); + } + auto input_tuple = input_args[0]->cast(); + MS_EXCEPTION_IF_NULL(input_tuple); + auto elements = input_tuple->elements(); + CheckAndConvertUtils::CheckInteger("concat element num", elements.size(), kGreaterEqual, 1, prim_name); + auto element0 = elements[0]->cast(); + MS_EXCEPTION_IF_NULL(element0); + auto element0_shape = + CheckAndConvertUtils::ConvertShapePtrToShape("element0 shape", element0->BuildShape(), prim_name); + + std::map types; + types.emplace("element0", element0->BuildType()); + for (size_t i = 1; i < elements.size(); ++i) { + std::string elementi = "element" + std::to_string(i); + auto elementi_shape = + CheckAndConvertUtils::ConvertShapePtrToShape(elementi + " shape", elements[i]->BuildShape(), prim_name); + CheckAndConvertUtils::CheckInteger(elementi + " shape rank", elementi_shape.size(), kEqual, element0_shape.size(), + prim_name); + for (size_t j = 0; j < element0_shape.size(); ++j) { + if (elementi_shape[j] != element0_shape[j]) { + MS_LOG(EXCEPTION) << "element " << i << " shape in input can not concat with first element."; + } + } + types.emplace(elementi, elements[i]->BuildType()); + } + std::set valid_types = common_valid_types; + valid_types.insert(kNumberTypeBool); + auto infer_type = CheckAndConvertUtils::CheckTensorTypeSame(types, valid_types, prim_name); + + return std::make_shared(TypeIdToType(infer_type), + std::make_shared(element0_shape)); +} + +REGISTER_PRIMITIVE_EVAL_IMPL(AddN, prim::kPrimAddN, AddNInfer); +REGISTER_PRIMITIVE_C(kNameAddN, AddN); +} // namespace ops +} // namespace mindspore diff --git a/mindspore/core/c_ops/addn.h b/mindspore/core/ops/addn.h similarity index 69% rename from mindspore/core/c_ops/addn.h rename to mindspore/core/ops/addn.h index 2a171ae06b8..7459ac28066 100644 --- a/mindspore/core/c_ops/addn.h +++ b/mindspore/core/ops/addn.h @@ -14,13 +14,16 @@ * limitations under the License. */ -#ifndef MINDSPORE_CORE_C_OPS_ADDN_H_ -#define MINDSPORE_CORE_C_OPS_ADDN_H_ -#include "c_ops/primitive_c.h" +#ifndef MINDSPORE_CORE_OPS_ADDN_H_ +#define MINDSPORE_CORE_OPS_ADDN_H_ +#include +#include +#include "ops/primitive_c.h" #include "abstract/abstract_value.h" #include "utils/check_convert_utils.h" namespace mindspore { +namespace ops { constexpr auto kNameAddN = "AddN"; class AddN : public PrimitiveC { public: @@ -29,6 +32,10 @@ class AddN : public PrimitiveC { MS_DECLARE_PARENT(AddN, PrimitiveC); void Init() {} }; +AbstractBasePtr AddNInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args); +using PrimAddNPtr = std::shared_ptr; +} // namespace ops } // namespace mindspore -#endif // MINDSPORE_CORE_C_OPS_ADDN_H_ +#endif // MINDSPORE_CORE_OPS_ADDN_H_ diff --git a/mindspore/core/c_ops/concat.cc b/mindspore/core/ops/all.cc similarity index 65% rename from mindspore/core/c_ops/concat.cc rename to mindspore/core/ops/all.cc index a64d117f827..14b1cd18980 100644 --- a/mindspore/core/c_ops/concat.cc +++ b/mindspore/core/ops/all.cc @@ -14,19 +14,20 @@ * limitations under the License. 
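// Illustration only, not part of the patch: AddN itself has no attributes; the
// interesting part is the tuple checking in AddNInfer above. The shapes are
// arbitrary examples of what that check accepts and rejects:
//   element shapes {2, 3}, {2, 3}, {2, 3} -> output shape {2, 3}
//   element shapes {2, 3}, {3, 2}         -> exception ("can not concat with first element")
// Element dtypes must match and, unlike most ops here, bool is also accepted.
#include "ops/addn.h"

void BuildAddNSketch() {
  mindspore::ops::AddN addn;  // registered under "AddN"
  addn.Init();                // nothing to configure
}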
*/ -#include "c_ops/concat.h" -#include "c_ops/op_utils.h" +#include "ops/all.h" +#include "ops/op_utils.h" #include "utils/check_convert_utils.h" -namespace mindspore { -void Concat::Init(int64_t axis) { this->set_axis(axis); } -int64_t Concat::get_axis() const { - auto value_ptr = this->GetAttr(kAxis); +namespace mindspore { +namespace ops { +void All::Init(const int64_t keep_dims) { this->set_keep_dims(keep_dims); } + +void All::set_keep_dims(const int64_t keep_dims) { this->AddAttr(kKeepDims, MakeValue(keep_dims)); } + +int64_t All::get_keep_dims() const { + auto value_ptr = GetAttr(kKeepDims); return GetValue(value_ptr); } - -void Concat::set_axis(int64_t axis) { - this->AddAttr(kAxis, MakeValue(CheckAndConvertUtils::CheckInteger(kAxis, axis, kGreaterEqual, 0, this->name()))); -} -REGISTER_PRIMITIVE_C(kNameConcat, Concat); +REGISTER_PRIMITIVE_C(kNameAll, All); +} // namespace ops } // namespace mindspore diff --git a/mindspore/core/ops/all.h b/mindspore/core/ops/all.h new file mode 100644 index 00000000000..c8035874c8a --- /dev/null +++ b/mindspore/core/ops/all.h @@ -0,0 +1,38 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_CORE_OPS_ALL_H_ +#define MINDSPORE_CORE_OPS_ALL_H_ +#include "ops/primitive_c.h" +#include "abstract/abstract_value.h" +#include "utils/check_convert_utils.h" + +namespace mindspore { +namespace ops { +constexpr auto kNameAll = "All"; +class All : public PrimitiveC { + public: + All() : PrimitiveC(kNameAll) {} + ~All() = default; + MS_DECLARE_PARENT(All, PrimitiveC); + void Init(const int64_t keep_dims); + void set_keep_dims(const int64_t keep_dims); + int64_t get_keep_dims() const; +}; +} // namespace ops +} // namespace mindspore + +#endif // MINDSPORE_CORE_OPS_ALL_H_ diff --git a/mindspore/core/ops/apply_momentum.cc b/mindspore/core/ops/apply_momentum.cc new file mode 100644 index 00000000000..5b03ef32303 --- /dev/null +++ b/mindspore/core/ops/apply_momentum.cc @@ -0,0 +1,89 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
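// Illustration only, not part of the patch: All stores keep_dims as an int64_t
// attribute (kKeepDims) rather than a bool. The value 1 is an arbitrary example.
#include "ops/all.h"

void ConfigureAllSketch() {
  mindspore::ops::All all_op;
  all_op.Init(/*keep_dims=*/1);
  int64_t keep_dims = all_op.get_keep_dims();  // 1
  (void)keep_dims;
}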
+ */ + +#include +#include +#include +#include +#include +#include "ops/apply_momentum.h" +#include "ops/op_utils.h" +#include "utils/check_convert_utils.h" + +namespace mindspore { +namespace ops { +void ApplyMomentum::Init(const bool use_nesterov, const bool use_locking, const float gradient_scale) { + this->set_use_nesterov(use_nesterov); + this->set_use_locking(use_locking); + this->set_gradient_scale(gradient_scale); +} + +void ApplyMomentum::set_use_nesterov(const bool use_nesterov) { this->AddAttr(kUseNesterov, MakeValue(use_nesterov)); } + +void ApplyMomentum::set_use_locking(const bool use_locking) { this->AddAttr(kUseLocking, MakeValue(use_locking)); } + +void ApplyMomentum::set_gradient_scale(const float gradient_scale) { + this->AddAttr(kGradientScale, MakeValue(gradient_scale)); +} + +bool ApplyMomentum::get_use_nesterov() const { + auto value_ptr = GetAttr(kUseNesterov); + return GetValue(value_ptr); +} + +bool ApplyMomentum::get_use_locking() const { + auto value_ptr = GetAttr(kUseLocking); + return GetValue(value_ptr); +} + +float ApplyMomentum::get_gradient_scale() const { + auto value_ptr = GetAttr(kGradientScale); + return GetValue(value_ptr); +} +AbstractBasePtr ApplyMomentumInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args) { + MS_EXCEPTION_IF_NULL(primitive); + auto momentum_prim = primitive->cast(); + MS_EXCEPTION_IF_NULL(momentum_prim); + auto prim_name = momentum_prim->name(); + CheckAndConvertUtils::CheckInteger("apply_momentum_infer", input_args.size(), kEqual, 5, prim_name); + + // Infer shape + auto v_shape = CheckAndConvertUtils::ConvertShapePtrToShape("v_shape", input_args[0]->BuildShape(), prim_name); + + // Infer type + auto v_type = input_args[0]->BuildType()->cast()->element(); + auto a_type = input_args[1]->BuildType()->cast()->element(); + auto l_type = input_args[2]->BuildType()->cast()->element(); + auto g_type = input_args[3]->BuildType()->cast()->element(); + auto m_type = input_args[4]->BuildType()->cast()->element(); + const std::set valid_types = {kNumberTypeFloat16, kNumberTypeFloat32, kNumberTypeFloat64}; + CheckAndConvertUtils::CheckTensorTypeValid("v_type", v_type, valid_types, prim_name); + CheckAndConvertUtils::CheckTensorTypeValid("a_type", a_type, valid_types, prim_name); + const std::set valid_types_ptr = {TypeIdToType(kNumberTypeFloat16), TypeIdToType(kNumberTypeFloat32), + TypeIdToType(kNumberTypeFloat64)}; + std::map args; + args.insert({"l_type", l_type}); + args.insert({"g_type", g_type}); + args.insert({"m_type", m_type}); + CheckAndConvertUtils::CheckScalarOrTensorTypesSame(args, valid_types_ptr, prim_name); + + return std::make_shared(g_type, v_shape); +} +REGISTER_PRIMITIVE_EVAL_IMPL(ApplyMomentum, prim::kPrimApplyMomentum, ApplyMomentumInfer); +REGISTER_PRIMITIVE_C(kNameApplyMomentum, ApplyMomentum); +} // namespace ops +} // namespace mindspore diff --git a/mindspore/core/c_ops/apply_momentum.h b/mindspore/core/ops/apply_momentum.h similarity index 59% rename from mindspore/core/c_ops/apply_momentum.h rename to mindspore/core/ops/apply_momentum.h index c14cfc2479e..388bec9dd87 100644 --- a/mindspore/core/c_ops/apply_momentum.h +++ b/mindspore/core/ops/apply_momentum.h @@ -14,13 +14,17 @@ * limitations under the License. 
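// Illustration only, not part of the patch: setting the three ApplyMomentum
// attributes through Init. The values simply repeat the defaults declared in
// the header diff that follows.
#include "ops/apply_momentum.h"

void ConfigureApplyMomentumSketch() {
  mindspore::ops::ApplyMomentum opt;
  opt.Init(/*use_nesterov=*/false, /*use_locking=*/false, /*gradient_scale=*/1.0f);
  float scale = opt.get_gradient_scale();  // 1.0f
  (void)scale;
}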
*/ -#ifndef MINDSPORE_CORE_C_OPS_APPLYMOMENTUM_H_ -#define MINDSPORE_CORE_C_OPS_APPLYMOMENTUM_H_ -#include "c_ops/primitive_c.h" +#ifndef MINDSPORE_CORE_OPS_APPLY_MOMENTUM_H_ +#define MINDSPORE_CORE_OPS_APPLY_MOMENTUM_H_ +#include +#include + +#include "ops/primitive_c.h" #include "abstract/abstract_value.h" #include "utils/check_convert_utils.h" namespace mindspore { +namespace ops { constexpr auto kNameApplyMomentum = "ApplyMomentum"; class ApplyMomentum : public PrimitiveC { public: @@ -29,14 +33,18 @@ class ApplyMomentum : public PrimitiveC { } ~ApplyMomentum() = default; MS_DECLARE_PARENT(ApplyMomentum, PrimitiveC); - void Init(bool use_nesterov, bool use_locking, float gradient_scale); - void set_use_nesterov(bool use_nesterov); - void set_use_locking(bool use_locking); - void set_gradient_scale(float gradient_scale); + void Init(const bool use_nesterov = false, const bool use_locking = false, const float gradient_scale = 1.0); + void set_use_nesterov(const bool use_nesterov); + void set_use_locking(const bool use_locking); + void set_gradient_scale(const float gradient_scale); bool get_use_nesterov() const; bool get_use_locking() const; - float get_gradient_scale(); + float get_gradient_scale() const; }; +AbstractBasePtr ApplyMomentumInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args); +using PrimApplyMomentumPtr = std::shared_ptr; +} // namespace ops } // namespace mindspore -#endif // MINDSPORE_CORE_C_OPS_APPLYMOMENTUM_H_ +#endif // MINDSPORE_CORE_OPS_APPLY_MOMENTUM_H_ diff --git a/mindspore/core/ops/arg_max.cc b/mindspore/core/ops/arg_max.cc new file mode 100644 index 00000000000..79ef82681c5 --- /dev/null +++ b/mindspore/core/ops/arg_max.cc @@ -0,0 +1,73 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "ops/arg_max.h" + +namespace mindspore { +namespace ops { +namespace { +abstract::ShapePtr InferShape(const PrimitivePtr &primitive, const std::vector &input_args) { + MS_EXCEPTION_IF_NULL(primitive); + auto prim_name = primitive->name(); + auto prim = primitive->cast(); + MS_EXCEPTION_IF_NULL(prim); + auto axis = prim->get_axis(); + auto x_shape = CheckAndConvertUtils::ConvertShapePtrToShape("x_shape", input_args[0]->BuildShape(), prim_name); + auto x_rank = SizeToLong(x_shape.size()); + CheckAndConvertUtils::CheckInRange("argmax axis", axis, kIncludeLeft, {-x_rank, x_rank}, prim_name); + axis = axis < 0 ? 
axis + x_rank : axis; + std::vector out_shape; + for (size_t i = 0; i < x_shape.size(); ++i) { + if (SizeToLong(i) != axis) { + out_shape.emplace_back(x_shape[i]); + } + } + return std::make_shared(out_shape); +} + +TypePtr InferType(const PrimitivePtr &prim, const std::vector &input_args) { + MS_EXCEPTION_IF_NULL(prim); + CheckAndConvertUtils::CheckInteger("input number", input_args.size(), kEqual, 1, prim->name()); + for (const auto &item : input_args) { + MS_EXCEPTION_IF_NULL(item); + } + return kInt32; +} +} // namespace + +void ArgMax::Init(const int64_t axis, const TypeId output_type) { + set_axis(axis); + set_output_type(output_type); +} + +void ArgMax::set_axis(const int64_t axis) { this->AddAttr(kAxis, MakeValue(axis)); } +void ArgMax::set_output_type(const TypeId output_type) { this->AddAttr(kOutputType, TypeIdToType(output_type)); } + +int64_t ArgMax::get_axis() const { return GetValue(GetAttr(kAxis)); } +TypeId ArgMax::get_output_type() const { + auto type_ptr = GetAttr(kOutputType)->cast()->element(); + return type_ptr->type_id(); +} + +AbstractBasePtr ArgMaxInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args) { + return std::make_shared(InferType(primitive, input_args), + InferShape(primitive, input_args)->shape()); +} +REGISTER_PRIMITIVE_EVAL_IMPL(ArgMax, prim::kPrimArgMax, ArgMaxInfer); +REGISTER_PRIMITIVE_C(kNameArgMax, ArgMax); +} // namespace ops +} // namespace mindspore diff --git a/mindspore/core/ops/arg_max.h b/mindspore/core/ops/arg_max.h new file mode 100644 index 00000000000..75440f0b0a4 --- /dev/null +++ b/mindspore/core/ops/arg_max.h @@ -0,0 +1,50 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
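// Illustration only, not part of the patch: the axis handling in the ArgMax
// InferShape above, with arbitrary example shapes. The dtype attribute is set
// through set_output_type, while InferType always reports int32.
//   x_shape {3, 4, 5}, axis = 1  -> output shape {3, 5}
//   x_shape {3, 4, 5}, axis = -1 -> normalised to axis 2, output shape {3, 4}
#include "ops/arg_max.h"

void ConfigureArgMaxSketch() {
  mindspore::ops::ArgMax argmax;
  argmax.Init(/*axis=*/-1, /*output_type=*/mindspore::kNumberTypeInt32);
  int64_t axis = argmax.get_axis();  // -1 as stored; normalisation happens at infer time
  (void)axis;
}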
+ */ + +#ifndef MINDSPORE_CORE_OPS_ARG_MAX_H_ +#define MINDSPORE_CORE_OPS_ARG_MAX_H_ +#include +#include +#include + +#include "ops/primitive_c.h" +#include "ops/op_utils.h" +#include "abstract/abstract_value.h" +#include "utils/check_convert_utils.h" + +namespace mindspore { +namespace ops { +constexpr auto kNameArgMax = "Argmax"; +class ArgMax : public PrimitiveC { + public: + ArgMax() : PrimitiveC(kNameArgMax) { InitIOName({"x"}, {"output"}); } + explicit ArgMax(const std::string k_name) : PrimitiveC(k_name) { InitIOName({"x"}, {"output"}); } + ~ArgMax() = default; + MS_DECLARE_PARENT(ArgMax, PrimitiveC); + void Init(const int64_t axis = -1, const TypeId output_type = kNumberTypeInt32); + void set_axis(const int64_t axis); + void set_output_type(const TypeId output_type); + + int64_t get_axis() const; + TypeId get_output_type() const; +}; +AbstractBasePtr ArgMaxInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args); +using PrimArgMaxPtr = std::shared_ptr; +} // namespace ops +} // namespace mindspore + +#endif // MINDSPORE_CORE_OPS_ARG_MAX_H_ diff --git a/mindspore/core/ops/arg_min.cc b/mindspore/core/ops/arg_min.cc new file mode 100644 index 00000000000..706f4802b0f --- /dev/null +++ b/mindspore/core/ops/arg_min.cc @@ -0,0 +1,73 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include +#include "ops/arg_min.h" + +namespace mindspore { +namespace ops { +void ArgMin::Init(const int64_t axis, const TypeId output_type) { + set_axis(axis); + set_output_type(output_type); +} + +void ArgMin::set_axis(const int64_t axis) { this->AddAttr(kAxis, MakeValue(axis)); } +void ArgMin::set_output_type(const TypeId output_type) { this->AddAttr(kOutputType, TypeIdToType(output_type)); } + +int64_t ArgMin::get_axis() const { + auto value_ptr = GetAttr(kAxis); + return GetValue(value_ptr); +} + +TypeId ArgMin::get_output_type() const { + auto type_ptr = GetAttr(kOutputType)->cast()->element(); + return type_ptr->type_id(); +} + +AbstractBasePtr ArgMinInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args) { + MS_EXCEPTION_IF_NULL(primitive); + auto argmin_prim = primitive->cast(); + MS_EXCEPTION_IF_NULL(argmin_prim); + auto prim_name = argmin_prim->name(); + CheckAndConvertUtils::CheckInteger("arg_min_infer", input_args.size(), kEqual, 1, prim_name); + + // Infer shape + auto axis = argmin_prim->get_axis(); + auto x_shape = CheckAndConvertUtils::ConvertShapePtrToShape("x_shape", input_args[0]->BuildShape(), prim_name); + auto x_rank = SizeToLong(x_shape.size()); + CheckAndConvertUtils::CheckInRange("axis", axis, kIncludeLeft, {-x_rank, x_rank}, prim_name); + if (axis < 0) { + axis += x_rank; + } + std::vector out_shape; + for (int64_t i = 0; i < x_rank; i++) { + if (i != axis) { + out_shape.push_back(x_shape[i]); + } + } + + // Infer type + auto x_dtype = input_args[0]->BuildType()->cast()->element(); + std::set template_types = {TypeIdToType(kObjectTypeTensorType)}; + CheckAndConvertUtils::CheckSubClass("x_dtype", x_dtype, template_types, prim_name); + + return std::make_shared(x_dtype, std::make_shared(out_shape)); +} +REGISTER_PRIMITIVE_EVAL_IMPL(ArgMin, prim::kPrimArgMin, ArgMinInfer); +REGISTER_PRIMITIVE_C(kNameArgMin, ArgMin); +} // namespace ops +} // namespace mindspore diff --git a/mindspore/core/c_ops/arg_min.h b/mindspore/core/ops/arg_min.h similarity index 68% rename from mindspore/core/c_ops/arg_min.h rename to mindspore/core/ops/arg_min.h index 622ebc024b3..6872d7f0db1 100644 --- a/mindspore/core/c_ops/arg_min.h +++ b/mindspore/core/ops/arg_min.h @@ -14,32 +14,37 @@ * limitations under the License. 
*/ -#ifndef MINDSPORE_CORE_C_OPS_ARGMIN_H_ -#define MINDSPORE_CORE_C_OPS_ARGMIN_H_ +#ifndef MINDSPORE_CORE_OPS_ARG_MIN_H_ +#define MINDSPORE_CORE_OPS_ARG_MIN_H_ +#include #include #include -#include "c_ops/primitive_c.h" -#include "c_ops/op_utils.h" +#include "ops/primitive_c.h" +#include "ops/op_utils.h" #include "abstract/abstract_value.h" #include "utils/check_convert_utils.h" namespace mindspore { +namespace ops { constexpr auto kNameArgMin = "ArgMin"; class ArgMin : public PrimitiveC { public: ArgMin() : PrimitiveC(kNameArgMin) { InitIOName({"x"}, {"output"}); } + explicit ArgMin(const std::string k_name) : PrimitiveC(k_name) { InitIOName({"x"}, {"output"}); } ~ArgMin() = default; MS_DECLARE_PARENT(ArgMin, PrimitiveC); - void Init(bool keep_dims, int64_t axis = -1); - void set_axis(int64_t axis); - void set_keep_dims(bool keep_dims); - int64_t get_axis(); - bool get_keep_dims(); + void Init(const int64_t axis = -1, const TypeId output_type = kNumberTypeInt32); + void set_axis(const int64_t axis); + void set_output_type(const TypeId output_type); + + int64_t get_axis() const; + TypeId get_output_type() const; }; AbstractBasePtr ArgMinInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, const std::vector &input_args); using PrimArgMin = std::shared_ptr; +} // namespace ops } // namespace mindspore -#endif // MINDSPORE_CORE_C_OPS_ARGMIN_H_ +#endif // MINDSPORE_CORE_OPS_ARG_MIN_H_ diff --git a/mindspore/core/ops/asin.cc b/mindspore/core/ops/asin.cc new file mode 100644 index 00000000000..5938c58b9ef --- /dev/null +++ b/mindspore/core/ops/asin.cc @@ -0,0 +1,52 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include +#include +#include + +#include "ops/asin.h" + +namespace mindspore { +namespace ops { +AbstractBasePtr AsinInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args) { + MS_EXCEPTION_IF_NULL(primitive); + auto asin_prim = primitive->cast(); + MS_EXCEPTION_IF_NULL(asin_prim); + auto prim_name = asin_prim->name(); + CheckAndConvertUtils::CheckInteger("Asin_infer", input_args.size(), kEqual, 1, prim_name); + + // Infer Shape + auto x_shape = CheckAndConvertUtils::ConvertShapePtrToShape("x_shape", input_args[0]->BuildShape(), prim_name); + auto infer_shape = std::make_shared(x_shape); + + // Infer Type + auto dtype = input_args[0]->BuildType(); + const std::set valid_types = {kNumberTypeFloat16, kNumberTypeFloat32, kNumberTypeInt32}; + CheckAndConvertUtils::CheckTensorTypeValid("x_dtype", dtype, valid_types, prim_name); + auto tensor_type = dtype->cast(); + MS_EXCEPTION_IF_NULL(tensor_type); + auto element = tensor_type->element(); + MS_EXCEPTION_IF_NULL(element); + auto infer_type = std::make_shared(TypeIdToType(element->type_id())); + + return std::make_shared(infer_type, infer_shape->shape()); +} +REGISTER_PRIMITIVE_EVAL_IMPL(Asin, prim::kPrimAsin, AsinInfer); +REGISTER_PRIMITIVE_C(kNameAsin, Asin); +} // namespace ops +} // namespace mindspore diff --git a/mindspore/core/ops/asin.h b/mindspore/core/ops/asin.h new file mode 100644 index 00000000000..ebed63649b3 --- /dev/null +++ b/mindspore/core/ops/asin.h @@ -0,0 +1,42 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_CORE_OPS_ASIN_H_ +#define MINDSPORE_CORE_OPS_ASIN_H_ +#include +#include + +#include "ops/primitive_c.h" +#include "abstract/abstract_value.h" +#include "utils/check_convert_utils.h" + +namespace mindspore { +namespace ops { +constexpr auto kNameAsin = "Asin"; +class Asin : public PrimitiveC { + public: + Asin() : PrimitiveC(kNameAsin) {} + ~Asin() = default; + MS_DECLARE_PARENT(Asin, PrimitiveC); + void Init() {} +}; +AbstractBasePtr ASinInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args); +using PrimAsinPtr = std::shared_ptr; +} // namespace ops +} // namespace mindspore + +#endif // MINDSPORE_CORE_OPS_ASIN_H_ diff --git a/mindspore/core/ops/assert.cc b/mindspore/core/ops/assert.cc new file mode 100644 index 00000000000..e0bb23d1dcc --- /dev/null +++ b/mindspore/core/ops/assert.cc @@ -0,0 +1,78 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include +#include +#include +#include +#include + +#include "ops/assert.h" +#include "ops/op_utils.h" + +namespace mindspore { +namespace ops { +void Assert::Init(const int64_t summarize) { set_summarize(summarize); } + +void Assert::set_summarize(const int64_t summarize) { this->AddAttr(kSummarize, MakeValue(summarize)); } + +int64_t Assert::get_summarize() const { + auto value_ptr = GetAttr(kSummarize); + return GetValue(value_ptr); +} + +AbstractBasePtr AssertInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args) { + MS_EXCEPTION_IF_NULL(primitive); + auto Assert_prim = primitive->cast(); + MS_EXCEPTION_IF_NULL(Assert_prim); + auto op_name = Assert_prim->name(); + TypePtr condition; + if (!(input_args[0]->BuildType()->type_id() == kObjectTypeTensorType)) { + auto condition_value = GetValue>(input_args[0]->BuildValue()); + CheckAndConvertUtils::CheckInteger("condition's rank", condition_value.size(), kLessEqual, 1, op_name); + if (condition_value.size() == 1) { + CheckAndConvertUtils::CheckInteger("condition[0]", condition_value[0], kEqual, 1, op_name); + } + condition = TypeIdToType(kNumberTypeBool); + } else { + auto condition_shape = + CheckAndConvertUtils::ConvertShapePtrToShape("input_shape", input_args[0]->BuildShape(), op_name); + CheckAndConvertUtils::CheckInteger("condition's rank", condition_shape[0], kLessEqual, 1, op_name); + if (condition_shape[0] == 1) { + auto condition_value = reinterpret_cast(input_args[0]->BuildValue()->cast()->data_c()); + MS_EXCEPTION_IF_NULL(condition_value); + // auto condition_value = GetValue(input_args[0]->BuildValue()); + CheckAndConvertUtils::CheckInteger("condition[0]", *condition_value, kEqual, 1, op_name); + } + condition = input_args[0]->BuildType(); + } + std::vector output_shape = {1}; + std::set local_bool = {TypeIdToType(kNumberTypeBool)}; + std::map args = {{"condition", condition}}; + CheckAndConvertUtils::CheckScalarOrTensorTypesSame(args, local_bool, op_name); + auto inputs_type = input_args[1]->BuildType()->cast()->elements(); + for (auto dtype : inputs_type) { + std::set template_types = {TypeIdToType(kObjectTypeTensorType)}; + CheckAndConvertUtils::CheckSubClass("input", dtype, template_types, op_name); + } + return std::make_shared(TypeIdToType(kNumberTypeInt32), output_shape); +} + +REGISTER_PRIMITIVE_EVAL_IMPL(Assert, prim::kPrimAssert, AssertInfer); +REGISTER_PRIMITIVE_C(kNameAssert, Assert); +} // namespace ops +} // namespace mindspore diff --git a/mindspore/core/ops/assert.h b/mindspore/core/ops/assert.h new file mode 100644 index 00000000000..c4488bde7a9 --- /dev/null +++ b/mindspore/core/ops/assert.h @@ -0,0 +1,44 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
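// Illustration only, not part of the patch: summarize is the only attribute
// Assert carries; 3 is an arbitrary example value.
#include "ops/assert.h"

void ConfigureAssertSketch() {
  mindspore::ops::Assert assert_op;
  assert_op.Init(/*summarize=*/3);
  int64_t summarize = assert_op.get_summarize();  // 3
  (void)summarize;
}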
+ */ + +#ifndef MINDSPORE_CORE_OPS_ASSERT_H_ +#define MINDSPORE_CORE_OPS_ASSERT_H_ +#include +#include +#include "ops/primitive_c.h" +#include "abstract/abstract_value.h" +#include "utils/check_convert_utils.h" + +namespace mindspore { +namespace ops { +constexpr auto kNameAssert = "Assert"; +class Assert : public PrimitiveC { + public: + Assert() : PrimitiveC(kNameAssert) {} + ~Assert() = default; + MS_DECLARE_PARENT(Assert, PrimitiveC); + void Init(const int64_t summarize = 3); + void set_summarize(const int64_t summarize); + int64_t get_summarize() const; +}; + +AbstractBasePtr AssertInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args); +using PrimAssertPtr = std::shared_ptr; +} // namespace ops +} // namespace mindspore + +#endif // MINDSPORE_CORE_OPS_ASSERT_H_ diff --git a/mindspore/core/c_ops/assign.cc b/mindspore/core/ops/assign.cc similarity index 78% rename from mindspore/core/c_ops/assign.cc rename to mindspore/core/ops/assign.cc index 4ea267ac54d..1bfad53e91b 100644 --- a/mindspore/core/c_ops/assign.cc +++ b/mindspore/core/ops/assign.cc @@ -14,8 +14,18 @@ * limitations under the License. */ -#include "c_ops/assign.h" +#include +#include +#include +#include +#include + +#include "ops/assign.h" +#include "ops/op_utils.h" +#include "ir/dtype/ref.h" namespace mindspore { +namespace ops { REGISTER_PRIMITIVE_C(kNameAssign, Assign); +} // namespace ops } // namespace mindspore diff --git a/mindspore/core/c_ops/assign.h b/mindspore/core/ops/assign.h similarity index 78% rename from mindspore/core/c_ops/assign.h rename to mindspore/core/ops/assign.h index 87b5f0761e2..a0072725a3e 100644 --- a/mindspore/core/c_ops/assign.h +++ b/mindspore/core/ops/assign.h @@ -14,13 +14,17 @@ * limitations under the License. */ -#ifndef MINDSPORE_CORE_C_OPS_ASSIGN_H_ -#define MINDSPORE_CORE_C_OPS_ASSIGN_H_ -#include "c_ops/primitive_c.h" +#ifndef MINDSPORE_CORE_OPS_ASSIGN_H_ +#define MINDSPORE_CORE_OPS_ASSIGN_H_ +#include +#include + +#include "ops/primitive_c.h" #include "abstract/abstract_value.h" #include "utils/check_convert_utils.h" namespace mindspore { +namespace ops { constexpr auto kNameAssign = "Assign"; class Assign : public PrimitiveC { public: @@ -29,6 +33,9 @@ class Assign : public PrimitiveC { MS_DECLARE_PARENT(Assign, PrimitiveC); void Init() {} }; + +using PrimAssignPtr = std::shared_ptr; +} // namespace ops } // namespace mindspore -#endif // MINDSPORE_CORE_C_OPS_ASSIGN_H_ +#endif // MINDSPORE_CORE_OPS_ASSIGN_H_ diff --git a/mindspore/core/ops/assign_add.cc b/mindspore/core/ops/assign_add.cc new file mode 100644 index 00000000000..59a15d677a0 --- /dev/null +++ b/mindspore/core/ops/assign_add.cc @@ -0,0 +1,53 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include +#include +#include "ops/assign_add.h" +#include "ops/op_utils.h" +#include "utils/check_convert_utils.h" + +namespace mindspore { +namespace ops { +namespace { +abstract::ShapePtr InferShape(const PrimitivePtr &primitive, const std::vector &input_args) { + MS_EXCEPTION_IF_NULL(primitive); + auto assignadd_prim = primitive->cast(); + MS_EXCEPTION_IF_NULL(assignadd_prim); + auto prim_name = assignadd_prim->name(); + auto value_shape = + CheckAndConvertUtils::ConvertShapePtrToShape("value_shape", input_args[1]->BuildShape(), prim_name); + return std::make_shared(value_shape); +} + +TypePtr InferType(const PrimitivePtr &primitive, const std::vector &input_args) { + std::map types; + types.emplace("x", input_args[0]->BuildType()); + types.emplace("w", input_args[1]->BuildType()); + // check_scalar_or_tensor_types_same + auto infer_type = CheckAndConvertUtils::CheckTensorTypeSame(types, common_valid_types, "AssignAdd"); + return TypeIdToType(infer_type); +} +} // namespace +AbstractBasePtr AssignAddInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args) { + return std::make_shared(InferType(primitive, input_args), + InferShape(primitive, input_args)->shape()); +} +REGISTER_PRIMITIVE_EVAL_IMPL(AssignAdd, prim::kPrimAssignAdd, AssignAddInfer); +REGISTER_PRIMITIVE_C(kNameAssignAdd, AssignAdd); +} // namespace ops +} // namespace mindspore diff --git a/mindspore/core/c_ops/assign_add.h b/mindspore/core/ops/assign_add.h similarity index 68% rename from mindspore/core/c_ops/assign_add.h rename to mindspore/core/ops/assign_add.h index b6713a66628..645d39bb38f 100644 --- a/mindspore/core/c_ops/assign_add.h +++ b/mindspore/core/ops/assign_add.h @@ -14,13 +14,17 @@ * limitations under the License. */ -#ifndef MINDSPORE_CORE_C_OPS_ASSIGNADD_H_ -#define MINDSPORE_CORE_C_OPS_ASSIGNADD_H_ -#include "c_ops/primitive_c.h" +#ifndef MINDSPORE_CORE_OPS_ASSIGN_ADD_H_ +#define MINDSPORE_CORE_OPS_ASSIGN_ADD_H_ +#include +#include + +#include "ops/primitive_c.h" #include "abstract/abstract_value.h" #include "utils/check_convert_utils.h" namespace mindspore { +namespace ops { constexpr auto kNameAssignAdd = "AssignAdd"; class AssignAdd : public PrimitiveC { public: @@ -29,6 +33,10 @@ class AssignAdd : public PrimitiveC { MS_DECLARE_PARENT(AssignAdd, PrimitiveC); void Init() {} }; +AbstractBasePtr AssignAddInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args); +using PrimAssignAddPtr = std::shared_ptr; +} // namespace ops } // namespace mindspore -#endif // MINDSPORE_CORE_C_OPS_ASSIGNADD_H_ +#endif // MINDSPORE_CORE_OPS_ASSIGN_ADD_H_ diff --git a/mindspore/core/ops/atan.cc b/mindspore/core/ops/atan.cc new file mode 100644 index 00000000000..499e55dcc5e --- /dev/null +++ b/mindspore/core/ops/atan.cc @@ -0,0 +1,50 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
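// Illustration only, not part of the patch: AssignAddInfer above takes the
// output shape from the second input (the value being added) and checks both
// input dtypes against common_valid_types. The shapes below are arbitrary
// examples:
//   ref {2, 3} float32, value {2, 3} float32 -> output {2, 3} float32
#include "ops/assign_add.h"

void BuildAssignAddSketch() {
  mindspore::ops::AssignAdd assign_add;  // registered under "AssignAdd", no attributes
  assign_add.Init();
}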
+ */ + +#include + +#include "ops/atan.h" + +namespace mindspore { +namespace ops { +AbstractBasePtr AtanInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args) { + MS_EXCEPTION_IF_NULL(primitive); + auto atan_prim = primitive->cast(); + MS_EXCEPTION_IF_NULL(atan_prim); + auto prim_name = atan_prim->name(); + CheckAndConvertUtils::CheckInteger("Atan_infer", input_args.size(), kEqual, 1, prim_name); + + // Infer Shape + auto x_shape = CheckAndConvertUtils::ConvertShapePtrToShape("x_shape", input_args[0]->BuildShape(), prim_name); + auto infer_shape = std::make_shared(x_shape); + + // Infer Type + auto dtype = input_args[0]->BuildType(); + const std::set valid_types = {kNumberTypeFloat16, kNumberTypeFloat32, kNumberTypeInt32}; + CheckAndConvertUtils::CheckTensorTypeValid("x_dtype", dtype, valid_types, prim_name); + auto tensor_type = dtype->cast(); + MS_EXCEPTION_IF_NULL(tensor_type); + auto element = tensor_type->element(); + MS_EXCEPTION_IF_NULL(element); + auto infer_type = std::make_shared(TypeIdToType(element->type_id())); + + return std::make_shared(infer_type, infer_shape->shape()); +} +REGISTER_PRIMITIVE_EVAL_IMPL(Atan, prim::kPrimAtan, AtanInfer); +REGISTER_PRIMITIVE_C(kNameAtan, Atan); +} // namespace ops +} // namespace mindspore diff --git a/mindspore/core/c_ops/atan.h b/mindspore/core/ops/atan.h similarity index 72% rename from mindspore/core/c_ops/atan.h rename to mindspore/core/ops/atan.h index ea670a813f0..cf726611263 100644 --- a/mindspore/core/c_ops/atan.h +++ b/mindspore/core/ops/atan.h @@ -14,17 +14,18 @@ * limitations under the License. */ -#ifndef MINDSPORE_CORE_C_OPS_ATAN_H_ -#define MINDSPORE_CORE_C_OPS_ATAN_H_ +#ifndef MINDSPORE_CORE_OPS_ATAN_H_ +#define MINDSPORE_CORE_OPS_ATAN_H_ #include #include #include #include -#include "c_ops/primitive_c.h" +#include "ops/primitive_c.h" #include "abstract/abstract_value.h" #include "utils/check_convert_utils.h" namespace mindspore { +namespace ops { constexpr auto kNameAtan = "Atan"; class Atan : public PrimitiveC { public: @@ -33,6 +34,10 @@ class Atan : public PrimitiveC { MS_DECLARE_PARENT(Atan, PrimitiveC); void Init() {} }; +AbstractBasePtr ATanInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args); +using PrimAtanPtr = std::shared_ptr; +} // namespace ops } // namespace mindspore -#endif // MINDSPORE_CORE_C_OPS_ATAN_H_ +#endif // MINDSPORE_CORE_OPS_ATAN_H_ diff --git a/mindspore/core/ops/audio_spectrogram.cc b/mindspore/core/ops/audio_spectrogram.cc new file mode 100644 index 00000000000..c2ae4cf98dd --- /dev/null +++ b/mindspore/core/ops/audio_spectrogram.cc @@ -0,0 +1,125 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "ops/audio_spectrogram.h" +#include +#include +#include +#include +#include +#include "ops/op_utils.h" +#include "utils/check_convert_utils.h" +#include "abstract/primitive_infer_map.h" + +namespace mindspore { +namespace ops { +namespace { +abstract::ShapePtr AudioSpectrogramInferShape(const PrimitivePtr &primitive, + const std::vector &input_args) { + MS_EXCEPTION_IF_NULL(primitive); + auto audio_spectrogram_prim = primitive->cast(); + MS_EXCEPTION_IF_NULL(audio_spectrogram_prim); + auto prim_name = audio_spectrogram_prim->name(); + auto input_shape = + CheckAndConvertUtils::ConvertShapePtrToShape("input_shape", input_args[0]->BuildShape(), prim_name); + if (input_shape.size() != 2) { + MS_LOG(ERROR) << "input shape is error, which need to be 2 dimensions"; + } + if (audio_spectrogram_prim->get_window_size() < 2) { + MS_LOG(ERROR) << "window size is too short, now is " << audio_spectrogram_prim->get_window_size(); + } + if (audio_spectrogram_prim->get_stride() < 1) { + MS_LOG(ERROR) << "stride must be positive, now is " << audio_spectrogram_prim->get_stride(); + } + std::vector infer_shape; + infer_shape.push_back(input_shape[1]); + int64_t sample_sub_window = input_shape[0] - audio_spectrogram_prim->get_window_size(); + infer_shape.push_back(sample_sub_window < 0 ? 0 : 1 + sample_sub_window / audio_spectrogram_prim->get_stride()); + int64_t fft_length = audio_spectrogram_prim->GetFftLength(audio_spectrogram_prim->get_window_size()); + infer_shape.push_back(fft_length / 2 + 1); + MS_LOG(ERROR) << infer_shape; + return std::make_shared(infer_shape); +} + +TypePtr AudioSpectrogramInferType(const PrimitivePtr &prim, const std::vector &input_args) { + for (const auto &item : input_args) { + MS_EXCEPTION_IF_NULL(item); + } + auto infer_type = input_args[0]->BuildType(); + auto tensor_type = infer_type->cast(); + MS_EXCEPTION_IF_NULL(tensor_type); + auto data_type = tensor_type->element(); + MS_EXCEPTION_IF_NULL(data_type); + return data_type; +} +} // namespace + +void AudioSpectrogram::set_window_size(const int64_t window_size) { + this->AddAttr(kWindowSize, MakeValue(window_size)); +} +int64_t AudioSpectrogram::get_window_size() const { + auto value_ptr = GetAttr(kWindowSize); + return GetValue(value_ptr); +} + +void AudioSpectrogram::set_stride(const int64_t stride) { this->AddAttr(kStride, MakeValue(stride)); } +int64_t AudioSpectrogram::get_stride() const { + auto value_ptr = GetAttr(kStride); + return GetValue(value_ptr); +} + +int64_t AudioSpectrogram::Log2Ceil(int64_t length) { + if (length == 0) { + return -1; + } + int64_t floor = 0; + for (int64_t i = 4; i >= 0; --i) { + const int64_t shift = (int64_t)(1 << i); + int64_t tmp = length >> shift; + if (tmp != 0) { + length = tmp; + floor += shift; + } + } + return length == (length & ~(length - 1)) ? 
floor : floor + 1; +} + +int64_t AudioSpectrogram::GetFftLength(int64_t length) { + int64_t shift = Log2Ceil(length); + return 1 << shift; +} + +void AudioSpectrogram::set_mag_square(const bool mag_square) { this->AddAttr(kMagSquare, MakeValue(mag_square)); } +bool AudioSpectrogram::get_mag_square() const { + auto value_ptr = GetAttr(kMagSquare); + return GetValue(value_ptr); +} +void AudioSpectrogram::Init(const int64_t window_size, const int64_t stride, const bool mag_square) { + this->set_window_size(window_size); + this->set_stride(stride); + this->set_mag_square(mag_square); +} + +AbstractBasePtr AudioSpectrogramInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args) { + return std::make_shared(AudioSpectrogramInferType(primitive, input_args), + AudioSpectrogramInferShape(primitive, input_args)->shape()); +} + +REGISTER_PRIMITIVE_EVAL_IMPL(AudioSpectrogram, prim::kPrimAudioSpectrogram, AudioSpectrogramInfer); +REGISTER_PRIMITIVE_C(kNameAudioSpectrogram, AudioSpectrogram); +} // namespace ops +} // namespace mindspore diff --git a/mindspore/core/c_ops/audio_spectrogram.h b/mindspore/core/ops/audio_spectrogram.h similarity index 60% rename from mindspore/core/c_ops/audio_spectrogram.h rename to mindspore/core/ops/audio_spectrogram.h index 54059385ba1..7d4a11d71a8 100644 --- a/mindspore/core/c_ops/audio_spectrogram.h +++ b/mindspore/core/ops/audio_spectrogram.h @@ -14,31 +14,38 @@ * limitations under the License. */ -#ifndef MINDSPORE_CORE_C_OPS_AUDIOSPECTROGRAM_H_ -#define MINDSPORE_CORE_C_OPS_AUDIOSPECTROGRAM_H_ +#ifndef MINDSPORE_CORE_OPS_AUDIO_SPECTROGRAM_H_ +#define MINDSPORE_CORE_OPS_AUDIO_SPECTROGRAM_H_ #include #include #include #include -#include "c_ops/primitive_c.h" +#include "ops/primitive_c.h" #include "abstract/abstract_value.h" #include "utils/check_convert_utils.h" namespace mindspore { +namespace ops { constexpr auto kNameAudioSpectrogram = "AudioSpectrogram"; class AudioSpectrogram : public PrimitiveC { public: AudioSpectrogram() : PrimitiveC(kNameAudioSpectrogram) {} ~AudioSpectrogram() = default; MS_DECLARE_PARENT(AudioSpectrogram, PrimitiveC); - void Init(const int64_t &window_size, const int64_t &stride, const bool &mag_square); - void set_window_size(const int64_t &window_size); - void set_stride(const int64_t &stride); - void set_mag_square(const bool &mag_square); + void Init(const int64_t window_size, const int64_t stride, const bool mag_square); + void set_window_size(const int64_t window_size); + void set_stride(const int64_t stride); + void set_mag_square(const bool mag_square); int64_t get_window_size() const; int64_t get_stride() const; bool get_mag_square() const; + int64_t Log2Ceil(int64_t length); + int64_t GetFftLength(int64_t length); }; +AbstractBasePtr AudioSpectrogramInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args); +using PrimAudioSpectrogramPtr = std::shared_ptr; +} // namespace ops } // namespace mindspore -#endif // MINDSPORE_CORE_C_OPS_AUDIOSPECTROGRAM_H_ +#endif // MINDSPORE_CORE_OPS_AUDIO_SPECTROGRAM_H_ diff --git a/mindspore/core/c_ops/avg_pool.cc b/mindspore/core/ops/avg_pool.cc similarity index 78% rename from mindspore/core/c_ops/avg_pool.cc rename to mindspore/core/ops/avg_pool.cc index a6cd65defcf..487c6858ee9 100644 --- a/mindspore/core/c_ops/avg_pool.cc +++ b/mindspore/core/ops/avg_pool.cc @@ -14,25 +14,26 @@ * limitations under the License. 
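// [Illustrative sketch - editor's addition, not taken from this change]
// Plain arithmetic mirroring AudioSpectrogramInferShape above:
//   output = {channels, frames, fft_length / 2 + 1}
//   frames = 1 + (samples - window_size) / stride, or 0 if the window does
//            not fit, with fft_length = GetFftLength(window_size).
// The helper below is standalone; fft_length is passed in explicitly, and the
// worked example uses a power-of-two window so GetFftLength(256) == 256.
#include <cstdint>
#include <vector>

std::vector<int64_t> SpectrogramShape(const std::vector<int64_t> &input_shape,  // {samples, channels}
                                      int64_t window_size, int64_t stride, int64_t fft_length) {
  const int64_t sample_sub_window = input_shape[0] - window_size;
  const int64_t frames = sample_sub_window < 0 ? 0 : 1 + sample_sub_window / stride;
  return {input_shape[1], frames, fft_length / 2 + 1};
}
// Example: input {1024, 2}, window_size 256, stride 128, fft_length 256
//          -> {2, 7, 129}.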
*/ -#include "c_ops/avg_pool.h" +#include "ops/avg_pool.h" #include #include #include #include #include -#include "c_ops/op_utils.h" +#include "ops/op_utils.h" #include "utils/check_convert_utils.h" #include "abstract/primitive_infer_map.h" namespace mindspore { -void AvgPool::set_padding(const std::string &padding) { - CheckAndConvertUtils::CheckString(kPadding, padding, {kValid, kSame}, this->name()); - this->AddAttr(kPadding, MakeValue(padding)); +namespace ops { +void AvgPool::set_pad_mode(const PadMode &pad_mode) { + int64_t swi = pad_mode; + this->AddAttr(kPadMode, MakeValue(swi)); } -std::string AvgPool::get_padding() const { - auto value_ptr = GetAttr(kPadding); - return GetValue(value_ptr); +PadMode AvgPool::get_pad_mode() const { + auto value_ptr = GetAttr(kPadMode); + return PadMode(GetValue(value_ptr)); } void AvgPool::set_kernel_size(const std::vector &kernel_size) { this->AddAttr(kKernelSize, MakeValue(CheckAndConvertUtils::CheckPositiveVector(kKernelSize, kernel_size, this->name(), @@ -44,12 +45,12 @@ std::vector AvgPool::get_kernel_size() const { return GetValue>(value_ptr); } void AvgPool::set_strides(const std::vector &strides) { - this->AddAttr(kStride, - MakeValue(CheckAndConvertUtils::CheckPositiveVector(kStride, strides, this->name(), false, true))); + this->AddAttr(kStrides, + MakeValue(CheckAndConvertUtils::CheckPositiveVector(kStrides, strides, this->name(), false, true))); } std::vector AvgPool::get_strides() const { - auto value_ptr = GetAttr(kStride); + auto value_ptr = GetAttr(kStrides); return GetValue>(value_ptr); } @@ -70,20 +71,19 @@ std::vector AvgPool::get_pad() const { return GetValue>(value_ptr); } -void AvgPool::set_round_mode(const int64_t &round_mode) { - CheckAndConvertUtils::CheckInRange(kRoundMode, round_mode, kIncludeBoth, {0, 1}, this->name()); - this->AddAttr(kRoundMode, MakeValue(round_mode)); +void AvgPool::set_round_mode(const RoundMode &round_mode) { + int64_t swi = round_mode; + this->AddAttr(kRoundMode, MakeValue(swi)); } -int64_t AvgPool::get_round_mode() const { +RoundMode AvgPool::get_round_mode() const { auto value_ptr = GetAttr(kRoundMode); - return GetValue(value_ptr); + return RoundMode(GetValue(value_ptr)); } -void AvgPool::Init(const std::vector &kernel_size, const std::vector &stride, - const std::string &padding, const Format &format, const std::vector &pad, - const int64_t &round_mode) { - this->set_padding(padding); +void AvgPool::Init(const std::vector &kernel_size, const std::vector &stride, const PadMode &pad_mode, + const Format &format, const std::vector &pad, const RoundMode &round_mode) { + this->set_pad_mode(pad_mode); this->set_kernel_size(kernel_size); this->set_strides(stride); this->set_format(format); @@ -98,9 +98,12 @@ abstract::ShapePtr InferShape(const PrimitivePtr &primitive, const std::vectorname(); auto in_shape = CheckAndConvertUtils::ConvertShapePtrToShape("x_shape", input_args[0]->GetShapeTrack(), op_name); + if (pool_prim->get_format() == NHWC) { + in_shape = {in_shape[0], in_shape[3], in_shape[1], in_shape[2]}; + } CheckAndConvertUtils::CheckInteger("x_rank", in_shape.size(), kEqual, 4, op_name); auto kernel_size = pool_prim->get_kernel_size(); - auto pad_mode = pool_prim->get_padding(); + auto pad_mode = pool_prim->get_pad_mode(); auto batch = in_shape[0]; auto channel = in_shape[1]; auto in_h = in_shape[2]; @@ -113,14 +116,17 @@ abstract::ShapePtr InferShape(const PrimitivePtr &primitive, const std::vector out_shape = {batch, channel, out_h, out_w}; + if (pool_prim->get_format() == NHWC) { + 
out_shape = {batch, out_h, out_w, channel}; + } if (std::any_of(out_shape.begin(), out_shape.end(), [](int64_t a) { return a <= 0; })) { MS_LOG(EXCEPTION) << "Kernel size is not valid."; } @@ -142,4 +148,5 @@ AbstractBasePtr AvgPoolInfer(const abstract::AnalysisEnginePtr &, const Primitiv } REGISTER_PRIMITIVE_EVAL_IMPL(AvgPool, prim::kPrimAvgPool, AvgPoolInfer); REGISTER_PRIMITIVE_C(kNameAvgPool, AvgPool); +} // namespace ops } // namespace mindspore diff --git a/mindspore/core/c_ops/avg_pool.h b/mindspore/core/ops/avg_pool.h similarity index 76% rename from mindspore/core/c_ops/avg_pool.h rename to mindspore/core/ops/avg_pool.h index 866f3927da6..4985519938d 100644 --- a/mindspore/core/c_ops/avg_pool.h +++ b/mindspore/core/ops/avg_pool.h @@ -14,45 +14,48 @@ * limitations under the License. */ -#ifndef MINDSPORE_CORE_C_OPS_AVG_POOL_H_ -#define MINDSPORE_CORE_C_OPS_AVG_POOL_H_ +#ifndef MINDSPORE_CORE_OPS_AVG_POOL_H_ +#define MINDSPORE_CORE_OPS_AVG_POOL_H_ #include #include #include #include -#include "c_ops/primitive_c.h" +#include "ops/primitive_c.h" #include "abstract/abstract_value.h" #include "utils/check_convert_utils.h" namespace mindspore { +namespace ops { constexpr auto kNameAvgPool = "AvgPool"; class AvgPool : public PrimitiveC { public: AvgPool() : PrimitiveC(kNameAvgPool) { InitIOName({"x"}, {"output"}); } + explicit AvgPool(const std::string k_name) : PrimitiveC(k_name) { InitIOName({"x"}, {"output"}); } ~AvgPool() = default; MS_DECLARE_PARENT(AvgPool, PrimitiveC); void Init(const std::vector &kernel_size = {1}, const std::vector &stride = {1}, - const std::string &padding = "valid", const Format &format = NCHW, - const std::vector &pad = {0, 0, 0, 0}, const int64_t &round_mode = 0); - void set_padding(const std::string &padding); + const PadMode &pad_mode = VALID, const Format &format = NCHW, + const std::vector &pad = {0, 0, 0, 0}, const RoundMode &round_mode = FLOOR); + void set_pad_mode(const PadMode &pad_mode); void set_kernel_size(const std::vector &kernel_size); void set_strides(const std::vector &strides); void set_format(const Format &format); void set_pad(const std::vector &pad); - void set_round_mode(const int64_t &round_mode); + void set_round_mode(const RoundMode &round_mode); std::vector get_kernel_size() const; std::vector get_strides() const; - std::string get_padding() const; + PadMode get_pad_mode() const; Format get_format() const; std::vector get_pad() const; - int64_t get_round_mode() const; + RoundMode get_round_mode() const; }; AbstractBasePtr AvgPoolInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, const std::vector &input_args); using PrimAvgPoolPtr = std::shared_ptr; +} // namespace ops } // namespace mindspore -#endif // MINDSPORE_CORE_C_OPS_AVG_POOL_H_ +#endif // MINDSPORE_CORE_OPS_AVG_POOL_H_ diff --git a/mindspore/core/ops/batch_norm.cc b/mindspore/core/ops/batch_norm.cc new file mode 100644 index 00000000000..626ae921b0b --- /dev/null +++ b/mindspore/core/ops/batch_norm.cc @@ -0,0 +1,140 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
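// [Illustrative sketch - editor's addition, not taken from this change]
// A hedged restatement of the AvgPool output-shape rule used above. The exact
// rounding lines are elided from the visible hunk, so the formulas below are
// the usual MindSpore SAME/VALID conventions and should be read as an
// assumption:
//   VALID: out = ceil((in - kernel + 1) / stride)
//   SAME : out = ceil(in / stride)
// For NHWC input, the hunk above reorders the shape to NCHW before this
// computation and reorders the result back to NHWC afterwards.
#include <cstdint>

int64_t PooledDim(int64_t in, int64_t kernel, int64_t stride, bool same_mode) {
  const int64_t numerator = same_mode ? in : in - kernel + 1;
  return (numerator + stride - 1) / stride;  // ceiling division for positive values
}
// Example: in_h = in_w = 9, kernel = 2, stride = 1
//          VALID -> 8 x 8, SAME -> 9 x 9, NCHW output {batch, channel, out_h, out_w}.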
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +#include +#include +#include +#include +#include +#include "ops/batch_norm.h" +#include "abstract/primitive_infer_map.h" +#include "utils/check_convert_utils.h" + +namespace mindspore { +namespace ops { +void BatchNorm::Init(const bool is_training, const float epsilon, const float momentum, const Format &format) { + set_is_training(is_training); + set_epsilon(epsilon); + set_format(format); + set_momentum(momentum); +} + +void BatchNorm::set_is_training(const bool is_training) { this->AddAttr(kIsTraining, MakeValue(is_training)); } + +void BatchNorm::set_epsilon(const float epsilon) { + CheckAndConvertUtils::CheckInRange(kEpsilon, epsilon, kIncludeBoth, {0.0, 1.0}, this->name()); + this->AddAttr(kEpsilon, MakeValue(epsilon)); +} + +void BatchNorm::set_format(const Format &format) { + int64_t f = format; + this->AddAttr(kFormat, MakeValue(f)); +} + +void BatchNorm::set_momentum(const float momentun) { + CheckAndConvertUtils::CheckInRange(kMomentum, momentun, kIncludeBoth, {0.0, 1.0}, this->name()); + this->AddAttr(kMomentum, MakeValue(momentun)); +} + +float BatchNorm::get_momentum() const { + auto value_ptr = GetAttr(kMomentum); + return GetValue(value_ptr); +} + +bool BatchNorm::get_is_training() const { + auto value_ptr = GetAttr(kIsTraining); + return GetValue(value_ptr); +} + +float BatchNorm::get_epsilon() const { + auto value_ptr = GetAttr(kEpsilon); + return GetValue(value_ptr); +} + +Format BatchNorm::get_format() const { + auto value_ptr = GetAttr(kFormat); + return Format(GetValue(value_ptr)); +} + +AbstractBasePtr BatchNormInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args) { + // Infer shape + MS_EXCEPTION_IF_NULL(primitive); + auto batch_prim = primitive->cast(); + MS_EXCEPTION_IF_NULL(batch_prim); + auto prim_name = batch_prim->name(); + CheckAndConvertUtils::CheckInteger("batch_norm_infer", input_args.size(), kEqual, 5, prim_name); + + auto input_x = CheckAndConvertUtils::ConvertShapePtrToShape("input_x", input_args[0]->BuildShape(), prim_name); + if (batch_prim->get_format() == NHWC) { + input_x = {input_x[0], input_x[3], input_x[1], input_x[2]}; + } + auto scale = CheckAndConvertUtils::ConvertShapePtrToShape("scale", input_args[1]->BuildShape(), prim_name); + auto bias = CheckAndConvertUtils::ConvertShapePtrToShape("bias", input_args[2]->BuildShape(), prim_name); + auto mean = CheckAndConvertUtils::ConvertShapePtrToShape("mean", input_args[3]->BuildShape(), prim_name); + auto variance = CheckAndConvertUtils::ConvertShapePtrToShape("variance", input_args[4]->BuildShape(), prim_name); + + std::vector input_shape_norm; + if (batch_prim->get_format() == NCHW) { + input_shape_norm = + CheckAndConvertUtils::ConvertShapePtrToShape("x_shape", input_args[0]->GetShapeTrack(), prim_name); + } else { + input_shape_norm.push_back(input_x[0]); + input_shape_norm.push_back(input_x[3]); + input_shape_norm.push_back(input_x[1]); + input_shape_norm.push_back(input_x[2]); + } + CheckAndConvertUtils::CheckInteger("scale rank", scale.size(), kEqual, 1, prim_name); + CheckAndConvertUtils::Check("scale shape", scale, kEqual, "bias shape", bias, prim_name, TypeError); + CheckAndConvertUtils::Check("scale shape[0]", scale[0], kEqual, "input_x channel", input_shape_norm[1], prim_name, + TypeError); + if (!batch_prim->get_is_training()) { + CheckAndConvertUtils::CheckInteger("mean rank", mean.size(), kEqual, 1, prim_name); + 
CheckAndConvertUtils::Check("mean shape", mean, kEqual, "variance shape", variance, prim_name, TypeError); + CheckAndConvertUtils::Check("mean shape", mean, kEqual, "scale shape", scale, prim_name, TypeError); + } + + // Infer type + auto input_x_type = input_args[0]->BuildType()->cast()->element(); + auto scale_type = input_args[1]->BuildType()->cast()->element(); + auto bias_type = input_args[2]->BuildType()->cast()->element(); + + const std::set valid_types = {kNumberTypeFloat16, kNumberTypeFloat32}; + CheckAndConvertUtils::CheckTensorTypeValid("input_x", input_args[0]->BuildType(), valid_types, prim_name); + std::map args; + args.emplace("scale", input_args[1]->BuildType()); + args.emplace("bias", input_args[2]->BuildType()); + CheckAndConvertUtils::CheckTensorTypeSame(args, valid_types, prim_name); + std::map args_moving; + args_moving.emplace("scale", input_args[2]->BuildType()); + args_moving.emplace("bias", input_args[3]->BuildType()); + CheckAndConvertUtils::CheckTensorTypeSame(args_moving, valid_types, prim_name); + + auto output0 = std::make_shared(input_x_type, input_x); + auto output1 = std::make_shared(scale_type, scale); + auto output2 = std::make_shared(bias_type, scale); + auto output3 = std::make_shared(input_x_type, scale); + if (batch_prim->get_format() == NHWC) { + output2 = std::make_shared(scale_type, scale); + output3 = std::make_shared(bias_type, scale); + output1 = std::make_shared(input_x_type, scale); + } + AbstractBasePtrList output = {output0, output1, output2, output3, output3}; + return std::make_shared(output); +} +REGISTER_PRIMITIVE_EVAL_IMPL(BatchNorm, prim::kPrimBatchNorm, BatchNormInfer); +REGISTER_PRIMITIVE_C(kNameBatchNorm, BatchNorm); +} // namespace ops +} // namespace mindspore diff --git a/mindspore/core/c_ops/batch_norm.h b/mindspore/core/ops/batch_norm.h similarity index 71% rename from mindspore/core/c_ops/batch_norm.h rename to mindspore/core/ops/batch_norm.h index 2aa093a2169..06aa5cae59a 100644 --- a/mindspore/core/c_ops/batch_norm.h +++ b/mindspore/core/ops/batch_norm.h @@ -14,17 +14,18 @@ * limitations under the License. 
*/ -#ifndef MINDSPORE_CORE_C_OPS_BATCH_NORMAL_H_ -#define MINDSPORE_CORE_C_OPS_BATCH_NORMAL_H_ +#ifndef MINDSPORE_CORE_OPS_BATCH_NORMAL_H_ +#define MINDSPORE_CORE_OPS_BATCH_NORMAL_H_ #include #include #include #include -#include "c_ops/op_utils.h" -#include "c_ops/primitive_c.h" +#include "ops/op_utils.h" +#include "ops/primitive_c.h" #include "abstract/abstract_value.h" namespace mindspore { +namespace ops { constexpr auto kNameBatchNorm = "BatchNorm"; class BatchNorm : public PrimitiveC { public: @@ -34,19 +35,23 @@ class BatchNorm : public PrimitiveC { } ~BatchNorm() = default; MS_DECLARE_PARENT(BatchNorm, PrimitiveC); - void Init(bool is_training = false, float epsilon = 1e-5, const Format &format = NCHW); - void set_is_training(bool is_training); - void set_epsilon(float epsilon); + void Init(const bool is_training = false, const float epsilon = 1e-5, const float momentun = 0.1, + const Format &format = NCHW); + void set_is_training(const bool is_training); + void set_epsilon(const float epsilon); void set_format(const Format &format); - bool get_is_trainging(); - float get_epsilon(); + void set_momentum(const float momentum); + bool get_is_training() const; + float get_epsilon() const; Format get_format() const; + float get_momentum() const; }; AbstractBasePtr BatchNormInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, const std::vector &input_args); using PrimBatchNormPtr = std::shared_ptr; +} // namespace ops } // namespace mindspore -#endif // MINDSPORE_CORE_C_OPS_BatchNorm_H_ +#endif // MINDSPORE_CORE_OPS_BatchNorm_H_ diff --git a/mindspore/core/ops/batch_norm_fold.cc b/mindspore/core/ops/batch_norm_fold.cc new file mode 100644 index 00000000000..8801b859249 --- /dev/null +++ b/mindspore/core/ops/batch_norm_fold.cc @@ -0,0 +1,116 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include +#include +#include +#include "ops/batch_norm_fold.h" +#include "ops/op_utils.h" + +namespace mindspore { +namespace ops { + +void BatchNormFold::Init(const float momentum, const float epsilon, const bool is_training, const int64_t freeze_bn) { + set_momentum(momentum); + set_epsilon(epsilon); + set_is_training(is_training); + set_freeze_bn(freeze_bn); +} + +void BatchNormFold::set_momentum(const float momentum) { + CheckAndConvertUtils::CheckInRange(kMomentum, momentum, kIncludeBoth, {0.0, 1.0}, this->name()); + this->AddAttr(kMomentum, MakeValue(momentum)); +} + +float BatchNormFold::get_momentum() const { + auto value_ptr = GetAttr(kMomentum); + return GetValue(value_ptr); +} + +void BatchNormFold::set_epsilon(const float epsilon) { + float match_value = 0.0; + CheckAndConvertUtils::CheckValue(kEpsilon, epsilon, kGreaterThan, match_value, this->name()); + this->AddAttr(kEpsilon, MakeValue(epsilon)); +} + +float BatchNormFold::get_epsilon() const { + auto value_ptr = GetAttr(kEpsilon); + return GetValue(value_ptr); +} + +void BatchNormFold::set_is_training(const bool is_training) { this->AddAttr(kIsTraining, MakeValue(is_training)); } + +bool BatchNormFold::get_is_training() const { + auto value_ptr = GetAttr(kIsTraining); + return GetValue(value_ptr); +} + +void BatchNormFold::set_freeze_bn(const int64_t freeze_bn) { this->AddAttr(kFreezeBn, MakeValue(freeze_bn)); } + +int64_t BatchNormFold::get_freeze_bn() const { + auto value_ptr = GetAttr(kFreezeBn); + return GetValue(value_ptr); +} + +AbstractBasePtr BatchNormFoldInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args) { + MS_EXCEPTION_IF_NULL(primitive); + auto BatchNormFold_prim = primitive->cast(); + MS_EXCEPTION_IF_NULL(BatchNormFold_prim); + auto op_name = BatchNormFold_prim->name(); + auto mean_shape = CheckAndConvertUtils::ConvertShapePtrToShape("mean_shape", input_args[1]->BuildShape(), op_name); + auto variance_shape = + CheckAndConvertUtils::ConvertShapePtrToShape("variance_shape", input_args[2]->BuildShape(), op_name); + auto x_shape = CheckAndConvertUtils::ConvertShapePtrToShape("x_shape", input_args[0]->BuildShape(), op_name); + auto global_step_shape = + CheckAndConvertUtils::ConvertShapePtrToShape("global_step_shape", input_args[3]->BuildShape(), op_name); + CheckAndConvertUtils::Check("mean_shape", mean_shape, kEqual, "gamma_shape", variance_shape, op_name); + CheckAndConvertUtils::Check("mean_shape[0]", mean_shape[0], kEqual, "input channel", x_shape[1], op_name); + CheckAndConvertUtils::CheckInteger("global step shape len", global_step_shape.size(), kEqual, 1, op_name); + + auto mean_type = input_args[1]->BuildType(); + auto variance_type = input_args[2]->BuildType(); + auto x_type = input_args[0]->BuildType(); + auto global_step_type = input_args[3]->BuildType(); + + std::map args = {{"x", x_type}, {"mean", mean_type}, {"variance", variance_type}}; + CheckAndConvertUtils::CheckTensorTypeSame(args, {kNumberTypeFloat16, kNumberTypeFloat32}, op_name); + CheckAndConvertUtils::CheckTensorTypeValid("gloabal_step", global_step_type, {kNumberTypeInt32}, op_name); + + auto tensor_type0 = x_type->cast(); + MS_EXCEPTION_IF_NULL(tensor_type0); + auto element0 = tensor_type0->element(); + + auto tensor_type1 = mean_type->cast(); + MS_EXCEPTION_IF_NULL(tensor_type1); + auto element1 = tensor_type1->element(); + + auto tensor_type2 = variance_type->cast(); + MS_EXCEPTION_IF_NULL(tensor_type2); + auto element2 = tensor_type2->element(); + + 
CheckAndConvertUtils::Check("input type", element0->type_id(), kEqual, "mean_type", element1->type_id(), op_name); + CheckAndConvertUtils::Check("input type", element0->type_id(), kEqual, "variance_type", element2->type_id(), op_name); + + auto output = std::make_shared(element0, mean_shape); + AbstractBasePtrList output1 = {output, output, output, output}; + return std::make_shared(output1); +} + +REGISTER_PRIMITIVE_EVAL_IMPL(BatchNormFold, prim::kPrimBatchNormFold, BatchNormFoldInfer); +REGISTER_PRIMITIVE_C(kNameBatchNormFold, BatchNormFold); +} // namespace ops +} // namespace mindspore diff --git a/mindspore/core/ops/batch_norm_fold.h b/mindspore/core/ops/batch_norm_fold.h new file mode 100644 index 00000000000..d1cc903871c --- /dev/null +++ b/mindspore/core/ops/batch_norm_fold.h @@ -0,0 +1,54 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef MINDSPORE_CORE_OPS_BATCH_NORM_FOLD_H_ +#define MINDSPORE_CORE_OPS_BATCH_NORM_FOLD_H_ +#include +#include + +#include "ops/primitive_c.h" +#include "abstract/abstract_value.h" +#include "utils/check_convert_utils.h" + +namespace mindspore { +namespace ops { +constexpr auto kNameBatchNormFold = "BatchNormFold"; +class BatchNormFold : public PrimitiveC { + public: + BatchNormFold() : PrimitiveC(kNameBatchNormFold) { + InitIOName({"x", "mean", "variance", "global_step"}, {"batch_mean", "batch_std", "running_mean", "running_std"}); + } + ~BatchNormFold() = default; + MS_DECLARE_PARENT(BatchNormFold, PrimitiveC); + void Init(const float momentum = 0.9, const float epsilon = 1e-5, const bool is_training = true, + const int64_t freeze_bn = 0); + void set_momentum(const float momentum); + void set_epsilon(const float epsilon); + void set_is_training(const bool is_training); + void set_freeze_bn(const int64_t freeze_bn); + + float get_momentum() const; + float get_epsilon() const; + bool get_is_training() const; + int64_t get_freeze_bn() const; +}; + +AbstractBasePtr BatchNormFoldInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args); +using PrimBatchNormFoldPtr = std::shared_ptr; +} // namespace ops +} // namespace mindspore + +#endif // MINDSPORE_CORE_OPS_BATCH_NORM_FOLD_H_ diff --git a/mindspore/core/ops/batch_to_space.cc b/mindspore/core/ops/batch_to_space.cc new file mode 100644 index 00000000000..c39a213dbf3 --- /dev/null +++ b/mindspore/core/ops/batch_to_space.cc @@ -0,0 +1,81 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "ops/batch_to_space.h" +#include "ops/op_utils.h" +#include "utils/check_convert_utils.h" +#include "abstract/primitive_infer_map.h" + +namespace mindspore { +namespace ops { +void BatchToSpace::Init(const std::vector &block_size, const std::vector> &crops) { + this->set_block_size(block_size); + this->set_crops(crops); +} + +void BatchToSpace::set_block_size(const std::vector &block_size) { + this->AddAttr(kBlockSize, MakeValue(block_size)); +} + +std::vector BatchToSpace::get_block_size() const { + auto value_ptr = this->GetAttr(kBlockSize); + return GetValue>(value_ptr); +} + +void BatchToSpace::set_crops(const std::vector> &crops) { + this->AddAttr(kCrops, MakeValue(crops)); +} + +std::vector> BatchToSpace::get_crops() const { + auto value_ptr = this->GetAttr(kCrops); + return GetValue>>(value_ptr); +} + +AbstractBasePtr BatchToSpaceInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args) { + MS_EXCEPTION_IF_NULL(primitive); + auto prim = primitive->cast(); + MS_EXCEPTION_IF_NULL(prim); + auto prim_name = prim->name(); + CheckAndConvertUtils::CheckInteger("input number", input_args.size(), kEqual, 1, prim_name); + for (const auto &item : input_args) { + MS_EXCEPTION_IF_NULL(item); + } + CheckAndConvertUtils::CheckTensorTypeValid("input_x", input_args[0]->BuildType(), common_valid_types, prim_name); + + auto x_shape = CheckAndConvertUtils::ConvertShapePtrToShape("x_shape", input_args[0]->BuildShape(), prim_name); + CheckAndConvertUtils::CheckInteger("x rank", x_shape.size(), kEqual, 4, prim_name); + auto block_size = prim->get_block_size(); + auto crops = prim->get_crops(); + auto out_shape = x_shape; + for (size_t i = 0; i < 2; ++i) { + auto x_block_prod = out_shape[i + 2] * block_size[i]; + auto crops_sum = crops[i][0] + crops[i][1]; + CheckAndConvertUtils::Check("x block shape prod", x_block_prod, kGreaterThan, "crops sum", 4, prim_name); + out_shape[i + 2] = x_block_prod - crops_sum; + } + CheckAndConvertUtils::CheckInteger("x_shape[0] % (block_size[0]*block_size[1])", + out_shape[0] % (block_size[0] * block_size[1]), kEqual, 0, prim_name); + out_shape[0] /= block_size[0] * block_size[1]; + + auto ret = input_args[0]->Broaden(); + ret->set_shape(std::make_shared(out_shape)); + return ret; +} +REGISTER_PRIMITIVE_EVAL_IMPL(BatchToSpace, prim::kPrimBatchToSpace, BatchToSpaceInfer); +REGISTER_PRIMITIVE_C(kNameBatchToSpace, BatchToSpace); +} // namespace ops +} // namespace mindspore diff --git a/mindspore/core/ops/batch_to_space.h b/mindspore/core/ops/batch_to_space.h new file mode 100644 index 00000000000..8812999e02f --- /dev/null +++ b/mindspore/core/ops/batch_to_space.h @@ -0,0 +1,47 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_CORE_OPS_BATCH_TO_SPACE_H_ +#define MINDSPORE_CORE_OPS_BATCH_TO_SPACE_H_ + +#include +#include + +#include "ops/primitive_c.h" +#include "abstract/abstract_value.h" +#include "utils/check_convert_utils.h" + +namespace mindspore { +namespace ops { +constexpr auto kNameBatchToSpace = "BatchToSpace"; +class BatchToSpace : public PrimitiveC { + public: + BatchToSpace() : PrimitiveC(kNameBatchToSpace) {} + ~BatchToSpace() = default; + MS_DECLARE_PARENT(BatchToSpace, PrimitiveC); + void Init(const std::vector &block_size, const std::vector> &crops); + void set_block_size(const std::vector &block_size); + void set_crops(const std::vector> &crops); + std::vector get_block_size() const; + std::vector> get_crops() const; +}; + +AbstractBasePtr BatchToSpaceInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args); +using PrimBatchToSpacePtr = std::shared_ptr; +} // namespace ops +} // namespace mindspore + +#endif // MINDSPORE_CORE_OPS_BATCH_TO_SPACE_H_ diff --git a/mindspore/core/ops/batch_to_space_nd.cc b/mindspore/core/ops/batch_to_space_nd.cc new file mode 100644 index 00000000000..4153dc3a161 --- /dev/null +++ b/mindspore/core/ops/batch_to_space_nd.cc @@ -0,0 +1,109 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#include "ops/batch_to_space_nd.h" +#include +#include +#include +#include +#include +#include "ops/op_utils.h" +#include "utils/check_convert_utils.h" +#include "abstract/primitive_infer_map.h" + +namespace mindspore { +namespace ops { +namespace { +abstract::ShapePtr InferShape(const PrimitivePtr &primitive, const std::vector &input_args) { + MS_EXCEPTION_IF_NULL(primitive); + auto batch_prim = primitive->cast(); + MS_EXCEPTION_IF_NULL(batch_prim); + auto prim_name = batch_prim->name(); + auto x_shape = CheckAndConvertUtils::ConvertShapePtrToShape("x_shape", input_args[0]->BuildShape(), prim_name); + CheckAndConvertUtils::CheckInteger("input_x rank", x_shape.size(), kEqual, 4, prim_name); + auto out_shape = x_shape; + int64_t block_shape_prod = 1; + int64_t offset = 2; + auto block_shape = batch_prim->get_block_shape(); + auto crops = batch_prim->get_crops(); + int64_t size = block_shape.size(); + for (int64_t i = 0; i < size; i++) { + block_shape_prod = block_shape_prod * block_shape[i]; + auto x_block_prod = out_shape[i + offset] * block_shape[i]; + auto crops_sum = crops[i][0] + crops[i][1]; + CheckAndConvertUtils::Check("x block shape prod", x_block_prod, kGreaterThan, "crops sum", crops_sum, prim_name); + out_shape[i + offset] = x_block_prod - crops_sum; + } + if (out_shape[0] % block_shape_prod != 0) { + MS_EXCEPTION(ValueError) << prim_name << " input_x dimension 0 " << out_shape[0] + << " should be divisible by block_shape_prod " << block_shape_prod; + } + out_shape[0] = int64_t(floor(out_shape[0] / block_shape_prod)); + return std::make_shared(out_shape); +} + +TypePtr InferType(const PrimitivePtr &prim, const std::vector &input_args) { + for (const auto &item : input_args) { + MS_EXCEPTION_IF_NULL(item); + } + auto infer_type = input_args[0]->BuildType(); + return infer_type; +} +} // namespace + +void BatchToSpaceND::set_crops(std::vector> crops) { + CheckAndConvertUtils::CheckInteger(kCrops, crops.size(), kEqual, 2, this->name()); + int64_t h = crops.size(); + int64_t w = crops[0].size(); + std::vector temp_w = {2, 2}; + CheckAndConvertUtils::Check(kCrops, {h, w}, kEqual, "paddings_shape(2,2)", temp_w, this->name()); + for (int64_t i = 0; i < h; i++) { + for (int64_t j = 0; j < w; j++) { + CheckAndConvertUtils::CheckInteger(kCrops, crops[i][j], kGreaterEqual, 0, this->name()); + } + } + this->AddAttr(kCrops, MakeValue(crops)); +} + +std::vector> BatchToSpaceND::get_crops() const { + auto value_ptr = GetAttr(kCrops); + return GetValue>>(value_ptr); +} +void BatchToSpaceND::set_block_shape(std::vector block_shape) { + CheckAndConvertUtils::CheckInteger(kBlockShape, block_shape.size(), kEqual, 2, this->name()); + for (int64_t i = 0; i < (int64_t)block_shape.size(); i++) { + CheckAndConvertUtils::CheckInteger(kBlockShape, block_shape[i], kGreaterEqual, 1, this->name()); + } + this->AddAttr(kBlockShape, MakeValue(block_shape)); +} + +std::vector BatchToSpaceND::get_block_shape() const { + auto value_ptr = GetAttr(kBlockShape); + return GetValue>(value_ptr); +} + +void BatchToSpaceND::Init(std::vector block_shape, std::vector> crops) { + this->set_crops(crops); + this->set_block_shape(block_shape); +} +AbstractBasePtr BatchToSpaceNDInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args) { + return std::make_shared(InferType(primitive, input_args), + InferShape(primitive, input_args)->shape()); +} +REGISTER_PRIMITIVE_EVAL_IMPL(BatchToSpaceND, prim::kPrimBatchToSpaceND, BatchToSpaceNDInfer); 
+REGISTER_PRIMITIVE_C(kNameBatchToSpaceND, BatchToSpaceND); +} // namespace ops +} // namespace mindspore diff --git a/mindspore/core/ops/batch_to_space_nd.h b/mindspore/core/ops/batch_to_space_nd.h new file mode 100644 index 00000000000..3a745b5f42e --- /dev/null +++ b/mindspore/core/ops/batch_to_space_nd.h @@ -0,0 +1,48 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_CORE_OPS_BATCH_TO_SPACE_ND_H_ +#define MINDSPORE_CORE_OPS_BATCH_TO_SPACE_ND_H_ + +#include +#include +#include +#include +#include "ops/primitive_c.h" +#include "abstract/abstract_value.h" +#include "utils/check_convert_utils.h" + +namespace mindspore { +namespace ops { +constexpr auto kNameBatchToSpaceND = "BatchToSpaceND"; +class BatchToSpaceND : public PrimitiveC { + public: + BatchToSpaceND() : PrimitiveC(kNameBatchToSpaceND) {} + ~BatchToSpaceND() = default; + MS_DECLARE_PARENT(BatchToSpaceND, PrimitiveC); + void Init(std::vector block_shape, std::vector> crops); + void set_crops(std::vector> crops); + void set_block_shape(std::vector block_shape); + std::vector get_block_shape() const; + std::vector> get_crops() const; +}; +AbstractBasePtr BatchToSpaceNDInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args); +using PrimBatchToSpaceNDPtr = std::shared_ptr; +} // namespace ops +} // namespace mindspore + +#endif // MINDSPORE_CORE_OPS_BATCH_TO_SPACE_ND_H_ diff --git a/mindspore/core/c_ops/bias_add.cc b/mindspore/core/ops/bias_add.cc similarity index 88% rename from mindspore/core/c_ops/bias_add.cc rename to mindspore/core/ops/bias_add.cc index 358a146ed5d..6b3331eab68 100644 --- a/mindspore/core/c_ops/bias_add.cc +++ b/mindspore/core/ops/bias_add.cc @@ -14,12 +14,15 @@ * limitations under the License. */ -#include "c_ops/bias_add.h" +#include "ops/bias_add.h" #include -#include "c_ops/op_utils.h" +#include "ops/op_utils.h" #include "utils/check_convert_utils.h" +// Add +#include "abstract/primitive_infer_map.h" namespace mindspore { +namespace ops { void BiasAdd::set_format(const Format &format) { int64_t f = format; this->AddAttr(kFormat, MakeValue(f)); @@ -29,5 +32,7 @@ Format BiasAdd::get_format() const { return Format(GetValue(value_ptr)); } void BiasAdd::Init(const Format &format) { this->set_format(format); } +// Add REGISTER_PRIMITIVE_C(kNameBiasAdd, BiasAdd); +} // namespace ops } // namespace mindspore diff --git a/mindspore/core/c_ops/bias_add.h b/mindspore/core/ops/bias_add.h similarity index 84% rename from mindspore/core/c_ops/bias_add.h rename to mindspore/core/ops/bias_add.h index 6fd4733380c..6d79834478b 100644 --- a/mindspore/core/c_ops/bias_add.h +++ b/mindspore/core/ops/bias_add.h @@ -14,17 +14,20 @@ * limitations under the License. 
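// [Illustrative sketch - editor's addition, not taken from this change]
// Plain-arithmetic restatement of the BatchToSpaceND shape rule implemented
// above for a 4-D NCHW input with 2-D block_shape and crops:
//   out[0]     = in[0] / (block[0] * block[1])   (must divide evenly)
//   out[1]     = in[1]
//   out[i + 2] = in[i + 2] * block[i] - crops[i][0] - crops[i][1]
#include <cstdint>
#include <vector>

std::vector<int64_t> BatchToSpaceNdShape(const std::vector<int64_t> &in,     // {N, C, H, W}
                                         const std::vector<int64_t> &block,  // {bh, bw}
                                         const std::vector<std::vector<int64_t>> &crops) {
  std::vector<int64_t> out = in;
  int64_t block_prod = 1;
  for (size_t i = 0; i < block.size(); ++i) {
    block_prod *= block[i];
    out[i + 2] = in[i + 2] * block[i] - crops[i][0] - crops[i][1];
  }
  out[0] = in[0] / block_prod;
  return out;
}
// Example: in {4, 3, 2, 2}, block {2, 2}, crops {{0, 0}, {0, 0}} -> {1, 3, 4, 4}.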
*/ -#ifndef MINDSPORE_CORE_C_OPS_BIASADD_H_ -#define MINDSPORE_CORE_C_OPS_BIASADD_H_ +#ifndef MINDSPORE_CORE_OPS_BIAS_ADD_H_ +#define MINDSPORE_CORE_OPS_BIAS_ADD_H_ #include #include #include #include -#include "c_ops/primitive_c.h" +#include "ops/primitive_c.h" #include "abstract/abstract_value.h" #include "utils/check_convert_utils.h" +// Add +#include "ops/op_utils.h" namespace mindspore { +namespace ops { constexpr auto kNameBiasAdd = "BiasAdd"; class BiasAdd : public PrimitiveC { public: @@ -35,6 +38,7 @@ class BiasAdd : public PrimitiveC { void set_format(const Format &format); Format get_format() const; }; +} // namespace ops } // namespace mindspore -#endif // MINDSPORE_CORE_C_OPS_BIASADD_H_ +#endif // MINDSPORE_CORE_OPS_BIAS_ADD_H_ diff --git a/mindspore/core/ops/binary_cross_entropy.cc b/mindspore/core/ops/binary_cross_entropy.cc new file mode 100644 index 00000000000..67d7540eb5e --- /dev/null +++ b/mindspore/core/ops/binary_cross_entropy.cc @@ -0,0 +1,94 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include +#include +#include +#include +#include +#include + +#include "ops/binary_cross_entropy.h" +#include "ops/op_utils.h" +#include "utils/check_convert_utils.h" +#include "abstract/primitive_infer_map.h" + +namespace mindspore { +namespace ops { +namespace { +abstract::ShapePtr BinaryCrossEntroyInferShape(const PrimitivePtr &primitive, + const std::vector &input_args) { + MS_EXCEPTION_IF_NULL(primitive); + auto binary_cross_entropy_prim = primitive->cast(); + MS_EXCEPTION_IF_NULL(binary_cross_entropy_prim); + auto prim_name = binary_cross_entropy_prim->name(); + CheckAndConvertUtils::CheckInRange("binary_cross_entropy_infer", input_args.size(), kIncludeBoth, {2, 3}, prim_name); + auto x_shape = CheckAndConvertUtils::ConvertShapePtrToShape("x_shape", input_args[0]->BuildShape(), prim_name); + auto y_shape = CheckAndConvertUtils::ConvertShapePtrToShape("y_shape", input_args[1]->BuildShape(), prim_name); + auto weight_shape = + CheckAndConvertUtils::ConvertShapePtrToShape("weight_shape", input_args[2]->BuildShape(), prim_name); + CheckAndConvertUtils::Check("x shape", x_shape, kEqual, "y shape", y_shape, prim_name); + std::vector infer_shape; + if (weight_shape.size() < 1) { + CheckAndConvertUtils::Check("x shape", y_shape, kEqual, "weight shape", weight_shape, prim_name); + } + if (binary_cross_entropy_prim->get_reduction() != REDUCTION_SUM && + binary_cross_entropy_prim->get_reduction() != MEAN) { + infer_shape = {x_shape.begin(), infer_shape.end()}; + } + return std::make_shared(infer_shape); +} + +TypePtr BinaryCrossEntroyInferType(const PrimitivePtr &prim, const std::vector &input_args) { + CheckAndConvertUtils::CheckInteger("binary_cross_entropy_infer", input_args.size(), kEqual, 3, prim->name()); + for (const auto &item : input_args) { + MS_EXCEPTION_IF_NULL(item); + } + const std::set valid_types = {kNumberTypeFloat16, kNumberTypeFloat32}; + std::map types; + types.emplace("x_shape", 
input_args[0]->BuildType()); + types.emplace("y_shape", input_args[1]->BuildType()); + auto infer_type = CheckAndConvertUtils::CheckTensorTypeSame(types, valid_types, prim->name()); + if (input_args[3]->BuildType() != nullptr) { + types.emplace("x_shape", input_args[0]->BuildType()); + types.emplace("weight_shape", input_args[2]->BuildType()); + infer_type = CheckAndConvertUtils::CheckTensorTypeSame(types, valid_types, prim->name()); + } + return TypeIdToType(infer_type); +} +} // namespace + +void BinaryCrossEntropy::set_reduction(const Reduction &reduction) { + int64_t swi = reduction; + this->AddAttr(kReduction, MakeValue(swi)); +} + +Reduction BinaryCrossEntropy::get_reduction() const { + auto value_ptr = GetAttr(kReduction); + return Reduction(GetValue(value_ptr)); +} +void BinaryCrossEntropy::Init(const Reduction &reduction) { this->set_reduction(reduction); } + +AbstractBasePtr BinaryCrossEntropyInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args) { + return std::make_shared(BinaryCrossEntroyInferType(primitive, input_args), + BinaryCrossEntroyInferShape(primitive, input_args)->shape()); +} + +REGISTER_PRIMITIVE_EVAL_IMPL(BinaryCrossEntropy, prim::kPrimBinaryCrossEntropy, BinaryCrossEntropyInfer); +REGISTER_PRIMITIVE_C(kNameBinaryCrossEntropy, BinaryCrossEntropy); +} // namespace ops +} // namespace mindspore diff --git a/mindspore/core/c_ops/binary_cross_entropy.h b/mindspore/core/ops/binary_cross_entropy.h similarity index 59% rename from mindspore/core/c_ops/binary_cross_entropy.h rename to mindspore/core/ops/binary_cross_entropy.h index 900c949f334..ce7a771d688 100644 --- a/mindspore/core/c_ops/binary_cross_entropy.h +++ b/mindspore/core/ops/binary_cross_entropy.h @@ -14,25 +14,32 @@ * limitations under the License. 
*/ -#ifndef MINDSPORE_CORE_C_OPS_BINARY_CROSS_ENTROPY_GRAD_H_ -#define MINDSPORE_CORE_C_OPS_BINARY_CROSS_ENTROPY_GRAD_H_ +#ifndef MINDSPORE_CORE_OPS_BINARY_CROSS_ENTROPY_H_ +#define MINDSPORE_CORE_OPS_BINARY_CROSS_ENTROPY_H_ #include +#include +#include -#include "c_ops/primitive_c.h" -#include "c_ops/op_utils.h" +#include "ops/primitive_c.h" +#include "ops/op_utils.h" #include "abstract/abstract_value.h" #include "utils/check_convert_utils.h" namespace mindspore { +namespace ops { constexpr auto kNameBinaryCrossEntropy = "BinaryCrossEntropy"; class BinaryCrossEntropy : public PrimitiveC { public: BinaryCrossEntropy() : PrimitiveC(kNameBinaryCrossEntropy) {} ~BinaryCrossEntropy() = default; MS_DECLARE_PARENT(BinaryCrossEntropy, PrimitiveC); - void Init(const std::string &reduction = "mean"); - void set_reduction(const std::string &reduction); - std::string get_reduction() const; + void Init(const Reduction &reduction = MEAN); + void set_reduction(const Reduction &reduction); + Reduction get_reduction() const; }; +AbstractBasePtr BinaryCrossEntropyGradInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args); +using PrimBinaryCrossEntropyPtr = std::shared_ptr; +} // namespace ops } // namespace mindspore -#endif // MINDSPORE_CORE_C_OPS_BINARY_CROSS_ENTROPY_GRAD_H_ +#endif // MINDSPORE_CORE_OPS_BINARY_CROSS_ENTROPY_H_ diff --git a/mindspore/core/c_ops/black_box.cc b/mindspore/core/ops/black_box.cc similarity index 83% rename from mindspore/core/c_ops/black_box.cc rename to mindspore/core/ops/black_box.cc index 3d4ce946d3c..dd251939ee6 100644 --- a/mindspore/core/c_ops/black_box.cc +++ b/mindspore/core/ops/black_box.cc @@ -14,13 +14,14 @@ * limitations under the License. */ -#include "c_ops/black_box.h" -#include "c_ops/op_utils.h" +#include "ops/black_box.h" +#include "ops/op_utils.h" #include "utils/check_convert_utils.h" #include "abstract/primitive_infer_map.h" namespace mindspore { -void BlackBox::Init(const std::string &id, int64_t size, const std::vector &address) { +namespace ops { +void BlackBox::Init(const std::string &id, const int64_t size, const std::vector &address) { this->set_id(id); this->set_size(size); this->set_address(address); @@ -33,7 +34,7 @@ std::string BlackBox::get_id() const { return GetValue(value_ptr); } -void BlackBox::set_size(int64_t size) { this->AddAttr(kSize, MakeValue(size)); } +void BlackBox::set_size(const int64_t size) { this->AddAttr(kSize, MakeValue(size)); } int64_t BlackBox::get_size() const { auto value_ptr = this->GetAttr(kSize); @@ -47,4 +48,5 @@ std::vector BlackBox::get_address() const { return GetValue>(value_ptr); } REGISTER_PRIMITIVE_C(kNameBlackBox, BlackBox); +} // namespace ops } // namespace mindspore diff --git a/mindspore/core/c_ops/black_box.h b/mindspore/core/ops/black_box.h similarity index 79% rename from mindspore/core/c_ops/black_box.h rename to mindspore/core/ops/black_box.h index d2d41e583f6..61da9ae01b4 100644 --- a/mindspore/core/c_ops/black_box.h +++ b/mindspore/core/ops/black_box.h @@ -14,25 +14,26 @@ * limitations under the License. 
*/ -#ifndef MINDSPORE_CORE_C_OPS_BLACKBOX_H_ -#define MINDSPORE_CORE_C_OPS_BLACKBOX_H_ +#ifndef MINDSPORE_CORE_OPS_BLACK_BOX_H_ +#define MINDSPORE_CORE_OPS_BLACK_BOX_H_ #include #include #include -#include "c_ops/primitive_c.h" +#include "ops/primitive_c.h" #include "abstract/abstract_value.h" #include "utils/check_convert_utils.h" namespace mindspore { +namespace ops { constexpr auto kNameBlackBox = "BlackBox"; class BlackBox : public PrimitiveC { public: BlackBox() : PrimitiveC(kNameBlackBox) {} ~BlackBox() = default; MS_DECLARE_PARENT(BlackBox, PrimitiveC); - void Init(const std::string &id, int64_t size, const std::vector &address); + void Init(const std::string &id, const int64_t size, const std::vector &address); void set_id(const std::string &id); - void set_size(int64_t size); + void set_size(const int64_t size); void set_address(const std::vector &address); std::string get_id() const; int64_t get_size() const; @@ -40,6 +41,7 @@ class BlackBox : public PrimitiveC { }; using PrimBlackBoxPtr = std::shared_ptr; +} // namespace ops } // namespace mindspore -#endif // MINDSPORE_CORE_C_OPS_BLACKBOX_H_ +#endif // MINDSPORE_CORE_OPS_BLACK_BOX_H_ diff --git a/mindspore/core/ops/broadcast.cc b/mindspore/core/ops/broadcast.cc new file mode 100644 index 00000000000..d426bf1b975 --- /dev/null +++ b/mindspore/core/ops/broadcast.cc @@ -0,0 +1,70 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include +#include +#include +#include "ops/broadcast.h" +#include "ops/op_utils.h" +#include "utils/check_convert_utils.h" + +namespace mindspore { +namespace ops { +void Broadcast::Init(const int64_t root_rank, const std::string &group) { + this->set_root_rank(root_rank); + this->set_group(group); +} +void Broadcast::set_root_rank(const int64_t root_rank) { this->AddAttr(kKeepProb, MakeValue(root_rank)); } + +void Broadcast::set_group(const std::string &group) { + CheckAndConvertUtils::CheckString(kGroup, group, {"hccl_world_group", "hccl_world_group"}, this->name()); + this->AddAttr(kGroup, MakeValue(group)); +} +int64_t Broadcast::get_root_rank() const { + auto value_ptr = this->GetAttr(kRootRank); + return GetValue(value_ptr); +} + +std::string Broadcast::get_group() const { + auto value_ptr = this->GetAttr(kGroup); + return GetValue(value_ptr); +} +AbstractBasePtr BroadcastInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args) { + MS_EXCEPTION_IF_NULL(primitive); + auto broadcast_prim = primitive->cast(); + MS_EXCEPTION_IF_NULL(broadcast_prim); + auto prim_name = broadcast_prim->name(); + for (const auto &item : input_args) { + MS_EXCEPTION_IF_NULL(item); + } + // infer shape + auto in_shape = CheckAndConvertUtils::ConvertShapePtrToShape("x_shape", input_args[0]->BuildShape(), prim_name); + // infer type + auto x_type = input_args[0]->BuildType()->cast()->element(); + std::vector output_types; + const std::set valid_types = {kNumberTypeInt8, kNumberTypeInt32, kNumberTypeFloat16, kNumberTypeFloat32}; + for (size_t i = 0; i < input_args.size(); i++) { + auto out_type = input_args[i]->BuildType()->cast()->element(); + output_types.push_back(out_type); + CheckAndConvertUtils::CheckTensorTypeValid("index_type", out_type, valid_types, prim_name); + } + return std::make_shared(x_type, in_shape); +} +REGISTER_PRIMITIVE_EVAL_IMPL(Broadcast, prim::kPrimBroadcast, BroadcastInfer); +REGISTER_PRIMITIVE_C(kNameBroadcast, Broadcast); +} // namespace ops +} // namespace mindspore diff --git a/mindspore/core/c_ops/broadcast.h b/mindspore/core/ops/broadcast.h similarity index 62% rename from mindspore/core/c_ops/broadcast.h rename to mindspore/core/ops/broadcast.h index e9a864f72de..e124b1e95e6 100644 --- a/mindspore/core/c_ops/broadcast.h +++ b/mindspore/core/ops/broadcast.h @@ -14,27 +14,34 @@ * limitations under the License. 
*/ -#ifndef MINDSPORE_CORE_C_OPS_BROADCAST_H_ -#define MINDSPORE_CORE_C_OPS_BROADCAST_H_ +#ifndef MINDSPORE_CORE_OPS_BROADCAST_H_ +#define MINDSPORE_CORE_OPS_BROADCAST_H_ #include +#include +#include -#include "c_ops/primitive_c.h" +#include "ops/primitive_c.h" #include "abstract/abstract_value.h" #include "utils/check_convert_utils.h" namespace mindspore { +namespace ops { constexpr auto kNameBroadcast = "Broadcast"; class Broadcast : public PrimitiveC { public: Broadcast() : PrimitiveC(kNameBroadcast) {} ~Broadcast() = default; MS_DECLARE_PARENT(Broadcast, PrimitiveC); - void Init(int64_t root_rank, const std::string &group = "hccl_world_group"); - void set_root_rank(int64_t root_rank); + void Init(const int64_t root_rank, const std::string &group = "hccl_world_group"); + void set_root_rank(const int64_t root_rank); void set_group(const std::string &group); - int64_t get_root_rank(); + int64_t get_root_rank() const; std::string get_group() const; }; +AbstractBasePtr BroadcastInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args); +using PrimBroadcast = std::shared_ptr; +} // namespace ops } // namespace mindspore -#endif // MINDSPORE_CORE_C_OPS_BROADCAST_H_ +#endif // MINDSPORE_CORE_OPS_BROADCAST_H_ diff --git a/mindspore/core/ops/broadcast_to.cc b/mindspore/core/ops/broadcast_to.cc new file mode 100644 index 00000000000..95e44e96030 --- /dev/null +++ b/mindspore/core/ops/broadcast_to.cc @@ -0,0 +1,88 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include +#include "ops/broadcast_to.h" + +namespace mindspore { +namespace ops { +namespace { +abstract::ShapePtr BroadcastToInferShape(const PrimitivePtr &primitive, + const std::vector &input_args) { + MS_EXCEPTION_IF_NULL(primitive); + auto broad_cast_to = primitive->cast(); + MS_EXCEPTION_IF_NULL(broad_cast_to); + auto prim_name = broad_cast_to->name(); + auto x_shape = CheckAndConvertUtils::ConvertShapePtrToShape("x_shape", input_args[0]->BuildShape(), prim_name); + auto input_x = broad_cast_to->get_shape(); + int64_t outer_dim_offset = input_x.size() - x_shape.size(); + CheckAndConvertUtils::Check("x shape", x_shape, kLessEqual, "input_x", input_x, prim_name); + bool flag = true; + if (input_x.end() == find(input_x.begin(), input_x.end(), -1)) { + flag = false; + } else { + flag = true; + } + if (flag == true) { + for (int64_t i = 0; i < (int64_t)input_x.size(); i++) { + if (input_x[i] == -1) { + if (i < outer_dim_offset) { + MS_EXCEPTION(ValueError) << " -1 in init shape is in an incompatible " + "location with given input tensor, -1 index in init shape: " + << i << " but -1 can only be in index" << x_shape.size() + << "onwards for this input."; + } + input_x[i] = x_shape[i - outer_dim_offset]; + } + } + } + std::reverse(input_x.begin(), input_x.end()); + return std::make_shared(input_x); +} + +TypePtr BroadcastToInferType(const PrimitivePtr &prim, const std::vector &input_args) { + for (const auto &item : input_args) { + MS_EXCEPTION_IF_NULL(item); + } + auto x_dtype = input_args[0]->BuildType()->cast()->element(); + std::set template_types = {TypeIdToType(kObjectTypeTensorType)}; + CheckAndConvertUtils::CheckSubClass("x_dtype", x_dtype, template_types, prim->name()); + auto infer_dtype = input_args[0]->BuildType()->type_id(); + return TypeIdToType(infer_dtype); +} +} // namespace + +void BroadcastTo::Init(const std::vector &shape) { set_shape(shape); } + +void BroadcastTo::set_shape(const std::vector &shape) { + CheckAndConvertUtils::CheckInteger(kShapeSize, shape.size(), kGreaterThan, 0, name()); + AddAttr(kShape, MakeValue(shape)); +} + +std::vector BroadcastTo::get_shape() const { + auto value_ptr = GetAttr(kShape); + return GetValue>(value_ptr); +} +AbstractBasePtr BroadcastToInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args) { + return std::make_shared(BroadcastToInferType(primitive, input_args), + BroadcastToInferShape(primitive, input_args)->shape()); +} + +REGISTER_PRIMITIVE_EVAL_IMPL(BroadcastTo, prim::kPrimBroadcastTo, BroadcastToInfer); +REGISTER_PRIMITIVE_C(kNameBroadcastTo, BroadcastTo); +} // namespace ops +} // namespace mindspore diff --git a/mindspore/core/c_ops/broadcast_to.h b/mindspore/core/ops/broadcast_to.h similarity index 86% rename from mindspore/core/c_ops/broadcast_to.h rename to mindspore/core/ops/broadcast_to.h index a411b97467d..e6baf7ce51c 100644 --- a/mindspore/core/c_ops/broadcast_to.h +++ b/mindspore/core/ops/broadcast_to.h @@ -14,18 +14,19 @@ * limitations under the License. 
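BroadcastToInferShape above substitutes every -1 in the configured target shape with the matching input dimension, where the input shape is right-aligned against the target (outer_dim_offset); a -1 that falls in the extra leading dimensions has nothing to borrow from and is rejected. A self-contained sketch of just that substitution step (the MindSpore-side checks and the trailing reverse are omitted; the helper name is hypothetical):

#include <cstdint>
#include <iostream>
#include <stdexcept>
#include <vector>

// Resolve -1 placeholders in a BroadcastTo target shape against the input
// shape, aligning from the trailing dimensions.
std::vector<int64_t> ResolveBroadcastShape(std::vector<int64_t> target,
                                           const std::vector<int64_t> &x_shape) {
  const int64_t offset =
      static_cast<int64_t>(target.size()) - static_cast<int64_t>(x_shape.size());
  for (int64_t i = 0; i < static_cast<int64_t>(target.size()); ++i) {
    if (target[i] != -1) continue;
    if (i < offset) throw std::invalid_argument("-1 may only appear in the trailing dims");
    target[i] = x_shape[i - offset];
  }
  return target;
}

int main() {
  // target (2, -1, -1) with input (3, 4) resolves to (2, 3, 4)
  for (auto d : ResolveBroadcastShape({2, -1, -1}, {3, 4})) std::cout << d << ' ';
  std::cout << '\n';
}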
*/ -#ifndef MINDSPORE_CORE_C_OPS_BROADCAST_H_ -#define MINDSPORE_CORE_C_OPS_BROADCAST_H_ +#ifndef MINDSPORE_CORE_OPS_BROADCAST_TO_H_ +#define MINDSPORE_CORE_OPS_BROADCAST_TO_H_ #include #include #include #include -#include "c_ops/op_utils.h" -#include "c_ops/primitive_c.h" +#include "ops/op_utils.h" +#include "ops/primitive_c.h" #include "abstract/abstract_value.h" #include "utils/check_convert_utils.h" namespace mindspore { +namespace ops { constexpr auto kNameBroadcastTo = "BroadcastTo"; class BroadcastTo : public PrimitiveC { public: @@ -41,6 +42,7 @@ AbstractBasePtr BroadcastToInfer(const abstract::AnalysisEnginePtr &, const Prim const std::vector &input_args); using PrimBroadcastToPtr = std::shared_ptr; +} // namespace ops } // namespace mindspore -#endif // MINDSPORE_CORE_C_OPS_BROADCAST_H_ +#endif // MINDSPORE_CORE_OPS_BROADCAST_TO_H_ diff --git a/mindspore/core/c_ops/cast.cc b/mindspore/core/ops/cast.cc similarity index 88% rename from mindspore/core/c_ops/cast.cc rename to mindspore/core/ops/cast.cc index 43180a69690..2babc975bef 100644 --- a/mindspore/core/c_ops/cast.cc +++ b/mindspore/core/ops/cast.cc @@ -14,8 +14,10 @@ * limitations under the License. */ -#include "c_ops/cast.h" +#include "ops/cast.h" namespace mindspore { +namespace ops { REGISTER_PRIMITIVE_C(kNameCast, Cast); -} +} // namespace ops +} // namespace mindspore diff --git a/mindspore/core/c_ops/cast.h b/mindspore/core/ops/cast.h similarity index 85% rename from mindspore/core/c_ops/cast.h rename to mindspore/core/ops/cast.h index 795c4a9fada..d543c5dcf19 100644 --- a/mindspore/core/c_ops/cast.h +++ b/mindspore/core/ops/cast.h @@ -14,17 +14,18 @@ * limitations under the License. */ -#ifndef MINDSPORE_CORE_C_OPS_CAST_H_ -#define MINDSPORE_CORE_C_OPS_CAST_H_ +#ifndef MINDSPORE_CORE_OPS_CAST_H_ +#define MINDSPORE_CORE_OPS_CAST_H_ #include #include -#include "c_ops/primitive_c.h" -#include "c_ops/op_utils.h" +#include "ops/primitive_c.h" +#include "ops/op_utils.h" #include "abstract/abstract_value.h" #include "utils/check_convert_utils.h" namespace mindspore { +namespace ops { constexpr auto kNameCast = "Cast"; class Cast : public PrimitiveC { public: @@ -35,5 +36,6 @@ class Cast : public PrimitiveC { AbstractBasePtr CastInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, const std::vector &input_args); using PrimCast = std::shared_ptr; +} // namespace ops } // namespace mindspore -#endif // MINDSPORE_CORE_C_OPS_CAST_H_ +#endif // MINDSPORE_CORE_OPS_CAST_H_ diff --git a/mindspore/core/ops/ceil.cc b/mindspore/core/ops/ceil.cc new file mode 100644 index 00000000000..e3d54d6404d --- /dev/null +++ b/mindspore/core/ops/ceil.cc @@ -0,0 +1,49 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include +#include +#include +#include + +#include "ops/ceil.h" +#include "ops/op_utils.h" +#include "utils/check_convert_utils.h" +#include "abstract/primitive_infer_map.h" + +namespace mindspore { +namespace ops { + +AbstractBasePtr CeilInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args) { + for (const auto &item : input_args) { + MS_EXCEPTION_IF_NULL(item); + } + auto x_shape = CheckAndConvertUtils::ConvertShapePtrToShape("x_shape", input_args[0]->BuildShape(), "Ceil"); + const std::set valid_types = {kNumberTypeFloat16, kNumberTypeFloat32}; + auto infer_type = input_args[0]->BuildType(); + CheckAndConvertUtils::CheckTensorTypeValid("x type", infer_type, valid_types, primitive->name()); + MS_EXCEPTION_IF_NULL(infer_type); + auto tensor_type = infer_type->cast(); + MS_EXCEPTION_IF_NULL(tensor_type); + auto data_type = tensor_type->element(); + MS_EXCEPTION_IF_NULL(data_type); + return std::make_shared(data_type, x_shape); +} +REGISTER_PRIMITIVE_EVAL_IMPL(Ceil, prim::kPrimCeil, CeilInfer); +REGISTER_PRIMITIVE_C(kNameCeil, Ceil); +} // namespace ops +} // namespace mindspore diff --git a/mindspore/core/c_ops/ceil.h b/mindspore/core/ops/ceil.h similarity index 81% rename from mindspore/core/c_ops/ceil.h rename to mindspore/core/ops/ceil.h index c532aeed4f9..2b6df01ab0d 100644 --- a/mindspore/core/c_ops/ceil.h +++ b/mindspore/core/ops/ceil.h @@ -14,26 +14,29 @@ * limitations under the License. */ -#ifndef MINDSPORE_CORE_C_OPS_CEIL_H_ -#define MINDSPORE_CORE_C_OPS_CEIL_H_ +#ifndef MINDSPORE_CORE_OPS_CEIL_H_ +#define MINDSPORE_CORE_OPS_CEIL_H_ #include #include -#include "c_ops/primitive_c.h" -#include "c_ops/op_utils.h" +#include "ops/primitive_c.h" +#include "ops/op_utils.h" #include "abstract/abstract_value.h" namespace mindspore { +namespace ops { constexpr auto kNameCeil = "Ceil"; class Ceil : public PrimitiveC { public: Ceil() : PrimitiveC(kNameCeil) { InitIOName({"x"}, {"y"}); } ~Ceil() = default; MS_DECLARE_PARENT(Ceil, PrimitiveC); + void init() {} }; AbstractBasePtr CeilInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, const std::vector &input_args); -using PrimCeil = std::shared_ptr; +using PrimCeilPtr = std::shared_ptr; +} // namespace ops } // namespace mindspore -#endif // MINDSPORE_CORE_C_OPS_CEIL_H_ +#endif // MINDSPORE_CORE_OPS_CEIL_H_ diff --git a/mindspore/core/c_ops/clip.cc b/mindspore/core/ops/clip.cc similarity index 93% rename from mindspore/core/c_ops/clip.cc rename to mindspore/core/ops/clip.cc index f982f6299a5..f53b44e1e33 100644 --- a/mindspore/core/c_ops/clip.cc +++ b/mindspore/core/ops/clip.cc @@ -14,12 +14,13 @@ * limitations under the License. 
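CeilInfer above follows the usual pattern for unary element-wise ops: the output keeps the input's shape and element type, and the only real work is validating that element type against a whitelist (float16/float32 here). A small standalone sketch of that dtype gate (the enum is illustrative, not MindSpore's TypeId):

#include <set>
#include <stdexcept>

enum class DType { kFloat16, kFloat32, kInt32 };

// Shape passes through unchanged; only the element type is checked.
DType CeilInferDType(DType x_dtype) {
  static const std::set<DType> kValid = {DType::kFloat16, DType::kFloat32};
  if (kValid.count(x_dtype) == 0) {
    throw std::invalid_argument("Ceil expects a float16 or float32 tensor");
  }
  return x_dtype;
}

int main() { return CeilInferDType(DType::kFloat32) == DType::kFloat32 ? 0 : 1; }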
*/ -#include "c_ops/clip.h" +#include "ops/clip.h" #include "utils/check_convert_utils.h" #include "abstract/primitive_infer_map.h" -#include "c_ops/op_utils.h" +#include "ops/op_utils.h" namespace mindspore { +namespace ops { void Clip::Init(const float max, const float min) { this->set_max(max); this->set_min(min); @@ -39,4 +40,5 @@ float Clip::get_min() const { return GetValue(value_ptr); } REGISTER_PRIMITIVE_C(kNameClip, Clip); +} // namespace ops } // namespace mindspore diff --git a/mindspore/core/c_ops/clip.h b/mindspore/core/ops/clip.h similarity index 86% rename from mindspore/core/c_ops/clip.h rename to mindspore/core/ops/clip.h index e90a9d17d47..a62768643f5 100644 --- a/mindspore/core/c_ops/clip.h +++ b/mindspore/core/ops/clip.h @@ -13,15 +13,16 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -#ifndef MINDSPORE_CORE_C_OPS_CLIP_H_ -#define MINDSPORE_CORE_C_OPS_CLIP_H_ +#ifndef MINDSPORE_CORE_OPS_CLIP_H_ +#define MINDSPORE_CORE_OPS_CLIP_H_ #include -#include "c_ops/primitive_c.h" +#include "ops/primitive_c.h" #include "abstract/abstract_value.h" #include "utils/check_convert_utils.h" namespace mindspore { +namespace ops { constexpr auto kNameClip = "Clip"; class Clip : public PrimitiveC { public: @@ -36,6 +37,7 @@ class Clip : public PrimitiveC { }; using PrimClipPtr = std::shared_ptr; +} // namespace ops } // namespace mindspore -#endif // MINDSPORE_CORE_C_OPS_CLIP_H_ +#endif // MINDSPORE_CORE_OPS_CLIP_H_ diff --git a/mindspore/core/ops/concat.cc b/mindspore/core/ops/concat.cc new file mode 100644 index 00000000000..09d69b075c9 --- /dev/null +++ b/mindspore/core/ops/concat.cc @@ -0,0 +1,85 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include +#include +#include "ops/concat.h" +#include "ops/op_utils.h" +#include "utils/check_convert_utils.h" +namespace mindspore { +namespace ops { + +void Concat::Init(const int64_t axis) { this->set_axis(axis); } +int64_t Concat::get_axis() const { + auto value_ptr = this->GetAttr(kAxis); + return GetValue(value_ptr); +} + +void Concat::set_axis(const int64_t axis) { this->AddAttr(kAxis, MakeValue(axis)); } + +AbstractBasePtr ConcatInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args) { + MS_EXCEPTION_IF_NULL(primitive); + auto prim = primitive->cast(); + MS_EXCEPTION_IF_NULL(prim); + auto prim_name = prim->name(); + CheckAndConvertUtils::CheckInteger("input number", input_args.size(), kEqual, 1, prim_name); + for (const auto &item : input_args) { + MS_EXCEPTION_IF_NULL(item); + } + auto input_tuple = input_args[0]->cast(); + MS_EXCEPTION_IF_NULL(input_tuple); + auto elements = input_tuple->elements(); + CheckAndConvertUtils::CheckInteger("concat element num", elements.size(), kGreaterEqual, 1, prim_name); + auto element0 = elements[0]->cast(); + MS_EXCEPTION_IF_NULL(element0); + auto element0_shape = + CheckAndConvertUtils::ConvertShapePtrToShape("element0 shape", element0->BuildShape(), prim_name); + auto element0_rank = SizeToLong(element0_shape.size()); + auto axis = prim->get_axis(); + CheckAndConvertUtils::CheckInRange("Concat axis", axis, kIncludeBoth, {-element0_rank - 1, element0_rank}, + prim_name); + axis = axis < 0 ? axis + element0_rank : axis; + + std::map types; + types.emplace("element0", element0->BuildType()); + int64_t all_shp = element0_shape[axis]; + for (size_t i = 1; i < elements.size(); ++i) { + std::string elementi = "element" + std::to_string(i); + auto elementi_shape = + CheckAndConvertUtils::ConvertShapePtrToShape(elementi + " shape", elements[i]->BuildShape(), prim_name); + CheckAndConvertUtils::CheckInteger(elementi + " shape rank", elementi_shape.size(), kEqual, element0_shape.size(), + prim_name); + for (int64_t j = 0; j < element0_rank; ++j) { + if (j != axis && elementi_shape[j] != element0_shape[j]) { + MS_LOG(EXCEPTION) << "element " << i << " shape in input can not concat with first element."; + } + } + all_shp = all_shp == -1 || elementi_shape[axis] == -1 ? -1 : all_shp + elementi_shape[axis]; + types.emplace(elementi, elements[i]->BuildType()); + } + auto infer_type = CheckAndConvertUtils::CheckTensorTypeSame(types, all_types, prim_name); + auto ret_shape = element0_shape; + ret_shape[axis] = all_shp; + + return std::make_shared(TypeIdToType(infer_type), + std::make_shared(ret_shape)); +} + +REGISTER_PRIMITIVE_EVAL_IMPL(Concat, prim::kPrimConcat, ConcatInfer); +REGISTER_PRIMITIVE_C(kNameConcat, Concat); +} // namespace ops +} // namespace mindspore diff --git a/mindspore/core/c_ops/concat.h b/mindspore/core/ops/concat.h similarity index 81% rename from mindspore/core/c_ops/concat.h rename to mindspore/core/ops/concat.h index fafffd24459..1a13537905f 100644 --- a/mindspore/core/c_ops/concat.h +++ b/mindspore/core/ops/concat.h @@ -14,29 +14,31 @@ * limitations under the License. 
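ConcatInfer above normalizes a possibly negative axis, requires every element to match the first element's rank and all non-axis dimensions, and sums the axis dimension, propagating -1 as "dynamic". The same bookkeeping as a standalone function (the axis range check is simplified here to the usual [-rank, rank) convention):

#include <cstdint>
#include <iostream>
#include <stdexcept>
#include <vector>

using Shape = std::vector<int64_t>;

// Mirror of the Concat output-shape rule: all inputs share rank and all
// non-axis dims; the axis dim is the sum, with -1 meaning "unknown".
Shape ConcatOutputShape(const std::vector<Shape> &inputs, int64_t axis) {
  if (inputs.empty()) throw std::invalid_argument("Concat needs at least one input");
  const int64_t rank = static_cast<int64_t>(inputs[0].size());
  if (axis < -rank || axis >= rank) throw std::out_of_range("Concat axis out of range");
  if (axis < 0) axis += rank;
  Shape out = inputs[0];
  for (size_t i = 1; i < inputs.size(); ++i) {
    if (static_cast<int64_t>(inputs[i].size()) != rank) throw std::invalid_argument("rank mismatch");
    for (int64_t j = 0; j < rank; ++j) {
      if (j != axis && inputs[i][j] != out[j]) throw std::invalid_argument("dim mismatch");
    }
    out[axis] = (out[axis] == -1 || inputs[i][axis] == -1) ? -1 : out[axis] + inputs[i][axis];
  }
  return out;
}

int main() {
  // (2, 3) concat (4, 3) along axis 0 -> (6, 3)
  for (auto d : ConcatOutputShape({{2, 3}, {4, 3}}, 0)) std::cout << d << ' ';
  std::cout << '\n';
}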
*/ -#ifndef MINDSPORE_CORE_C_OPS_CONCAT_H_ -#define MINDSPORE_CORE_C_OPS_CONCAT_H_ +#ifndef MINDSPORE_CORE_OPS_CONCAT_H_ +#define MINDSPORE_CORE_OPS_CONCAT_H_ #include #include -#include "c_ops/primitive_c.h" -#include "c_ops/op_utils.h" +#include "ops/primitive_c.h" +#include "ops/op_utils.h" #include "abstract/abstract_value.h" #include "utils/check_convert_utils.h" namespace mindspore { +namespace ops { constexpr auto kNameConcat = "Concat"; class Concat : public PrimitiveC { public: Concat() : PrimitiveC(kNameConcat) {} ~Concat() = default; MS_DECLARE_PARENT(Concat, PrimitiveC); - void Init(int64_t axis = 0); - void set_axis(int64_t axis); + void Init(const int64_t axis = 0); + void set_axis(const int64_t axis); int64_t get_axis() const; }; AbstractBasePtr ConcatInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, const std::vector &input_args); using PrimConcatPtr = std::shared_ptr; +} // namespace ops } // namespace mindspore -#endif // MINDSPORE_CORE_C_OPS_CONCAT_H_ +#endif // MINDSPORE_CORE_OPS_CONCAT_H_ diff --git a/mindspore/core/ops/constant.cc b/mindspore/core/ops/constant.cc new file mode 100644 index 00000000000..7c87d4e7d59 --- /dev/null +++ b/mindspore/core/ops/constant.cc @@ -0,0 +1,58 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include +#include +#include + +#include "ops/constant.h" +#include "utils/check_convert_utils.h" +#include "abstract/primitive_infer_map.h" +#include "ops/op_utils.h" + +namespace mindspore { +namespace ops { +namespace { +abstract::ShapePtr InferShape(const PrimitivePtr &primitive, const std::vector &input_args) { + MS_EXCEPTION_IF_NULL(primitive); + auto x = input_args[0]->BuildShape(); + auto shape_element = x->cast(); + MS_EXCEPTION_IF_NULL(shape_element); + return shape_element; +} + +TypePtr InferType(const PrimitivePtr &prim, const std::vector &input_args) { + MS_EXCEPTION_IF_NULL(prim); + CheckAndConvertUtils::CheckInteger("input number", input_args.size(), kEqual, 1, prim->name()); + for (const auto &item : input_args) { + MS_EXCEPTION_IF_NULL(item); + } + std::map types; + types.emplace("x", input_args[0]->BuildType()); + auto infer_type = CheckAndConvertUtils::CheckTensorTypeSame(types, common_valid_types, prim->name()); + return TypeIdToType(infer_type); +} +} // namespace + +AbstractBasePtr ConstantInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args) { + return std::make_shared(InferType(primitive, input_args), + InferShape(primitive, input_args)->shape()); +} +REGISTER_PRIMITIVE_EVAL_IMPL(Constant, prim::kPrimConstant, ConstantInfer); +REGISTER_PRIMITIVE_C(kNameConstant, Constant); +} // namespace ops +} // namespace mindspore diff --git a/mindspore/core/ops/constant.h b/mindspore/core/ops/constant.h new file mode 100644 index 00000000000..550b05a93dc --- /dev/null +++ b/mindspore/core/ops/constant.h @@ -0,0 +1,42 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_CORE_OPS_CONSTANT_H_ +#define MINDSPORE_CORE_OPS_CONSTANT_H_ +#include +#include +#include "ops/primitive_c.h" +#include "abstract/abstract_value.h" +#include "utils/check_convert_utils.h" + +namespace mindspore { +namespace ops { +constexpr auto kNameConstant = "Constant"; +class Constant : public PrimitiveC { + public: + Constant() : PrimitiveC(kNameConstant) {} + ~Constant() = default; + MS_DECLARE_PARENT(Constant, PrimitiveC); + void Init() {} +}; + +AbstractBasePtr ConstantInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args); +using PrimConstantPtr = std::shared_ptr; +} // namespace ops +} // namespace mindspore + +#endif // MINDSPORE_CORE_OPS_CONSTANT_H_ diff --git a/mindspore/core/c_ops/constant_of_shape.cc b/mindspore/core/ops/constant_of_shape.cc similarity index 51% rename from mindspore/core/c_ops/constant_of_shape.cc rename to mindspore/core/ops/constant_of_shape.cc index 5c2e079daf7..a4ffc2130ac 100644 --- a/mindspore/core/c_ops/constant_of_shape.cc +++ b/mindspore/core/ops/constant_of_shape.cc @@ -14,12 +14,30 @@ * limitations under the License. 
*/ -#include "c_ops/constant_of_shape.h" -#include "c_ops/op_utils.h" +#include "ops/constant_of_shape.h" +#include "ops/op_utils.h" #include "utils/check_convert_utils.h" #include "abstract/primitive_infer_map.h" namespace mindspore { +namespace ops { +namespace { +abstract::ShapePtr InferShape(const PrimitivePtr &primitive, const std::vector &input_args) { + CheckAndConvertUtils::CheckInteger("input args size", input_args.size(), kEqual, 1, "ConstantOfShape"); + auto input_shape = + CheckAndConvertUtils::ConvertShapePtrToShape("x_shape", input_args[0]->BuildShape(), "ConstantOfShape"); + return std::make_shared(input_shape); +} + +TypePtr InferType(const PrimitivePtr &primitive, const std::vector &input_args) { + MS_EXCEPTION_IF_NULL(primitive); + auto constant_prim = primitive->cast(); + MS_EXCEPTION_IF_NULL(constant_prim); + auto data_type = TypeId(constant_prim->get_data_type()); + return TypeIdToType(data_type); +} +} // namespace + void ConstantOfShape::Init(int64_t data_type, const std::vector &value) { this->set_data_type(data_type); this->set_value(value); @@ -38,5 +56,12 @@ std::vector ConstantOfShape::get_value() const { auto value_ptr = this->GetAttr(kValue); return GetValue>(value_ptr); } +AbstractBasePtr ConstantOfShapeInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args) { + return std::make_shared(InferType(primitive, input_args), + InferShape(primitive, input_args)->shape()); +} +REGISTER_PRIMITIVE_EVAL_IMPL(ConstantOfShape, prim::kPrimConstantOfShape, ConstantOfShapeInfer); REGISTER_PRIMITIVE_C(kNameConstantOfShape, ConstantOfShape); +} // namespace ops } // namespace mindspore diff --git a/mindspore/core/c_ops/constant_of_shape.h b/mindspore/core/ops/constant_of_shape.h similarity index 76% rename from mindspore/core/c_ops/constant_of_shape.h rename to mindspore/core/ops/constant_of_shape.h index 082573c22ab..71e7e02fe8b 100644 --- a/mindspore/core/c_ops/constant_of_shape.h +++ b/mindspore/core/ops/constant_of_shape.h @@ -14,15 +14,16 @@ * limitations under the License. */ -#ifndef MINDSPORE_CORE_C_OPS_CONSTANTOFSHAPE_H_ -#define MINDSPORE_CORE_C_OPS_CONSTANTOFSHAPE_H_ +#ifndef MINDSPORE_CORE_OPS_CONSTANT_OF_SHAPE_H_ +#define MINDSPORE_CORE_OPS_CONSTANT_OF_SHAPE_H_ #include #include -#include "c_ops/primitive_c.h" +#include "ops/primitive_c.h" #include "abstract/abstract_value.h" #include "utils/check_convert_utils.h" namespace mindspore { +namespace ops { constexpr auto kNameConstantOfShape = "ConstantOfShape"; class ConstantOfShape : public PrimitiveC { public: @@ -36,7 +37,10 @@ class ConstantOfShape : public PrimitiveC { std::vector get_value() const; }; +AbstractBasePtr ConstantOfShapeInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args); using PrimConstantOfShapePtr = std::shared_ptr; +} // namespace ops } // namespace mindspore -#endif // MINDSPORE_CORE_C_OPS_CONSTANTOFSHAPE_H_ +#endif // MINDSPORE_CORE_OPS_CONSTANT_OF_SHAPE_H_ diff --git a/mindspore/core/c_ops/control_depend.cc b/mindspore/core/ops/control_depend.cc similarity index 69% rename from mindspore/core/c_ops/control_depend.cc rename to mindspore/core/ops/control_depend.cc index 28872727e7b..80d48930671 100644 --- a/mindspore/core/c_ops/control_depend.cc +++ b/mindspore/core/ops/control_depend.cc @@ -14,19 +14,21 @@ * limitations under the License. 
*/ -#include "c_ops/control_depend.h" +#include "ops/control_depend.h" namespace mindspore { -void ControlDepend::Init(int64_t depend_mode) { this->set_depend_mode(depend_mode); } +namespace ops { +void ControlDepend::Init(const int64_t depend_mode) { this->set_depend_mode(depend_mode); } -void ControlDepend::set_depend_mode(int64_t depend_mode) { - CheckAndConvertUtils::CheckInRange(kDependMode, depend_mode, kIncludeBoth, {0, 1}, name()); +void ControlDepend::set_depend_mode(const int64_t depend_mode) { + CheckAndConvertUtils::CheckInRange(kDependMode, depend_mode, kIncludeBoth, {0, 1}, name()); AddAttr(kDependMode, MakeValue(depend_mode)); } -int64_t ControlDepend::get_depend_mode() { +int64_t ControlDepend::get_depend_mode() const { auto value_ptr = GetAttr(kDependMode); return GetValue(value_ptr); } REGISTER_PRIMITIVE_C(kNameControlDepend, ControlDepend); +} // namespace ops } // namespace mindspore diff --git a/mindspore/core/c_ops/control_depend.h b/mindspore/core/ops/control_depend.h similarity index 68% rename from mindspore/core/c_ops/control_depend.h rename to mindspore/core/ops/control_depend.h index db2c5cc0543..91feede28b8 100644 --- a/mindspore/core/c_ops/control_depend.h +++ b/mindspore/core/ops/control_depend.h @@ -14,29 +14,29 @@ * limitations under the License. */ -#ifndef MINDSPORE_CORE_C_OPS_CONTROL_DEPEND_H_ -#define MINDSPORE_CORE_C_OPS_CONTROL_DEPEND_H_ +#ifndef MINDSPORE_CORE_OPS_CONTROL_DEPEND_H_ +#define MINDSPORE_CORE_OPS_CONTROL_DEPEND_H_ #include #include -#include "c_ops/primitive_c.h" -#include "c_ops/op_utils.h" +#include "ops/primitive_c.h" +#include "ops/op_utils.h" #include "abstract/abstract_value.h" #include "utils/check_convert_utils.h" namespace mindspore { +namespace ops { constexpr auto kNameControlDepend = "ControlDepend"; class ControlDepend : public PrimitiveC { public: ControlDepend() : PrimitiveC(kNameControlDepend) {} ~ControlDepend() = default; MS_DECLARE_PARENT(ControlDepend, PrimitiveC); - void Init(int64_t depend_mode); - void set_depend_mode(int64_t depend_mode); - int64_t get_depend_mode(); + void Init(const int64_t depend_mode); + void set_depend_mode(const int64_t depend_mode = 0); + int64_t get_depend_mode() const; }; -AbstractBasePtr ControlDependInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, - const std::vector &input_args); using PrimControlDepend = std::shared_ptr; +} // namespace ops } // namespace mindspore -#endif // MINDSPORE_CORE_C_OPS_CONTROl_DEPEND_H_ +#endif // MINDSPORE_CORE_OPS_CONTROl_DEPEND_H_ diff --git a/mindspore/core/c_ops/conv2d.cc b/mindspore/core/ops/conv2d.cc similarity index 66% rename from mindspore/core/c_ops/conv2d.cc rename to mindspore/core/ops/conv2d.cc index d9d33448d5c..98bf0ed2c4d 100644 --- a/mindspore/core/c_ops/conv2d.cc +++ b/mindspore/core/ops/conv2d.cc @@ -1,5 +1,5 @@ /** - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2020 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -14,7 +14,7 @@ * limitations under the License. 
*/ -#include "c_ops/conv2d.h" +#include "ops/conv2d.h" #include #include #include @@ -23,102 +23,29 @@ #include "ir/dtype/tensor_type.h" #include "utils/check_convert_utils.h" #include "abstract/primitive_infer_map.h" +#include "ops/control_depend.h" namespace mindspore { -Conv2D::Conv2D() : PrimitiveC(kNameConv2D) { InitIOName({"x", "w"}, {"output"}); } - -void Conv2D::Init(int64_t out_channel, const std::vector &kernel_size, int64_t mode, - const std::string &pad_mode, const std::vector &pad, const std::vector &stride, - const std::vector &dilation, int64_t group) { - auto prim_name = this->name(); - this->AddAttr("data_format", MakeValue("NCHW")); - this->AddAttr("offset_a", MakeValue(static_cast(0))); - this->set_kernel_size(CheckAndConvertUtils::CheckPositiveVector(kKernelSize, kernel_size, prim_name)); - this->set_stride(CheckAndConvertUtils::CheckPositiveVector(kStride, stride, this->name(), true, true)); - this->set_dilation(CheckAndConvertUtils::CheckPositiveVector(kDilation, dilation, this->name(), true, true)); - this->set_pad_mode(CheckAndConvertUtils::CheckString(kPadMode, pad_mode, {"valid", "same", "pad"}, prim_name)); - CheckAndConvertUtils::CheckInteger("pad_size", pad.size(), kEqual, 4, prim_name); - if (pad_mode == "pad") { - for (auto item : pad) { - CheckAndConvertUtils::Check("pad_item", item, kGreaterEqual, "zeros_list", 0, prim_name); - } - } else { - CheckAndConvertUtils::Check(kPad, pad, kEqual, "zeros_list", {0, 0, 0, 0}, prim_name); - } - this->set_pad(CheckAndConvertUtils::CheckPositiveVector(kPad, pad, this->name(), true, true)); - this->set_mode(CheckAndConvertUtils::CheckInteger("mode", mode, kEqual, 1, prim_name)); - this->set_out_channel(CheckAndConvertUtils::CheckInteger("out_channel", out_channel, kGreaterThan, 0, prim_name)); - this->set_group(CheckAndConvertUtils::CheckInteger("group", group, kGreaterThan, 0, prim_name)); -} - -std::vector Conv2D::get_kernel_size() const { - auto value_ptr = GetAttr(kKernelSize); - return GetValue>(value_ptr); -} - -std::vector Conv2D::get_stride() const { - auto value_ptr = GetAttr(kStride); - return GetValue>(value_ptr); -} - -std::vector Conv2D::get_dilation() const { - auto value_ptr = GetAttr(kDilation); - return GetValue>(value_ptr); -} - -std::string Conv2D::get_pad_mode() const { - auto value_ptr = this->GetAttr(kPadMode); - return GetValue(value_ptr); -} - -std::vector Conv2D::get_pad() const { - auto value_ptr = this->GetAttr(kPad); - return GetValue>(value_ptr); -} -std::vector Conv2D::get_pad_list() const { - auto value_ptr = this->GetAttr(kPadList); - return GetValue>(value_ptr); -} -int64_t Conv2D::get_mode() const { - auto value_ptr = this->GetAttr(kMode); - return GetValue(value_ptr); -} - -int64_t Conv2D::get_group() const { - auto value_ptr = this->GetAttr(kGroup); - return GetValue(value_ptr); -} -int64_t Conv2D::get_output_channel() const { - auto value_ptr = this->GetAttr(kOutputChannel); - return GetValue(value_ptr); -} - -void Conv2D::set_kernel_size(const std::vector &kernel_size) { - this->AddAttr(kKernelSize, MakeValue(kernel_size)); -} -void Conv2D::set_stride(const std::vector &stride) { this->AddAttr(kStride, MakeValue(stride)); } -void Conv2D::set_dilation(const std::vector &dilation) { this->AddAttr(kDilation, MakeValue(dilation)); } -void Conv2D::set_pad_mode(const std::string &pad_mode) { this->AddAttr(kPadMode, MakeValue(pad_mode)); } -void Conv2D::set_pad(const std::vector &pad) { this->AddAttr(kPad, MakeValue(pad)); } -void Conv2D::set_mode(int64_t mode) { this->AddAttr(kMode, 
MakeValue(mode)); } -void Conv2D::set_group(int64_t group) { this->AddAttr(kGroup, MakeValue(group)); } -void Conv2D::set_out_channel(int64_t output_channel) { this->AddAttr(kOutputChannel, MakeValue(output_channel)); } -void Conv2D::set_pad_list(const std::vector &pad_list) { this->AddAttr(kPadList, MakeValue(pad_list)); } - +namespace ops { +namespace { abstract::ShapePtr Conv2dInferShape(const PrimitivePtr &primitive, const std::vector &input_args) { MS_EXCEPTION_IF_NULL(primitive); auto conv_prim = primitive->cast(); MS_EXCEPTION_IF_NULL(conv_prim); auto prim_name = conv_prim->name(); - CheckAndConvertUtils::CheckInRange("conv2d_infer", input_args.size(), kIncludeBoth, {2, 3}, prim_name); + CheckAndConvertUtils::CheckInRange("conv2d_infer", input_args.size(), kIncludeBoth, {2, 3}, prim_name); auto x_shape = CheckAndConvertUtils::ConvertShapePtrToShape("x_shape", input_args[0]->BuildShape(), prim_name); auto w_shape = CheckAndConvertUtils::ConvertShapePtrToShape("w_shape", input_args[1]->BuildShape(), prim_name); + if (conv_prim->get_format() == NHWC) { + x_shape = {x_shape[0], x_shape[3], x_shape[1], x_shape[2]}; + w_shape = {w_shape[0], w_shape[3], w_shape[1], w_shape[2]}; + } CheckAndConvertUtils::CheckInteger("weight rank", w_shape.size(), kEqual, 4, prim_name); CheckAndConvertUtils::CheckInteger("x rank", x_shape.size(), kEqual, 4, prim_name); CheckAndConvertUtils::Check("x_shape[1] / group", x_shape[1] / conv_prim->get_group(), kEqual, "w_shape[1]", w_shape[1], conv_prim->name()); - auto out_channel = conv_prim->get_output_channel(); + auto out_channel = conv_prim->get_out_channel(); CheckAndConvertUtils::Check("out_channel", out_channel, kEqual, "w_shape[0]", w_shape[0], conv_prim->name()); std::vector temp_w; std::copy(w_shape.begin() + 2, w_shape.end(), std::back_inserter(temp_w)); @@ -137,10 +64,10 @@ abstract::ShapePtr Conv2dInferShape(const PrimitivePtr &primitive, const std::ve int64_t w_out = -1; std::vector pad_list(4, 0); auto pad_mode = conv_prim->get_pad_mode(); - if (pad_mode == "valid") { + if (pad_mode == VALID) { h_out = ceil((x_shape[2] - dilation_h * (kernel_size_h - 1)) / stride_h); w_out = ceil((x_shape[3] - dilation_w * (kernel_size_w - 1)) / stride_w); - } else if (pad_mode == "same") { + } else if (pad_mode == SAME) { h_out = ceil(x_shape[2] / stride_h); w_out = ceil(x_shape[3] / stride_w); @@ -153,7 +80,7 @@ abstract::ShapePtr Conv2dInferShape(const PrimitivePtr &primitive, const std::ve auto pad_left = floor(pad_needed_w / 2); pad_list.emplace_back(pad_left); pad_list.emplace_back(pad_needed_h - pad_left); - } else if (pad_mode == "pad") { + } else if (pad_mode == PAD) { std::copy(conv_prim->get_pad().begin(), conv_prim->get_pad().end(), std::back_inserter(pad_list)); auto pad_top = conv_prim->get_pad()[0]; auto pad_bottom = conv_prim->get_pad()[1]; @@ -165,13 +92,17 @@ abstract::ShapePtr Conv2dInferShape(const PrimitivePtr &primitive, const std::ve h_out = floor(h_out); w_out = floor(w_out); } - conv_prim->set_pad_list(pad_list); + conv_prim->set_pad(pad_list); std::vector out_shape = {x_shape[0], out_channel, h_out, w_out}; + if (conv_prim->get_format() == NHWC) { + out_shape = {x_shape[0], h_out, w_out, out_channel}; + } + return std::make_shared(out_shape); } TypePtr Conv2dInferType(const PrimitivePtr &prim, const std::vector &input_args) { - CheckAndConvertUtils::CheckInRange("", input_args.size(), kIncludeBoth, {2, 3}, prim->name()); + CheckAndConvertUtils::CheckInRange("", input_args.size(), kIncludeBoth, {2, 3}, prim->name()); for (const auto &item 
: input_args) { MS_EXCEPTION_IF_NULL(item); } @@ -186,12 +117,121 @@ TypePtr Conv2dInferType(const PrimitivePtr &prim, const std::vector &kernel_size, int64_t mode, const PadMode &pad_mode, + const std::vector &pad, const std::vector &stride, + const std::vector &dilation, int64_t group, const Format &format) { + set_kernel_size(kernel_size); + set_stride(stride); + set_dilation(dilation); + set_pad(pad); + set_pad_mode(pad_mode); + set_mode(mode); + set_out_channel(out_channel); + set_group(group); + set_format(format); +} + +void Conv2D::set_out_channel(int64_t out_channel) { + AddAttr(kOutChannel, + MakeValue(CheckAndConvertUtils::CheckInteger(kOutChannel, out_channel, kGreaterThan, 0, name()))); +} + +void Conv2D::set_kernel_size(const std::vector &kernel_size) { + AddAttr(kKernelSize, MakeValue(CheckAndConvertUtils::CheckPositiveVector(kKernelSize, kernel_size, name()))); +} + +void Conv2D::set_stride(const std::vector &stride) { + AddAttr(kStride, MakeValue(CheckAndConvertUtils::CheckPositiveVector(kStride, stride, name(), true, true))); +} + +void Conv2D::set_dilation(const std::vector &dilation) { + AddAttr(kDilation, MakeValue(CheckAndConvertUtils::CheckPositiveVector(kDilation, dilation, name(), true, true))); +} + +void Conv2D::set_pad_mode(const PadMode &pad_mode) { + std::vector pad = get_pad(); + if (pad_mode == PAD) { + for (auto item : pad) { + CheckAndConvertUtils::Check(kPadItem, item, kGreaterEqual, "zeros_list", 0, name()); + } + } else { + CheckAndConvertUtils::Check(kPad, pad, kEqual, "zeros_list", {0, 0, 0, 0}, name()); + } + int64_t swi = pad_mode; + AddAttr(kPadMode, MakeValue(swi)); +} + +void Conv2D::set_pad(const std::vector &pad) { + CheckAndConvertUtils::CheckInteger("pad_size", pad.size(), kEqual, 4, name()); + AddAttr(kPad, MakeValue(CheckAndConvertUtils::CheckPositiveVector(kPad, pad, name(), true, true))); +} + +void Conv2D::set_mode(int64_t mode) { + AddAttr(kMode, MakeValue(CheckAndConvertUtils::CheckInteger(kMode, mode, kEqual, 1, name()))); +} + +void Conv2D::set_group(int64_t group) { + AddAttr(kGroup, MakeValue(CheckAndConvertUtils::CheckInteger(kGroup, group, kGreaterThan, 0, name()))); +} + +void Conv2D::set_format(const Format &format) { + int64_t f = format; + AddAttr(kFormat, MakeValue(f)); +} + +int64_t Conv2D::get_out_channel() const { + auto value_ptr = GetAttr(kOutChannel); + return GetValue(value_ptr); +} + +std::vector Conv2D::get_kernel_size() const { + auto value_ptr = GetAttr(kKernelSize); + return GetValue>(value_ptr); +} + +std::vector Conv2D::get_stride() const { + auto value_ptr = GetAttr(kStride); + return GetValue>(value_ptr); +} + +std::vector Conv2D::get_dilation() const { + auto value_ptr = GetAttr(kDilation); + return GetValue>(value_ptr); +} + +PadMode Conv2D::get_pad_mode() const { + auto value_ptr = GetAttr(kPadMode); + return PadMode(GetValue(value_ptr)); +} + +std::vector Conv2D::get_pad() const { + auto value_ptr = GetAttr(kPad); + return GetValue>(value_ptr); +} + +int64_t Conv2D::get_mode() const { + auto value_ptr = GetAttr(kMode); + return GetValue(value_ptr); +} + +int64_t Conv2D::get_group() const { + auto value_ptr = GetAttr(kGroup); + return GetValue(value_ptr); +} + +Format Conv2D::get_format() const { + auto value_ptr = GetAttr(kFormat); + return Format(GetValue(value_ptr)); +} AbstractBasePtr Conv2dInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, const std::vector &input_args) { return std::make_shared(Conv2dInferType(primitive, input_args), Conv2dInferShape(primitive, 
input_args)->shape()); } + REGISTER_PRIMITIVE_EVAL_IMPL(Conv2D, prim::kPrimConv2D, Conv2dInfer); REGISTER_PRIMITIVE_C(kNameConv2D, Conv2D); +} // namespace ops } // namespace mindspore diff --git a/mindspore/core/c_ops/conv2d.h b/mindspore/core/ops/conv2d.h similarity index 69% rename from mindspore/core/c_ops/conv2d.h rename to mindspore/core/ops/conv2d.h index 786a8dbd17b..9639d8383da 100644 --- a/mindspore/core/c_ops/conv2d.h +++ b/mindspore/core/ops/conv2d.h @@ -1,5 +1,5 @@ /** - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2020 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -14,49 +14,52 @@ * limitations under the License. */ -#ifndef MINDSPORE_CORE_C_OPS_CONV2D_H_ -#define MINDSPORE_CORE_C_OPS_CONV2D_H_ +#ifndef MINDSPORE_CORE_OPS_CONV2D_H_ +#define MINDSPORE_CORE_OPS_CONV2D_H_ #include #include #include #include -#include "c_ops/op_utils.h" -#include "c_ops/primitive_c.h" +#include "ops/op_utils.h" +#include "ops/primitive_c.h" #include "abstract/abstract_value.h" #include "utils/check_convert_utils.h" namespace mindspore { +namespace ops { constexpr auto kNameConv2D = "Conv2D"; class Conv2D : public PrimitiveC { public: - Conv2D(); + Conv2D() : PrimitiveC(kNameConv2D) { InitIOName({"x", "w"}, {"output"}); } + explicit Conv2D(const std::string k_name) : PrimitiveC(k_name) { InitIOName({"x", "w"}, {"output"}); } ~Conv2D() = default; MS_DECLARE_PARENT(Conv2D, PrimitiveC); void Init(int64_t out_channel, const std::vector &kernel_size, int64_t mode = 1, - const std::string &pad_mode = "valid", const std::vector &pad = {0, 0, 0, 0}, + const PadMode &pad_mode = VALID, const std::vector &pad = {0, 0, 0, 0}, const std::vector &stride = {1, 1, 1, 1}, const std::vector &dilation = {1, 1, 1, 1}, - int64_t group = 1); - std::vector get_kernel_size() const; - std::vector get_stride() const; - std::vector get_dilation() const; - std::string get_pad_mode() const; - std::vector get_pad() const; - std::vector get_pad_list() const; - int64_t get_mode() const; - int64_t get_group() const; - int64_t get_output_channel() const; + int64_t group = 1, const Format &format = NCHW); void set_kernel_size(const std::vector &kernel_size); void set_stride(const std::vector &stride); void set_dilation(const std::vector &dilation); - void set_pad_mode(const std::string &pad_mode); + void set_pad_mode(const PadMode &pad_mode); void set_pad(const std::vector &pad); void set_mode(int64_t mode); void set_group(int64_t group); - void set_out_channel(int64_t output_channel); - void set_pad_list(const std::vector &pad_list); + void set_out_channel(int64_t out_channel); + void set_format(const Format &format); + std::vector get_kernel_size() const; + std::vector get_stride() const; + std::vector get_dilation() const; + PadMode get_pad_mode() const; + std::vector get_pad() const; + int64_t get_mode() const; + int64_t get_group() const; + int64_t get_out_channel() const; + Format get_format() const; }; AbstractBasePtr Conv2dInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, const std::vector &input_args); using PrimConv2dPtr = std::shared_ptr; +} // namespace ops } // namespace mindspore -#endif // MINDSPORE_CORE_C_OPS_CONV2D_H_ +#endif // MINDSPORE_CORE_OPS_CONV2D_H_ diff --git a/mindspore/core/ops/conv2d_transpose.cc b/mindspore/core/ops/conv2d_transpose.cc new file mode 100644 index 00000000000..ff9716bb1a1 --- /dev/null +++ 
b/mindspore/core/ops/conv2d_transpose.cc @@ -0,0 +1,199 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include +#include +#include +#include +#include + +#include "ops/conv2d_transpose.h" + +namespace mindspore { +namespace ops { +namespace { +abstract::ShapePtr Conv2dTransposeInferShape(const PrimitivePtr &primitive, + const std::vector &input_args) { + MS_EXCEPTION_IF_NULL(primitive); + auto conv2d_transpose_prim = primitive->cast(); + MS_EXCEPTION_IF_NULL(conv2d_transpose_prim); + auto prim_name = conv2d_transpose_prim->name(); + auto input_shape = CheckAndConvertUtils::ConvertShapePtrToShape("x_shape", input_args[3]->BuildShape(), prim_name); + return std::make_shared(input_shape); +} + +TypePtr Conv2dTransposeInferType(const PrimitivePtr &prim, const std::vector &input_args) { + CheckAndConvertUtils::CheckInteger("conv2d_transpose_infer", input_args.size(), kEqual, 3, prim->name()); + for (const auto &item : input_args) { + MS_EXCEPTION_IF_NULL(item); + } + const std::set valid_types = {kNumberTypeInt8, kNumberTypeInt32, kNumberTypeFloat16, kNumberTypeFloat32}; + std::map types; + types.emplace("doutput_dtye", input_args[0]->BuildType()); + types.emplace("w_dtype", input_args[1]->BuildType()); + auto infer_type = CheckAndConvertUtils::CheckTensorTypeSame(types, valid_types, prim->name()); + return TypeIdToType(infer_type); +} +} // namespace + +void Conv2dTranspose::Init(int64_t in_channel, int64_t out_channel, const std::vector &kernel_size, + int64_t mode, const PadMode &pad_mode, const std::vector &pad, + const std::vector &stride, const std::vector &dilation, int64_t group, + const Format &format, const std::vector &pad_list) { + set_in_channel(in_channel); + set_out_channel(out_channel); + set_kernel_size(kernel_size); + set_mode(mode); + set_pad(pad); + set_pad_mode(pad_mode); + set_stride(stride); + set_dilation(dilation); + set_group(group); + set_format(format); + set_pad_list(pad_list); +} + +void Conv2dTranspose::set_in_channel(int64_t in_channel) { + AddAttr(kOutChannel, MakeValue(CheckAndConvertUtils::CheckInteger(kInChannel, in_channel, kGreaterThan, 0, name()))); +} + +void Conv2dTranspose::set_out_channel(int64_t out_channel) { + AddAttr(kOutChannel, + MakeValue(CheckAndConvertUtils::CheckInteger(kOutChannel, out_channel, kGreaterThan, 0, name()))); +} + +void Conv2dTranspose::set_kernel_size(const std::vector &kernel_size) { + CheckAndConvertUtils::CheckInteger(kKernelSize, kernel_size.size(), kEqual, 2, name()); + for (int64_t item : kernel_size) { + CheckAndConvertUtils::CheckInteger(kKernelSize, item, kGreaterEqual, 1, name()); + } + AddAttr(kKernelSize, MakeValue(kernel_size)); +} + +void Conv2dTranspose::set_stride(const std::vector &stride) { + CheckAndConvertUtils::CheckInteger(kStride, stride.size(), kEqual, 2, name()); + for (int64_t item : stride) { + CheckAndConvertUtils::CheckInteger(kStride, item, kGreaterEqual, 1, name()); + } + AddAttr(kStride, MakeValue(stride)); +} + +void 
Conv2dTranspose::set_dilation(const std::vector &dilation) { + CheckAndConvertUtils::CheckInteger(kDilation, dilation.size(), kGreaterEqual, 2, name()); + AddAttr(kDilation, MakeValue(dilation)); +} + +void Conv2dTranspose::set_pad_mode(const PadMode &pad_mode) { + std::vector pad = get_pad(); + if (pad_mode == PAD) { + for (auto item : pad) { + CheckAndConvertUtils::Check(kPadItem, item, kGreaterEqual, "zeros_list", 0, name()); + } + } else { + CheckAndConvertUtils::Check(kPad, pad, kEqual, "zeros_list", {0, 0, 0, 0}, name()); + } + int64_t swi = pad_mode; + AddAttr(kPadMode, MakeValue(swi)); +} + +void Conv2dTranspose::set_pad(const std::vector &pad) { + CheckAndConvertUtils::CheckInteger("pad_size", pad.size(), kEqual, 4, name()); + AddAttr(kPad, MakeValue(CheckAndConvertUtils::CheckPositiveVector(kPad, pad, name(), true, true))); +} + +void Conv2dTranspose::set_mode(int64_t mode) { + AddAttr(kMode, MakeValue(CheckAndConvertUtils::CheckInteger(kMode, mode, kEqual, 1, name()))); +} + +void Conv2dTranspose::set_group(int64_t group) { + AddAttr(kGroup, MakeValue(CheckAndConvertUtils::CheckInteger(kGroup, group, kGreaterThan, 0, name()))); +} + +void Conv2dTranspose::set_format(const Format &format) { + int64_t f = format; + AddAttr(kFormat, MakeValue(f)); +} + +void Conv2dTranspose::set_pad_list(const std::vector &pad_list) { + CheckAndConvertUtils::CheckInteger(kPadList, pad_list.size(), kEqual, 4, name()); + this->AddAttr(kPadList, MakeValue(pad_list)); +} + +int64_t Conv2dTranspose::get_in_channel() const { + auto value_ptr = GetAttr(kInChannel); + return GetValue(value_ptr); +} + +int64_t Conv2dTranspose::get_out_channel() const { + auto value_ptr = GetAttr(kOutChannel); + return GetValue(value_ptr); +} + +std::vector Conv2dTranspose::get_kernel_size() const { + auto value_ptr = GetAttr(kKernelSize); + return GetValue>(value_ptr); +} + +std::vector Conv2dTranspose::get_stride() const { + auto value_ptr = GetAttr(kStride); + return GetValue>(value_ptr); +} + +std::vector Conv2dTranspose::get_dilation() const { + auto value_ptr = GetAttr(kDilation); + return GetValue>(value_ptr); +} + +PadMode Conv2dTranspose::get_pad_mode() const { + auto value_ptr = GetAttr(kPadMode); + return PadMode(GetValue(value_ptr)); +} + +std::vector Conv2dTranspose::get_pad() const { + auto value_ptr = GetAttr(kPad); + return GetValue>(value_ptr); +} + +int64_t Conv2dTranspose::get_mode() const { + auto value_ptr = GetAttr(kMode); + return GetValue(value_ptr); +} + +int64_t Conv2dTranspose::get_group() const { + auto value_ptr = GetAttr(kGroup); + return GetValue(value_ptr); +} + +Format Conv2dTranspose::get_format() const { + auto value_ptr = GetAttr(kFormat); + return Format(GetValue(value_ptr)); +} + +std::vector Conv2dTranspose::get_pad_list() const { + auto value_ptr = GetAttr(kPadList); + return GetValue>(value_ptr); +} + +AbstractBasePtr Conv2dTransposeInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args) { + return std::make_shared(Conv2dTransposeInferType(primitive, input_args), + Conv2dTransposeInferShape(primitive, input_args)->shape()); +} + +REGISTER_PRIMITIVE_EVAL_IMPL(Conv2dTranspose, prim::kPrimConv2DTranspose, Conv2dTransposeInfer); +REGISTER_PRIMITIVE_C(kNameConv2dTranspose, Conv2dTranspose); +} // namespace ops +} // namespace mindspore diff --git a/mindspore/core/ops/conv2d_transpose.h b/mindspore/core/ops/conv2d_transpose.h new file mode 100644 index 00000000000..08be3ef0156 --- /dev/null +++ b/mindspore/core/ops/conv2d_transpose.h @@ 
-0,0 +1,74 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_CORE_OPS_CONV2D_TRANSPOSE_H_ +#define MINDSPORE_CORE_OPS_CONV2D_TRANSPOSE_H_ +#include +#include +#include +#include + +#include "ops/op_utils.h" +#include "ops/primitive_c.h" +#include "abstract/abstract_value.h" +#include "utils/check_convert_utils.h" +namespace mindspore { +namespace ops { +constexpr auto kNameConv2dTranspose = "Conv2dTranspose"; +class Conv2dTranspose : public PrimitiveC { + public: + Conv2dTranspose() : PrimitiveC(kNameConv2dTranspose) { + InitIOName({"out_backprop", "filter", "input_sizes"}, {"output"}); + } + explicit Conv2dTranspose(const std::string k_name) : PrimitiveC(k_name) { + InitIOName({"out_backprop", "filter", "input_sizes"}, {"output"}); + } + ~Conv2dTranspose() = default; + MS_DECLARE_PARENT(Conv2dTranspose, PrimitiveC); + void Init(int64_t in_channel, int64_t out_channel, const std::vector &kernel_size, int64_t mode = 1, + const PadMode &pad_mode = VALID, const std::vector &pad = {0, 0, 0, 0}, + const std::vector &stride = {1, 1}, const std::vector &dilation = {1, 1}, + int64_t group = 1, const Format &format = NCHW, const std::vector &pad_list = {0, 0, 0, 0}); + void set_in_channel(int64_t in_channel); + void set_out_channel(int64_t out_channel); + virtual void set_kernel_size(const std::vector &kernel_size); + void set_stride(const std::vector &stride); + virtual void set_dilation(const std::vector &dilation); + void set_pad_mode(const PadMode &pad_mode); + void set_pad(const std::vector &pad); + void set_mode(int64_t mode); + void set_group(int64_t group); + void set_format(const Format &format); + void set_pad_list(const std::vector &pad_list); + + int64_t get_in_channel() const; + int64_t get_out_channel() const; + std::vector get_kernel_size() const; + std::vector get_stride() const; + std::vector get_dilation() const; + PadMode get_pad_mode() const; + std::vector get_pad() const; + int64_t get_mode() const; + int64_t get_group() const; + Format get_format() const; + std::vector get_pad_list() const; +}; +AbstractBasePtr Conv2dTransposeInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args); +using PrimConv2dTransposePtr = std::shared_ptr; +} // namespace ops +} // namespace mindspore +#endif // MINDSPORE_CORE_OPS_CONV2D_TRANSPOSE_H_ diff --git a/mindspore/core/ops/cos.cc b/mindspore/core/ops/cos.cc new file mode 100644 index 00000000000..3094af960ab --- /dev/null +++ b/mindspore/core/ops/cos.cc @@ -0,0 +1,56 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
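Conv2dTranspose above simply echoes the shape carried by its input_sizes operand, so most of its logic is attribute validation; in particular, set_pad_mode enforces that an explicit pad vector is only meaningful when the mode is PAD, and must be all zeros for "valid"/"same". A compact standalone check mirroring that rule (enum and names illustrative):

#include <algorithm>
#include <cstdint>
#include <stdexcept>
#include <vector>

enum class PadMode { kValid, kSame, kPad };

// Explicit padding entries are only allowed (and must be non-negative) when
// the mode is PAD; otherwise the pad vector has to be {0, 0, 0, 0}.
void CheckPadAgainstMode(const std::vector<int64_t> &pad, PadMode mode) {
  if (pad.size() != 4) throw std::invalid_argument("pad must have four entries");
  if (mode == PadMode::kPad) {
    if (std::any_of(pad.begin(), pad.end(), [](int64_t p) { return p < 0; }))
      throw std::invalid_argument("pad entries must be >= 0");
  } else if (std::any_of(pad.begin(), pad.end(), [](int64_t p) { return p != 0; })) {
    throw std::invalid_argument("pad must be {0, 0, 0, 0} unless pad_mode is PAD");
  }
}

int main() { CheckPadAgainstMode({0, 0, 0, 0}, PadMode::kSame); }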
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include +#include +#include +#include "ops/cos.h" + +namespace mindspore { +namespace ops { +namespace { +abstract::ShapePtr InferShape(const PrimitivePtr &primitive, const std::vector &input_args) { + MS_EXCEPTION_IF_NULL(primitive); + auto cos_prim = primitive->cast(); + MS_EXCEPTION_IF_NULL(cos_prim); + auto prim_name = cos_prim->name(); + for (const auto &item : input_args) { + MS_EXCEPTION_IF_NULL(item); + } + auto in_shape = CheckAndConvertUtils::ConvertShapePtrToShape("x_shape", input_args[0]->BuildShape(), prim_name); + return std::make_shared(in_shape); +} + +TypePtr InferType(const PrimitivePtr &prim, const std::vector &input_args) { + if (std::any_of(input_args.begin(), input_args.end(), [](AbstractBasePtr a) { return a == nullptr; })) { + MS_LOG(EXCEPTION) << "nullptr"; + } + std::map types; + types.emplace("x", input_args[0]->BuildType()); + auto infer_type = CheckAndConvertUtils::CheckTensorTypeSame(types, common_valid_types, prim->name()); + return TypeIdToType(infer_type); +} +} // namespace + +AbstractBasePtr CosInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args) { + return std::make_shared(InferType(primitive, input_args), + InferShape(primitive, input_args)->shape()); +} +REGISTER_PRIMITIVE_EVAL_IMPL(Cos, prim::kPrimCos, CosInfer); +REGISTER_PRIMITIVE_C(kNameCos, Cos); +} // namespace ops +} // namespace mindspore diff --git a/mindspore/core/c_ops/cos.h b/mindspore/core/ops/cos.h similarity index 85% rename from mindspore/core/c_ops/cos.h rename to mindspore/core/ops/cos.h index a9dbc897747..136e4f96e57 100644 --- a/mindspore/core/c_ops/cos.h +++ b/mindspore/core/ops/cos.h @@ -14,16 +14,17 @@ * limitations under the License. */ -#ifndef MINDSPORE_CORE_C_OPS_COS_H_ -#define MINDSPORE_CORE_C_OPS_COS_H_ +#ifndef MINDSPORE_CORE_OPS_COS_H_ +#define MINDSPORE_CORE_OPS_COS_H_ #include #include -#include "c_ops/primitive_c.h" -#include "c_ops/op_utils.h" +#include "ops/primitive_c.h" +#include "ops/op_utils.h" #include "abstract/abstract_value.h" namespace mindspore { +namespace ops { constexpr auto kNameCos = "Cos"; class Cos : public PrimitiveC { public: @@ -35,5 +36,6 @@ class Cos : public PrimitiveC { AbstractBasePtr CosInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, const std::vector &input_args); using PrimCos = std::shared_ptr; +} // namespace ops } // namespace mindspore -#endif // MINDSPORE_CORE_C_OPS_COS_H_ +#endif // MINDSPORE_CORE_OPS_COS_H_ diff --git a/mindspore/core/ops/crop.cc b/mindspore/core/ops/crop.cc new file mode 100644 index 00000000000..9c1a0769034 --- /dev/null +++ b/mindspore/core/ops/crop.cc @@ -0,0 +1,62 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include +#include "ops/crop.h" +#include "utils/check_convert_utils.h" +#include "abstract/primitive_infer_map.h" +#include "ops/op_utils.h" + +namespace mindspore { +namespace ops { +void Crop::Init(const int64_t axis, const std::vector &offsets) { + this->set_axis(axis); + this->set_offsets(offsets); +} + +void Crop::set_axis(const int64_t axis) { this->AddAttr(kAxis, MakeValue(axis)); } + +int64_t Crop::get_axis() const { + auto value_ptr = this->GetAttr(kAxis); + return GetValue(value_ptr); +} + +void Crop::set_offsets(const std::vector &offsets) { this->AddAttr(kOffsets, MakeValue(offsets)); } + +std::vector Crop::get_offsets() const { + auto value_ptr = this->GetAttr(kOffsets); + return GetValue>(value_ptr); +} +AbstractBasePtr CropInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args) { + MS_EXCEPTION_IF_NULL(primitive); + auto crop_prim = primitive->cast(); + MS_EXCEPTION_IF_NULL(crop_prim); + auto prim_name = crop_prim->name(); + CheckAndConvertUtils::CheckInteger("input number", input_args.size(), kEqual, 2, prim_name); + for (const auto &item : input_args) { + MS_EXCEPTION_IF_NULL(item); + } + // infer shape + auto out_shape = CheckAndConvertUtils::ConvertShapePtrToShape("x_shape", input_args[1]->BuildShape(), prim_name); + // infer type + auto x_type = input_args[0]->BuildType()->cast()->element(); + return std::make_shared(x_type, out_shape); +} +REGISTER_PRIMITIVE_EVAL_IMPL(Crop, prim::kPrimCrop, CropInfer); +REGISTER_PRIMITIVE_C(kNameCrop, Crop); +} // namespace ops +} // namespace mindspore diff --git a/mindspore/core/ops/crop.h b/mindspore/core/ops/crop.h new file mode 100644 index 00000000000..676df602cc8 --- /dev/null +++ b/mindspore/core/ops/crop.h @@ -0,0 +1,46 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
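Crop above stores an axis plus per-dimension offsets, and its infer step adopts the second (reference) input's shape as the output shape while keeping the first input's element type. A standalone sketch of the slicing those attributes usually imply under the Caffe-style interpretation (an assumption; the patch itself only registers the attributes and the shape/type pass-through):

#include <cstdint>
#include <iostream>
#include <stdexcept>
#include <vector>

// Dims before `axis` are copied whole; dims from `axis` on start at the given
// offset and take the reference shape's extent.
std::vector<int64_t> CropStartIndices(const std::vector<int64_t> &x_shape, int64_t axis,
                                      const std::vector<int64_t> &offsets) {
  if (offsets.size() != x_shape.size() - static_cast<size_t>(axis)) {
    throw std::invalid_argument("need one offset per cropped dimension");
  }
  std::vector<int64_t> start(x_shape.size(), 0);
  for (size_t i = static_cast<size_t>(axis); i < x_shape.size(); ++i) {
    start[i] = offsets[i - static_cast<size_t>(axis)];
  }
  return start;
}

int main() {
  // x is (1, 3, 224, 224); crop from axis 2 with offsets {10, 20}
  for (auto s : CropStartIndices({1, 3, 224, 224}, 2, {10, 20})) std::cout << s << ' ';
  std::cout << '\n';  // 0 0 10 20
}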
+ */ + +#ifndef MINDSPORE_CORE_OPS_CROP_H_ +#define MINDSPORE_CORE_OPS_CROP_H_ +#include +#include + +#include "ops/primitive_c.h" +#include "abstract/abstract_value.h" +#include "utils/check_convert_utils.h" + +namespace mindspore { +namespace ops { +constexpr auto kNameCrop = "Crop"; +class Crop : public PrimitiveC { + public: + Crop() : PrimitiveC(kNameCrop) {} + ~Crop() = default; + MS_DECLARE_PARENT(Crop, PrimitiveC); + void Init(const int64_t axis, const std::vector &offsets); + void set_axis(const int64_t axis); + void set_offsets(const std::vector &offsets); + int64_t get_axis() const; + std::vector get_offsets() const; +}; +AbstractBasePtr CropInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args); +using PrimCrop = std::shared_ptr; +} // namespace ops +} // namespace mindspore + +#endif // MINDSPORE_CORE_OPS_CROP_H_ diff --git a/mindspore/core/c_ops/custom.cc b/mindspore/core/ops/custom.cc similarity index 92% rename from mindspore/core/c_ops/custom.cc rename to mindspore/core/ops/custom.cc index 2a2f529f59f..dbcd5f528be 100644 --- a/mindspore/core/c_ops/custom.cc +++ b/mindspore/core/ops/custom.cc @@ -14,12 +14,13 @@ * limitations under the License. */ -#include "c_ops/custom.h" +#include "ops/custom.h" #include "utils/check_convert_utils.h" #include "abstract/primitive_infer_map.h" -#include "c_ops/op_utils.h" +#include "ops/op_utils.h" namespace mindspore { +namespace ops { void Custom::Init(const std::vector &custom) { this->set_custom(custom); } void Custom::set_custom(const std::vector &custom) { this->AddAttr(kCustom, MakeValue(custom)); } @@ -29,4 +30,5 @@ std::vector Custom::get_custom() const { return GetValue>(value_ptr); } REGISTER_PRIMITIVE_C(kNameCustom, Custom); +} // namespace ops } // namespace mindspore diff --git a/mindspore/core/c_ops/custom.h b/mindspore/core/ops/custom.h similarity index 86% rename from mindspore/core/c_ops/custom.h rename to mindspore/core/ops/custom.h index 23ee2231bf2..9773f3c7dc1 100644 --- a/mindspore/core/c_ops/custom.h +++ b/mindspore/core/ops/custom.h @@ -14,16 +14,17 @@ * limitations under the License. */ -#ifndef MINDSPORE_CORE_C_OPS_CUSTOM_H_ -#define MINDSPORE_CORE_C_OPS_CUSTOM_H_ +#ifndef MINDSPORE_CORE_OPS_CUSTOM_H_ +#define MINDSPORE_CORE_OPS_CUSTOM_H_ #include #include -#include "c_ops/primitive_c.h" +#include "ops/primitive_c.h" #include "abstract/abstract_value.h" #include "utils/check_convert_utils.h" namespace mindspore { +namespace ops { constexpr auto kNameCustom = "Custom"; class Custom : public PrimitiveC { public: @@ -36,6 +37,7 @@ class Custom : public PrimitiveC { }; using PrimCustomPtr = std::shared_ptr; +} // namespace ops } // namespace mindspore -#endif // MINDSPORE_CORE_C_OPS_CUSTOM_H_ +#endif // MINDSPORE_CORE_OPS_CUSTOM_H_ diff --git a/mindspore/core/ops/custom_extract_features.cc b/mindspore/core/ops/custom_extract_features.cc new file mode 100644 index 00000000000..0ef9d64f643 --- /dev/null +++ b/mindspore/core/ops/custom_extract_features.cc @@ -0,0 +1,56 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
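A minimal usage sketch of the Crop accessor API introduced above; the offsets element type (int64_t) and the concrete axis/offsets values are assumptions for illustration.

#include <cstdint>
#include <memory>
#include <vector>
#include "ops/crop.h"

void BuildCropExample() {
  auto crop = std::make_shared<mindspore::ops::Crop>();
  // Attributes land on the underlying Primitive via AddAttr and are read back through GetAttr.
  crop->Init(/*axis=*/2, std::vector<int64_t>{0, 0});  // offsets element type assumed to be int64_t
  int64_t axis = crop->get_axis();                     // 2
  std::vector<int64_t> offsets = crop->get_offsets();  // {0, 0}
  (void)axis;
  (void)offsets;
}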
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "ops/custom_extract_features.h" +#include "utils/check_convert_utils.h" +#include "abstract/primitive_infer_map.h" +#include "ops/op_utils.h" + +namespace mindspore { +namespace ops { +AbstractBasePtr CustomExtractFeaturesInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args) { + MS_EXCEPTION_IF_NULL(primitive); + auto extract_prim = primitive->cast(); + MS_EXCEPTION_IF_NULL(extract_prim); + auto prim_name = extract_prim->name(); + MS_EXCEPTION_IF_NULL(input_args[0]); + // auto input = input_args[0]; + + // Infer type + auto output0_type = TypeIdToType(kNumberTypeInt32); + auto output1_type = TypeIdToType(kNumberTypeFloat32); + + // Infer shape + std::vector out_shape; + auto input_shape = + CheckAndConvertUtils::ConvertShapePtrToShape("input_shape", input_args[0]->BuildShape(), prim_name); + auto string_num = input_shape[0]; + if (string_num == 0) { + out_shape.push_back(1); + } else { + out_shape.push_back(string_num); + } + + auto output0 = std::make_shared(output0_type, out_shape); + auto output1 = std::make_shared(output1_type, out_shape); + AbstractBasePtrList output = {output0, output1}; + return std::make_shared(output); +} +REGISTER_PRIMITIVE_EVAL_IMPL(CustomExtractFeatures, prim::kPrimCustomExtractFeatures, CustomExtractFeaturesInfer); +REGISTER_PRIMITIVE_C(kNameCustomExtractFeatures, CustomExtractFeatures); +} // namespace ops +} // namespace mindspore diff --git a/mindspore/core/ops/custom_extract_features.h b/mindspore/core/ops/custom_extract_features.h new file mode 100644 index 00000000000..f9976cdea4f --- /dev/null +++ b/mindspore/core/ops/custom_extract_features.h @@ -0,0 +1,41 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef MINDSPORE_CORE_OPS_CUSTOM_EXTRACT_FEATURES_H_ +#define MINDSPORE_CORE_OPS_CUSTOM_EXTRACT_FEATURES_H_ +#include +#include +#include "ops/primitive_c.h" +#include "abstract/abstract_value.h" +#include "utils/check_convert_utils.h" + +namespace mindspore { +namespace ops { +constexpr auto kNameCustomExtractFeatures = "CustomExtractFeatures"; +class CustomExtractFeatures : public PrimitiveC { + public: + CustomExtractFeatures() : PrimitiveC(kNameCustomExtractFeatures) {} + ~CustomExtractFeatures() = default; + MS_DECLARE_PARENT(CustomExtractFeatures, PrimitiveC); + void Init() {} +}; +AbstractBasePtr CustomExtractFeaturesInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args); +using PrimCustomExtractFeaturesPtr = std::shared_ptr; +} // namespace ops +} // namespace mindspore + +#endif // MINDSPORE_CORE_OPS_CUSTOM_EXTRACT_FEATURES_H_ diff --git a/mindspore/core/ops/custom_normalize.cc b/mindspore/core/ops/custom_normalize.cc new file mode 100644 index 00000000000..b1f05f777b1 --- /dev/null +++ b/mindspore/core/ops/custom_normalize.cc @@ -0,0 +1,69 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "ops/custom_normalize.h" +#include "utils/check_convert_utils.h" +#include "abstract/primitive_infer_map.h" + +namespace mindspore { +namespace ops { +namespace { +abstract::ShapePtr CustomNormalizeInferShape(const PrimitivePtr &primitive, + const std::vector &input_args) { + MS_EXCEPTION_IF_NULL(primitive); + auto custom_normalize_prim = primitive->cast(); + MS_EXCEPTION_IF_NULL(custom_normalize_prim); + // auto prim_name = custom_normalize_prim->name(); + MS_EXCEPTION_IF_NULL(input_args[0]); + MS_EXCEPTION_IF_NULL(input_args[0]->BuildShape()); + // auto input_shape = + // CheckAndConvertUtils::ConvertShapePtrToShape("input_shape", input_args[0]->BuildShape(), prim_name); + if (input_args[0]->BuildValue()->cast()->data_c() == nullptr) { + MS_LOG(ERROR) << "Do infer shape in runtime."; + } + std::vector infer_shape; + auto string_num = reinterpret_cast(input_args[0]->BuildValue()->cast()->data_c()); + if (*string_num == 0) { + infer_shape.push_back(1); + } else { + infer_shape.push_back(*string_num); + } + return std::make_shared(infer_shape); +} + +TypePtr CustomNormalizeInferType(const PrimitivePtr &prim, const std::vector &input_args) { + for (const auto &item : input_args) { + MS_EXCEPTION_IF_NULL(item); + } + auto infer_type = input_args[0]->BuildType(); + auto tensor_type = infer_type->cast(); + MS_EXCEPTION_IF_NULL(tensor_type); + auto data_type = tensor_type->element(); + MS_EXCEPTION_IF_NULL(data_type); + return data_type; +} +} // namespace + +AbstractBasePtr CustomNormalizeInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args) { + return std::make_shared(CustomNormalizeInferType(primitive, input_args), + CustomNormalizeInferShape(primitive, input_args)->shape()); +} + 
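Both CustomExtractFeaturesInfer and CustomNormalizeInferShape above size their single output dimension from the number of input strings, falling back to 1 when that count is 0; a standalone sketch of that rule (an illustration, not the MindSpore implementation):

#include <cstdint>
#include <vector>

std::vector<int64_t> StringCountOutShape(int64_t string_num) {
  std::vector<int64_t> out_shape;
  // A zero string count still yields a one-element output dimension.
  out_shape.push_back(string_num == 0 ? 1 : string_num);
  return out_shape;  // e.g. string_num == 0 -> {1}, string_num == 8 -> {8}
}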
+REGISTER_PRIMITIVE_EVAL_IMPL(CustomNormalize, prim::kPrimCustomNormalize, CustomNormalizeInfer); +REGISTER_PRIMITIVE_C(kNameCustomNormalize, CustomNormalize); +} // namespace ops +} // namespace mindspore diff --git a/mindspore/core/c_ops/custom_normalize.h b/mindspore/core/ops/custom_normalize.h similarity index 72% rename from mindspore/core/c_ops/custom_normalize.h rename to mindspore/core/ops/custom_normalize.h index 7cce4efcb0b..21256921c2a 100644 --- a/mindspore/core/c_ops/custom_normalize.h +++ b/mindspore/core/ops/custom_normalize.h @@ -13,15 +13,17 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -#ifndef MINDSPORE_CORE_C_OPS_CUSTOMNORMALIZE_H_ -#define MINDSPORE_CORE_C_OPS_CUSTOMNORMALIZE_H_ +#ifndef MINDSPORE_CORE_OPS_CUSTOM_NORMALIZE_H_ +#define MINDSPORE_CORE_OPS_CUSTOM_NORMALIZE_H_ #include +#include -#include "c_ops/primitive_c.h" +#include "ops/primitive_c.h" #include "abstract/abstract_value.h" #include "utils/check_convert_utils.h" namespace mindspore { +namespace ops { constexpr auto kNameCustomNormalize = "CustomNormalize"; class CustomNormalize : public PrimitiveC { public: @@ -31,7 +33,10 @@ class CustomNormalize : public PrimitiveC { void Init() {} }; +AbstractBasePtr CustomNormalizeInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args); using PrimCustomNormalizePtr = std::shared_ptr; +} // namespace ops } // namespace mindspore -#endif // MINDSPORE_CORE_C_OPS_CUSTOMNORMALIZE_H_ +#endif // MINDSPORE_CORE_OPS_CUSTOM_NORMALIZE_H_ diff --git a/mindspore/core/ops/custom_predict.cc b/mindspore/core/ops/custom_predict.cc new file mode 100644 index 00000000000..a7ddc0c820b --- /dev/null +++ b/mindspore/core/ops/custom_predict.cc @@ -0,0 +1,65 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "ops/custom_predict.h" +#include "ops/op_utils.h" +#include "utils/check_convert_utils.h" +#include "abstract/primitive_infer_map.h" + +namespace mindspore { +namespace ops { +void CustomPredict::Init(const int64_t output_num, const float weight_threshold) { + this->set_output_num(output_num); + this->set_weight_threshold(weight_threshold); +} + +void CustomPredict::set_output_num(const int64_t output_num) { this->AddAttr(kOutputNum, MakeValue(output_num)); } + +int64_t CustomPredict::get_output_num() const { + auto value_ptr = this->GetAttr(kOutputNum); + return GetValue(value_ptr); +} + +void CustomPredict::set_weight_threshold(const float weight_threshold) { + this->AddAttr(kWeightThreshold, MakeValue(weight_threshold)); +} + +float CustomPredict::get_weight_threshold() const { + auto value_ptr = this->GetAttr(kWeightThreshold); + return GetValue(value_ptr); +} + +AbstractBasePtr CustomPredictInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args) { + MS_EXCEPTION_IF_NULL(primitive); + auto CustomPredict_prim = primitive->cast(); + MS_EXCEPTION_IF_NULL(CustomPredict_prim); + for (auto input : input_args) { + MS_EXCEPTION_IF_NULL(input); + } + std::vector shape; + shape.push_back(CustomPredict_prim->get_output_num()); + + auto output0 = std::make_shared(TypeIdToType(kNumberTypeInt32), shape); + auto output1 = std::make_shared(TypeIdToType(kNumberTypeFloat32), shape); + AbstractBasePtrList output = {output0, output1}; + return std::make_shared(output); +} + +REGISTER_PRIMITIVE_EVAL_IMPL(CustomPredict, prim::kPrimCustomPredict, CustomPredictInfer); +REGISTER_PRIMITIVE_C(kNameCustomPredict, CustomPredict); +} // namespace ops +} // namespace mindspore diff --git a/mindspore/core/c_ops/custom_predict.h b/mindspore/core/ops/custom_predict.h similarity index 62% rename from mindspore/core/c_ops/custom_predict.h rename to mindspore/core/ops/custom_predict.h index 02b4c1bd835..aadb72b2602 100644 --- a/mindspore/core/c_ops/custom_predict.h +++ b/mindspore/core/ops/custom_predict.h @@ -13,30 +13,34 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ -#ifndef MINDSPORE_CORE_C_OPS_CUSTOMPREDICT_H_ -#define MINDSPORE_CORE_C_OPS_CUSTOMPREDICT_H_ +#ifndef MINDSPORE_CORE_OPS_CUSTOM_PREDICT_H_ +#define MINDSPORE_CORE_OPS_CUSTOM_PREDICT_H_ #include +#include -#include "c_ops/primitive_c.h" -#include "c_ops/op_utils.h" +#include "ops/primitive_c.h" +#include "ops/op_utils.h" #include "abstract/abstract_value.h" #include "utils/check_convert_utils.h" namespace mindspore { +namespace ops { constexpr auto kNameCustomPredict = "CustomPredict"; class CustomPredict : public PrimitiveC { public: CustomPredict() : PrimitiveC(kNameCustomPredict) {} ~CustomPredict() = default; MS_DECLARE_PARENT(CustomPredict, PrimitiveC); - void Init(int64_t outputNum, float weight_threshold); - void set_outputNum(int64_t outputNum); - void set_weight_threshold(float weight_threshold); - int64_t get_outputNum() const; + void Init(const int64_t output_num, const float weight_threshold); + void set_output_num(const int64_t output_num); + void set_weight_threshold(const float weight_threshold); + int64_t get_output_num() const; float get_weight_threshold() const; }; - +AbstractBasePtr CustomPredictInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args); using PrimCustomPredictPtr = std::shared_ptr; +} // namespace ops } // namespace mindspore -#endif // MINDSPORE_CORE_C_OPS_CUSTOMPREDICT_H_ +#endif // MINDSPORE_CORE_OPS_CUSTOM_PREDICT_H_ diff --git a/mindspore/core/c_ops/depend.cc b/mindspore/core/ops/depend.cc similarity index 88% rename from mindspore/core/c_ops/depend.cc rename to mindspore/core/ops/depend.cc index cbe7f520379..10dc1d14c4e 100644 --- a/mindspore/core/c_ops/depend.cc +++ b/mindspore/core/ops/depend.cc @@ -14,8 +14,10 @@ * limitations under the License. */ -#include "c_ops/depend.h" +#include "ops/depend.h" namespace mindspore { +namespace ops { REGISTER_PRIMITIVE_C(kNameDepend, Depend); -} +} // namespace ops +} // namespace mindspore diff --git a/mindspore/core/c_ops/depend.h b/mindspore/core/ops/depend.h similarity index 84% rename from mindspore/core/c_ops/depend.h rename to mindspore/core/ops/depend.h index 5a1d8d8d7f5..8d138124555 100644 --- a/mindspore/core/c_ops/depend.h +++ b/mindspore/core/ops/depend.h @@ -14,16 +14,17 @@ * limitations under the License. */ -#ifndef MINDSPORE_CORE_C_OPS_DEPEND_H_ -#define MINDSPORE_CORE_C_OPS_DEPEND_H_ +#ifndef MINDSPORE_CORE_OPS_DEPEND_H_ +#define MINDSPORE_CORE_OPS_DEPEND_H_ #include #include -#include "c_ops/primitive_c.h" -#include "c_ops/op_utils.h" +#include "ops/primitive_c.h" +#include "ops/op_utils.h" #include "abstract/abstract_value.h" namespace mindspore { +namespace ops { constexpr auto kNameDepend = "Depend"; class Depend : public PrimitiveC { public: @@ -35,5 +36,6 @@ class Depend : public PrimitiveC { AbstractBasePtr DependInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, const std::vector &input_args); using PrimDepend = std::shared_ptr; +} // namespace ops } // namespace mindspore -#endif // MINDSPORE_CORE_C_OPS_DEPEND_H_ +#endif // MINDSPORE_CORE_OPS_DEPEND_H_ diff --git a/mindspore/core/ops/depth_to_space.cc b/mindspore/core/ops/depth_to_space.cc new file mode 100644 index 00000000000..cb81399d5ce --- /dev/null +++ b/mindspore/core/ops/depth_to_space.cc @@ -0,0 +1,87 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
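The header hunk above renames the camelCase accessors to the snake_case forms used by custom_predict.cc; a minimal sketch of the renamed interface, with illustrative values only:

#include <cstdint>
#include <memory>
#include "ops/custom_predict.h"

void BuildCustomPredictExample() {
  auto predict = std::make_shared<mindspore::ops::CustomPredict>();
  predict->Init(/*output_num=*/5, /*weight_threshold=*/0.5f);
  // CustomPredictInfer sizes both of its outputs from output_num.
  int64_t n = predict->get_output_num();        // 5
  float thr = predict->get_weight_threshold();  // 0.5f
  (void)n;
  (void)thr;
}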
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#include "ops/depth_to_space.h" +#include +#include +#include +#include +#include +#include "ops/op_utils.h" +#include "utils/check_convert_utils.h" +#include "abstract/primitive_infer_map.h" + +namespace mindspore { +namespace ops { +void DepthToSpace::set_block_size(const int64_t block_size) { + CheckAndConvertUtils::Check(kBlockSize, block_size, kGreaterEqual, "", 2, this->name()); + this->AddAttr(kBlockSize, MakeValue(block_size)); +} + +int64_t DepthToSpace::get_block_size() const { + auto value_ptr = GetAttr(kBlockSize); + return GetValue(value_ptr); +} +void DepthToSpace::set_format(const Format &format) { + int64_t f = format; + this->AddAttr(kFormat, MakeValue(f)); +} + +Format DepthToSpace::get_format() const { + auto value_ptr = GetAttr(kFormat); + return Format(GetValue(value_ptr)); +} + +void DepthToSpace::Init(const int64_t block_size, const Format &format) { + this->set_block_size(block_size); + this->set_format(format); +} + +AbstractBasePtr DepthToSpaceInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args) { + MS_EXCEPTION_IF_NULL(primitive); + auto prim = primitive->cast(); + MS_EXCEPTION_IF_NULL(prim); + auto prim_name = prim->name(); + CheckAndConvertUtils::CheckInteger("input number", input_args.size(), kEqual, 1, prim_name); + for (const auto &item : input_args) { + MS_EXCEPTION_IF_NULL(item); + } + auto input_x = input_args[0]->cast(); + MS_EXCEPTION_IF_NULL(input_x); + + auto x_shape = CheckAndConvertUtils::ConvertShapePtrToShape("x_shape", input_args[0]->BuildShape(), prim_name); + if (prim->get_format() == NHWC) { + x_shape = {x_shape[0], x_shape[3], x_shape[1], x_shape[2]}; + } + CheckAndConvertUtils::CheckInteger("x rank", x_shape.size(), kEqual, 4, prim_name); + int64_t block_size = prim->get_block_size(); + CheckAndConvertUtils::CheckInteger("x_shape[1] % (block_size*block_size)", x_shape[1] % (block_size * block_size), + kEqual, 0, prim_name); + auto out_shape = x_shape; + out_shape[1] /= block_size * block_size; + out_shape[2] *= block_size; + out_shape[3] *= block_size; + if (prim->get_format() == NHWC) { + out_shape = {out_shape[0], out_shape[2], out_shape[3], out_shape[1]}; + } + auto ret = input_x->Broaden(); + ret->set_shape(std::make_shared(out_shape)); + return ret; +} +REGISTER_PRIMITIVE_EVAL_IMPL(DepthToSpace, prim::kPrimDepthToSpace, DepthToSpaceInfer); +REGISTER_PRIMITIVE_C(kNameDepthToSpace, DepthToSpace); +} // namespace ops +} // namespace mindspore diff --git a/mindspore/core/c_ops/depth_to_space.h b/mindspore/core/ops/depth_to_space.h similarity index 67% rename from mindspore/core/c_ops/depth_to_space.h rename to mindspore/core/ops/depth_to_space.h index 7dcd7330177..c8cb0263fff 100644 --- a/mindspore/core/c_ops/depth_to_space.h +++ b/mindspore/core/ops/depth_to_space.h @@ -14,30 +14,36 @@ * limitations under the License. 
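DepthToSpaceInfer above divides the channel dimension by block_size squared and scales H and W up by block_size (after normalizing an NHWC input to NCHW order); a worked shape example with an assumed input shape:

#include <cstdint>
#include <memory>
#include <vector>
#include "ops/depth_to_space.h"

void DepthToSpaceShapeExample() {
  auto op = std::make_shared<mindspore::ops::DepthToSpace>();
  op->Init(/*block_size=*/2);  // format defaults to NCHW; block_size must be >= 2
  const int64_t b = op->get_block_size();
  std::vector<int64_t> x_shape = {1, 12, 4, 6};  // NCHW, chosen so that C % (b * b) == 0
  std::vector<int64_t> out_shape = {x_shape[0], x_shape[1] / (b * b), x_shape[2] * b, x_shape[3] * b};
  // out_shape == {1, 3, 8, 12}
  (void)out_shape;
}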
*/ -#ifndef MINDSPORE_CORE_C_OPS_DEPTH_TO_SPACE_H_ -#define MINDSPORE_CORE_C_OPS_DEPTH_TO_SPACE_H_ +#ifndef MINDSPORE_CORE_OPS_DEPTH_TO_SPACE_H_ +#define MINDSPORE_CORE_OPS_DEPTH_TO_SPACE_H_ #include #include #include #include -#include "c_ops/primitive_c.h" +#include "ops/primitive_c.h" #include "abstract/abstract_value.h" #include "utils/check_convert_utils.h" namespace mindspore { +namespace ops { constexpr auto kNameDepthToSpace = "DepthToSpace"; class DepthToSpace : public PrimitiveC { public: DepthToSpace() : PrimitiveC(kNameDepthToSpace) { InitIOName({"x"}, {"y"}); } ~DepthToSpace() = default; MS_DECLARE_PARENT(DepthToSpace, PrimitiveC); - void Init(const int64_t &block_size, const Format &format = NCHW); - void set_block_size(const int64_t &block_size); + void Init(const int64_t block_size, const Format &format = NCHW); + void set_block_size(const int64_t block_size); int64_t get_block_size() const; void set_format(const Format &format); Format get_format() const; }; + +AbstractBasePtr DepthToSpaceInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args); +using PrimDepthToSpacePtr = std::shared_ptr; +} // namespace ops } // namespace mindspore -#endif // MINDSPORE_CORE_C_OPS_DEPTH_TO_SPACE_H_ +#endif // MINDSPORE_CORE_OPS_DEPTH_TO_SPACE_H_ diff --git a/mindspore/core/c_ops/depthwise_conv2d.cc b/mindspore/core/ops/depthwise_conv2d.cc similarity index 84% rename from mindspore/core/c_ops/depthwise_conv2d.cc rename to mindspore/core/ops/depthwise_conv2d.cc index b2745685387..93290dbc9da 100644 --- a/mindspore/core/c_ops/depthwise_conv2d.cc +++ b/mindspore/core/ops/depthwise_conv2d.cc @@ -14,19 +14,21 @@ * limitations under the License. */ -#include "c_ops/depthwise_conv2d.h" +#include "ops/depthwise_conv2d.h" #include #include #include #include -#include "c_ops/op_utils.h" +#include "ops/op_utils.h" #include "utils/check_convert_utils.h" #include "abstract/primitive_infer_map.h" namespace mindspore { -void DepthWiseConv2D::Init(int64_t channel_multiplier, const std::vector &kernel_size, int64_t mode, - const std::string &pad_mode, const std::vector &pad, - const std::vector &stride, const std::vector &dilation, int64_t group) { +namespace ops { +void DepthWiseConv2D::Init(const int64_t channel_multiplier, const std::vector &kernel_size, + const int64_t mode, const PadMode &pad_mode, const std::vector &pad, + const std::vector &stride, const std::vector &dilation, + const int64_t group) { auto prim_name = this->name(); this->set_format(NCHW); this->AddAttr("offset_a", MakeValue(0)); @@ -45,10 +47,10 @@ void DepthWiseConv2D::Init(int64_t channel_multiplier, const std::vectorset_dilation(dilations); - this->set_pad_mode(CheckAndConvertUtils::CheckString(kPadMode, pad_mode, {"valid", "same", "pad"}, prim_name)); + this->set_pad_mode(pad_mode); CheckAndConvertUtils::CheckInteger("pad_size", pad.size(), kEqual, 4, prim_name); - if (pad_mode == "pad") { + if (pad_mode == PAD) { for (auto item : pad) { CheckAndConvertUtils::Check("pad_item", item, kGreaterEqual, "zeros_list", 0, prim_name); } @@ -74,9 +76,9 @@ std::vector DepthWiseConv2D::get_dilation() const { auto value_ptr = GetAttr(kDilation); return GetValue>(value_ptr); } -std::string DepthWiseConv2D::get_pad_mode() const { +PadMode DepthWiseConv2D::get_pad_mode() const { auto value_ptr = this->GetAttr(kPadMode); - return GetValue(value_ptr); + return PadMode(GetValue(value_ptr)); } std::vector DepthWiseConv2D::get_pad() const { auto value_ptr = this->GetAttr(kPad); @@ -110,11 +112,14 @@ 
void DepthWiseConv2D::set_stride(const std::vector &stride) { this->Add void DepthWiseConv2D::set_dilation(const std::vector &dilation) { this->AddAttr(kDilation, MakeValue(dilation)); } -void DepthWiseConv2D::set_pad_mode(const std::string &pad_mode) { this->AddAttr(kPadMode, MakeValue(pad_mode)); } +void DepthWiseConv2D::set_pad_mode(const PadMode &pad_mode) { + int64_t swi = pad_mode; + this->AddAttr(kPadMode, MakeValue(swi)); +} void DepthWiseConv2D::set_pad(const std::vector &pad) { this->AddAttr(kPad, MakeValue(pad)); } -void DepthWiseConv2D::set_mode(int64_t mode) { this->AddAttr(kMode, MakeValue(mode)); } -void DepthWiseConv2D::set_group(int64_t group) { this->AddAttr(kGroup, MakeValue(group)); } -void DepthWiseConv2D::set_out_channel(int64_t out_channel) { this->AddAttr(kOutChannel, MakeValue(out_channel)); } +void DepthWiseConv2D::set_mode(const int64_t mode) { this->AddAttr(kMode, MakeValue(mode)); } +void DepthWiseConv2D::set_group(const int64_t group) { this->AddAttr(kGroup, MakeValue(group)); } +void DepthWiseConv2D::set_out_channel(const int64_t out_channel) { this->AddAttr(kOutChannel, MakeValue(out_channel)); } void DepthWiseConv2D::set_pads(const std::vector &pad_list) { this->AddAttr(kPads, MakeValue(pad_list)); } void DepthWiseConv2D::set_format(const Format &format) { int64_t f = format; @@ -132,10 +137,13 @@ abstract::ShapePtr DepthWiseConv2DInferShape(const PrimitivePtr &primitive, auto conv_prim = primitive->cast(); MS_EXCEPTION_IF_NULL(conv_prim); auto prim_name = conv_prim->name(); - CheckAndConvertUtils::CheckInRange("conv2d_Infer", input_args.size(), kIncludeBoth, {2, 3}, prim_name); + CheckAndConvertUtils::CheckInRange("conv2d_Infer", input_args.size(), kIncludeBoth, {2, 3}, prim_name); auto x_shape = CheckAndConvertUtils::ConvertShapePtrToShape("x_shape", input_args[0]->GetShapeTrack(), prim_name); auto w_shape = CheckAndConvertUtils::ConvertShapePtrToShape("w_shape", input_args[1]->GetShapeTrack(), prim_name); - + if (conv_prim->get_format() == NHWC) { + x_shape = {x_shape[0], x_shape[3], x_shape[1], x_shape[2]}; + w_shape = {w_shape[0], w_shape[3], w_shape[1], w_shape[2]}; + } CheckAndConvertUtils::CheckInteger("weight_rank", w_shape.size(), kEqual, 4, prim_name); CheckAndConvertUtils::CheckInteger("x_rank", x_shape.size(), kEqual, 4, prim_name); CheckAndConvertUtils::Check("x_shape[1]", x_shape[1], kEqual, "w_shape[1]", w_shape[1], conv_prim->name()); @@ -162,10 +170,10 @@ abstract::ShapePtr DepthWiseConv2DInferShape(const PrimitivePtr &primitive, int64_t w_out = -1; std::vector pad_list(4, 0); auto pad_mode = conv_prim->get_pad_mode(); - if (pad_mode == "valid") { + if (pad_mode == VALID) { h_out = ceil((x_shape[2] - dilation_h * (kernel_size_h - 1)) / stride_h); w_out = ceil((x_shape[3] - dilation_w * (kernel_size_w - 1)) / stride_w); - } else if (pad_mode == "same") { + } else if (pad_mode == SAME) { h_out = ceil(x_shape[2] / stride_h); w_out = ceil(x_shape[3] / stride_w); @@ -178,7 +186,7 @@ abstract::ShapePtr DepthWiseConv2DInferShape(const PrimitivePtr &primitive, auto pad_left = floor(pad_needed_w / 2); pad_list.emplace_back(pad_left); pad_list.emplace_back(pad_needed_h - pad_left); - } else if (pad_mode == "pad") { + } else if (pad_mode == PAD) { std::copy(conv_prim->get_pad().begin(), conv_prim->get_pad().end(), std::back_inserter(pad_list)); auto pad_top = conv_prim->get_pad()[0]; auto pad_bottom = conv_prim->get_pad()[1]; @@ -192,11 +200,14 @@ abstract::ShapePtr DepthWiseConv2DInferShape(const PrimitivePtr &primitive, } 
conv_prim->set_pads(pad_list); std::vector out_shape = {x_shape[0], out_channel * x_shape[1], h_out, w_out}; + if (conv_prim->get_format() == NHWC) { + out_shape = {x_shape[0], h_out, w_out, out_channel * x_shape[1]}; + } return std::make_shared(out_shape); } TypePtr DepthWiseConv2DInferType(const PrimitivePtr &prim, const std::vector &input_args) { - CheckAndConvertUtils::CheckInRange("", input_args.size(), kIncludeBoth, {2, 3}, prim->name()); + CheckAndConvertUtils::CheckInRange("", input_args.size(), kIncludeBoth, {2, 3}, prim->name()); for (const auto &item : input_args) { MS_EXCEPTION_IF_NULL(item); } @@ -217,4 +228,5 @@ AbstractBasePtr DepthWiseConv2DInfer(const abstract::AnalysisEnginePtr &, const DepthWiseConv2DInferShape(primitive, input_args)->shape()); } REGISTER_PRIMITIVE_C(kNameDepthWiseConv2D, DepthWiseConv2D); +} // namespace ops } // namespace mindspore diff --git a/mindspore/core/c_ops/depthwise_conv2d.h b/mindspore/core/ops/depthwise_conv2d.h similarity index 73% rename from mindspore/core/c_ops/depthwise_conv2d.h rename to mindspore/core/ops/depthwise_conv2d.h index 2780b76ab60..3fd1a5be686 100644 --- a/mindspore/core/c_ops/depthwise_conv2d.h +++ b/mindspore/core/ops/depthwise_conv2d.h @@ -14,51 +14,54 @@ * limitations under the License. */ -#ifndef MINDSPORE_CORE_C_OPS_DEPTHWISE_CONV2D_H -#define MINDSPORE_CORE_C_OPS_DEPTHWISE_CONV2D_H +#ifndef MINDSPORE_CORE_OPS_DEPTHWISE_CONV2D_H +#define MINDSPORE_CORE_OPS_DEPTHWISE_CONV2D_H #include #include #include #include -#include "c_ops/primitive_c.h" +#include "ops/primitive_c.h" #include "abstract/abstract_value.h" #include "utils/check_convert_utils.h" namespace mindspore { +namespace ops { constexpr auto kNameDepthWiseConv2D = "DepthwiseConv2dNative"; class DepthWiseConv2D : public PrimitiveC { public: DepthWiseConv2D() : PrimitiveC(kNameDepthWiseConv2D) { InitIOName({"x", "w"}, {"output"}); } + explicit DepthWiseConv2D(const std::string k_name) : PrimitiveC(k_name) { InitIOName({"x", "w"}, {"output"}); } ~DepthWiseConv2D() = default; MS_DECLARE_PARENT(DepthWiseConv2D, PrimitiveC); - void Init(int64_t out_channel, const std::vector &kernel_size, int64_t mode = 1, - const std::string &pad_mode = "valid", const std::vector &pad = {0, 0, 0, 0}, + void Init(const int64_t out_channel, const std::vector &kernel_size, const int64_t mode = 1, + const PadMode &pad_mode = VALID, const std::vector &pad = {0, 0, 0, 0}, const std::vector &stride = {1, 1, 1, 1}, const std::vector &dilation = {1, 1, 1, 1}, - int64_t group = 1); + const int64_t group = 1); + void set_kernel_size(const std::vector &kernel_size); + void set_stride(const std::vector &stride); + void set_dilation(const std::vector &dilation); + void set_pad_mode(const PadMode &pad_mode); + void set_pad(const std::vector &pad); + void set_mode(const int64_t mode); + void set_group(const int64_t group); + void set_out_channel(const int64_t out_channel); + void set_pads(const std::vector &pad_list); + void set_format(const Format &format); std::vector get_kernel_size() const; std::vector get_stride() const; std::vector get_dilation() const; - std::string get_pad_mode() const; + PadMode get_pad_mode() const; std::vector get_pad() const; std::vector get_pads() const; int64_t get_mode() const; int64_t get_group() const; int64_t get_out_channel() const; - void set_kernel_size(const std::vector &kernel_size); - void set_stride(const std::vector &stride); - void set_dilation(const std::vector &dilation); - void set_pad_mode(const std::string &pad_mode); - void set_pad(const 
std::vector &pad); - void set_mode(int64_t mode); - void set_group(int64_t group); - void set_out_channel(int64_t out_channel); - void set_pads(const std::vector &pad_list); - void set_format(const Format &format); Format get_format() const; }; AbstractBasePtr DepthWiseConv2DInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, const std::vector &input_args); using PrimDepthWiseConv2DPtr = std::shared_ptr; +} // namespace ops } // namespace mindspore -#endif // MINDSPORE_CORE_C_OPS_DEPTHWISE_CONV2D_H +#endif // MINDSPORE_CORE_OPS_DEPTHWISE_CONV2D_H diff --git a/mindspore/core/ops/detection_post_process.cc b/mindspore/core/ops/detection_post_process.cc new file mode 100644 index 00000000000..ebba540e7d8 --- /dev/null +++ b/mindspore/core/ops/detection_post_process.cc @@ -0,0 +1,174 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "ops/detection_post_process.h" +#include "ops/op_utils.h" +#include "utils/check_convert_utils.h" + +namespace mindspore { +namespace ops { +void DetectionPostProcess::Init(const int64_t inputSize, const std::vector &scale, const float NmsIouThreshold, + const float NmsScoreThreshold, const int64_t MaxDetections, + const int64_t DetectionsPerClass, const int64_t MaxClassesPerDetection, + const int64_t NumClasses, const bool UseRegularNms, const bool OutQuantized, + const Format &format) { + set_input_size(inputSize); + set_scale(scale); + set_nms_iou_threshold(NmsIouThreshold); + set_nms_score_threshold(NmsScoreThreshold); + set_max_detections(MaxDetections); + set_detections_per_class(DetectionsPerClass); + set_max_classes_per_detection(MaxClassesPerDetection); + set_num_classes(NumClasses); + set_use_regular_nms(UseRegularNms); + set_out_quantized(OutQuantized); + set_format(format); +} +void DetectionPostProcess::set_input_size(const int64_t inputSize) { this->AddAttr(kInputSize, MakeValue(inputSize)); } +int64_t DetectionPostProcess::get_input_size() const { + auto value_ptr = this->GetAttr(kInputSize); + return GetValue(value_ptr); +} + +void DetectionPostProcess::set_scale(const std::vector &scale) { this->AddAttr(kScale, MakeValue(scale)); } +std::vector DetectionPostProcess::get_scale() const { + auto value_ptr = this->GetAttr(kScale); + return GetValue>(value_ptr); +} + +void DetectionPostProcess::set_nms_iou_threshold(const float NmsIouThreshold) { + this->AddAttr(kNmsIouThreshold, MakeValue(NmsIouThreshold)); +} +float DetectionPostProcess::get_nms_iou_threshold() const { + auto value_ptr = this->GetAttr(kNmsIouThreshold); + return GetValue(value_ptr); +} + +void DetectionPostProcess::set_nms_score_threshold(const float NmsScoreThreshold) { + this->AddAttr(kNmsScoreThreshold, MakeValue(NmsScoreThreshold)); +} +float DetectionPostProcess::get_nms_score_threshold() const { + auto value_ptr = this->GetAttr(kNmsScoreThreshold); + return GetValue(value_ptr); +} + +void DetectionPostProcess::set_max_detections(const int64_t MaxDetections) { + 
this->AddAttr(kMaxDetections, MakeValue(MaxDetections)); +} +int64_t DetectionPostProcess::get_max_detections() const { + auto value_ptr = this->GetAttr(kMaxDetections); + return GetValue(value_ptr); +} + +void DetectionPostProcess::set_detections_per_class(const int64_t DetectionsPerClass) { + this->AddAttr(kDetectionsPerClass, MakeValue(DetectionsPerClass)); +} +int64_t DetectionPostProcess::get_detections_per_class() const { + auto value_ptr = this->GetAttr(kDetectionsPerClass); + return GetValue(value_ptr); +} + +void DetectionPostProcess::set_max_classes_per_detection(const int64_t MaxClassesPerDetection) { + this->AddAttr(kMaxClassesPerDetection, MakeValue(MaxClassesPerDetection)); +} +int64_t DetectionPostProcess::get_max_classes_per_detection() const { + auto value_ptr = this->GetAttr(kMaxClassesPerDetection); + return GetValue(value_ptr); +} + +void DetectionPostProcess::set_num_classes(const int64_t NumClasses) { + this->AddAttr(kNumClasses, MakeValue(NumClasses)); +} +int64_t DetectionPostProcess::get_num_classes() const { + auto value_ptr = this->GetAttr(kNumClasses); + return GetValue(value_ptr); +} +void DetectionPostProcess::set_use_regular_nms(const bool UseRegularNms) { + this->AddAttr(kUseRegularNms, MakeValue(UseRegularNms)); +} +bool DetectionPostProcess::get_use_regular_nms() const { + auto value_ptr = this->GetAttr(kUseRegularNms); + return GetValue(value_ptr); +} + +void DetectionPostProcess::set_out_quantized(const bool OutQuantized) { + this->AddAttr(kOutQuantized, MakeValue(OutQuantized)); +} +bool DetectionPostProcess::get_out_quantized() const { + auto value_ptr = this->GetAttr(kOutQuantized); + return GetValue(value_ptr); +} +void DetectionPostProcess::set_format(const Format &format) { + int64_t f = format; + this->AddAttr(kFormat, MakeValue(f)); +} +Format DetectionPostProcess::get_format() const { + auto value_ptr = this->GetAttr(kFormat); + return Format(GetValue(value_ptr)); +} +AbstractBasePtr DetectionPostProcessInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args) { + MS_EXCEPTION_IF_NULL(primitive); + auto detection_prim = primitive->cast(); + MS_EXCEPTION_IF_NULL(detection_prim); + auto prim_name = detection_prim->name(); + CheckAndConvertUtils::CheckInteger("detection_post_process_infer", input_args.size(), kEqual, 3, prim_name); + MS_EXCEPTION_IF_NULL(input_args[0]); + MS_EXCEPTION_IF_NULL(input_args[1]); + MS_EXCEPTION_IF_NULL(input_args[2]); + auto boxes = input_args[0]; + auto scores = input_args[1]; + auto anchors = input_args[2]; + auto boxes_shape = CheckAndConvertUtils::ConvertShapePtrToShape("boxes_shape", boxes->BuildShape(), prim_name); + auto scores_shape = CheckAndConvertUtils::ConvertShapePtrToShape("scores_shape", scores->BuildShape(), prim_name); + auto anchors_shape = CheckAndConvertUtils::ConvertShapePtrToShape("anchors_shape", anchors->BuildShape(), prim_name); + if (detection_prim->get_format() == NHWC) { + boxes_shape = {boxes_shape[0], boxes_shape[3], boxes_shape[1], boxes_shape[2]}; + scores_shape = {scores_shape[0], scores_shape[3], scores_shape[1], scores_shape[2]}; + anchors_shape = {anchors_shape[0], anchors_shape[3], anchors_shape[1], anchors_shape[2]}; + } + auto num_classes = detection_prim->get_num_classes(); + CheckAndConvertUtils::CheckInRange("scores_shape[2]", scores_shape[2], kIncludeBoth, {num_classes, num_classes + 1}, + prim_name); + CheckAndConvertUtils::Check("boxes_shape[1]", boxes_shape[1], kEqual, "scores_shape[1]", scores_shape[1], prim_name, + 
ValueError); + CheckAndConvertUtils::Check("boxes_shape[1]", boxes_shape[1], kEqual, "anchors_shape[0]", anchors_shape[0], prim_name, + ValueError); + + // Infer shape + auto max_detections = detection_prim->get_max_detections(); + auto max_classes_per_detection = detection_prim->get_max_classes_per_detection(); + auto num_detected_boxes = max_detections * max_classes_per_detection; + std::vector output_boxes_shape = {1, num_detected_boxes, 4}; + std::vector output_class_shape = {1, num_detected_boxes}; + std::vector output_num_shape = {1}; + + // Infer type + auto output_type = TypeIdToType(kNumberTypeFloat32); + + auto output0 = std::make_shared(output_type, output_boxes_shape); + auto output1 = std::make_shared(output_type, output_class_shape); + auto output2 = std::make_shared(output_type, output_num_shape); + AbstractBasePtrList output = {output0, output1, output1, output2}; + if (detection_prim->get_format() == NHWC) { + output = {output0, output1, output2, output1}; + } + return std::make_shared(output); +} +REGISTER_PRIMITIVE_EVAL_IMPL(DetectionPostProcess, prim::kPrimDetectionPostProcess, DetectionPostProcessInfer); +REGISTER_PRIMITIVE_C(kNameDetectionPostProcess, DetectionPostProcess); +} // namespace ops +} // namespace mindspore diff --git a/mindspore/core/ops/detection_post_process.h b/mindspore/core/ops/detection_post_process.h new file mode 100644 index 00000000000..e6308858ce2 --- /dev/null +++ b/mindspore/core/ops/detection_post_process.h @@ -0,0 +1,71 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_CORE_OPS_DETECTION_POST_PROCESS_H_ +#define MINDSPORE_CORE_OPS_DETECTION_POST_PROCESS_H_ + +#include +#include +#include +#include +#include "ops/primitive_c.h" +#include "abstract/abstract_value.h" +#include "utils/check_convert_utils.h" + +namespace mindspore { +namespace ops { +constexpr auto kNameDetectionPostProcess = "DetectionPostProcess"; +class DetectionPostProcess : public PrimitiveC { + public: + DetectionPostProcess() : PrimitiveC(kNameDetectionPostProcess) {} + ~DetectionPostProcess() = default; + MS_DECLARE_PARENT(DetectionPostProcess, PrimitiveC); + void Init(const int64_t inputSize, const std::vector &scale, const float NmsIouThreshold, + const float NmsScoreThreshold, const int64_t MaxDetections, const int64_t DetectionsPerClass, + const int64_t MaxClassesPerDetection, const int64_t NumClasses, const bool UseRegularNms, + const bool OutQuantized, const Format &format = NCHW); + // scale:(h,w,x,y) + void set_input_size(const int64_t inputSize); + void set_scale(const std::vector &scale); + void set_nms_iou_threshold(const float NmsIouThreshold); + void set_nms_score_threshold(const float NmsScoreThreshold); + void set_max_detections(const int64_t MaxDetections); + void set_detections_per_class(const int64_t DetectionsPerClass); + void set_max_classes_per_detection(const int64_t MaxClassesPerDetection); + void set_num_classes(const int64_t NumClasses); + void set_use_regular_nms(const bool UseRegularNms); + void set_out_quantized(const bool OutQuantized); + void set_format(const Format &format); + + int64_t get_input_size() const; + std::vector get_scale() const; + float get_nms_iou_threshold() const; + float get_nms_score_threshold() const; + int64_t get_max_detections() const; + int64_t get_detections_per_class() const; + int64_t get_max_classes_per_detection() const; + int64_t get_num_classes() const; + + bool get_use_regular_nms() const; + bool get_out_quantized() const; + Format get_format() const; +}; +AbstractBasePtr DetectionPostProcessInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args); +using PrimDetectionPostProcessPtr = std::shared_ptr; +} // namespace ops +} // namespace mindspore + +#endif // MINDSPORE_CORE_OPS_DETECTION_POST_PROCESS_H_ diff --git a/mindspore/core/ops/div.cc b/mindspore/core/ops/div.cc new file mode 100644 index 00000000000..9f1b54b3bfb --- /dev/null +++ b/mindspore/core/ops/div.cc @@ -0,0 +1,57 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
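A sketch of the DetectionPostProcess attribute setup; the scale element type (float) and every numeric value here are assumptions for illustration. The box and class outputs of the infer routine above are sized from max_detections * max_classes_per_detection.

#include <cstdint>
#include <memory>
#include <vector>
#include "ops/detection_post_process.h"

void BuildDetectionPostProcessExample() {
  auto op = std::make_shared<mindspore::ops::DetectionPostProcess>();
  op->Init(/*inputSize=*/300, /*scale=*/std::vector<float>{10.0f, 10.0f, 5.0f, 5.0f},  // scale order (h, w, x, y) per the header comment
           /*NmsIouThreshold=*/0.6f, /*NmsScoreThreshold=*/0.001f,
           /*MaxDetections=*/10, /*DetectionsPerClass=*/100,
           /*MaxClassesPerDetection=*/1, /*NumClasses=*/90,
           /*UseRegularNms=*/false, /*OutQuantized=*/false);  // format defaults to NCHW
  // Boxes come out as {1, num_detected_boxes, 4}; classes and scores as {1, num_detected_boxes}.
  int64_t num_detected_boxes = op->get_max_detections() * op->get_max_classes_per_detection();  // 10
  (void)num_detected_boxes;
}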
+ */ + +#include +#include +#include +#include +#include "ops/div.h" +#include "ops/op_utils.h" +#include "utils/check_convert_utils.h" +#include "abstract/primitive_infer_map.h" + +namespace mindspore { +namespace ops { +namespace { +abstract::ShapePtr InferShape(const PrimitivePtr &primitive, const std::vector &input_args) { + MS_EXCEPTION_IF_NULL(primitive); + auto div_prim = primitive->cast(); + MS_EXCEPTION_IF_NULL(div_prim); + auto prim_name = div_prim->name(); + return BroadCastInferShape(prim_name, input_args); +} + +TypePtr InferType(const PrimitivePtr &prim, const std::vector &input_args) { + for (const auto &item : input_args) { + MS_EXCEPTION_IF_NULL(item); + } + std::map types; + types.emplace("x", input_args[0]->BuildType()); + types.emplace("y", input_args[1]->BuildType()); + auto infer_type = CheckAndConvertUtils::CheckTensorTypeSame(types, common_valid_types, prim->name()); + return TypeIdToType(infer_type); +} +} // namespace + +AbstractBasePtr DivInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args) { + return std::make_shared(InferType(primitive, input_args), + InferShape(primitive, input_args)->shape()); +} +REGISTER_PRIMITIVE_EVAL_IMPL(Div, prim::kPrimDiv, DivInfer); +REGISTER_PRIMITIVE_C(kNameDiv, Div); +} // namespace ops +} // namespace mindspore diff --git a/mindspore/core/ops/div.h b/mindspore/core/ops/div.h new file mode 100644 index 00000000000..5ee3ebf57c1 --- /dev/null +++ b/mindspore/core/ops/div.h @@ -0,0 +1,43 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_CORE_OPS_DIV_H_ +#define MINDSPORE_CORE_OPS_DIV_H_ +#include +#include +#include +#include "ops/primitive_c.h" +#include "abstract/abstract_value.h" +#include "utils/check_convert_utils.h" + +namespace mindspore { +namespace ops { +constexpr auto kNameDiv = "Div"; +class Div : public PrimitiveC { + public: + Div() : PrimitiveC(kNameDiv) { InitIOName({"x", "y"}, {"output"}); } + explicit Div(const std::string k_name) : PrimitiveC(k_name) { InitIOName({"x", "y"}, {"output"}); } + ~Div() = default; + MS_DECLARE_PARENT(Div, PrimitiveC); + void Init() {} +}; +AbstractBasePtr DivInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args); +using PrimDivPtr = std::shared_ptr
; +} // namespace ops +} // namespace mindspore + +#endif // MINDSPORE_CORE_OPS_DIV_H_ diff --git a/mindspore/core/ops/dropout.cc b/mindspore/core/ops/dropout.cc new file mode 100644 index 00000000000..04ba0a585ca --- /dev/null +++ b/mindspore/core/ops/dropout.cc @@ -0,0 +1,70 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include +#include +#include + +#include "ops/dropout.h" +#include "ops/op_utils.h" +#include "utils/check_convert_utils.h" + +namespace mindspore { +namespace ops { +void Dropout::Init(const float keep_prob) { this->set_keep_prob(keep_prob); } + +void Dropout::set_keep_prob(const float keep_prob) { + CheckAndConvertUtils::CheckInRange(kKeepProb, keep_prob, kIncludeRight, {0.0, 1.0}, this->name()); + this->AddAttr(kKeepProb, MakeValue(keep_prob)); +} + +float Dropout::get_keep_prob() const { + auto value_ptr = this->GetAttr(kKeepProb); + return GetValue(value_ptr); +} + +AbstractBasePtr DropoutInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args) { + MS_EXCEPTION_IF_NULL(primitive); + auto dropout_prim = primitive->cast(); + MS_EXCEPTION_IF_NULL(dropout_prim); + auto prim_name = dropout_prim->name(); + CheckAndConvertUtils::CheckInteger("dropout_infer", input_args.size(), kEqual, 1, prim_name); + + // Infer shape + auto x_shape = CheckAndConvertUtils::ConvertShapePtrToShape("x_shape", input_args[0]->BuildShape(), prim_name); + CheckAndConvertUtils::CheckInteger("x_shape", x_shape.size(), kGreaterEqual, 1, prim_name); + std::vector out_shape; + out_shape.insert(out_shape.end(), x_shape.begin(), x_shape.end()); + out_shape.insert(out_shape.end(), x_shape.begin(), x_shape.end()); + auto infer_shape = std::make_shared(out_shape); + + // Infer type + auto dtype = input_args[0]->BuildType(); + const std::set valid_types = {kNumberTypeFloat16, kNumberTypeFloat32}; + CheckAndConvertUtils::CheckTensorTypeValid("x_dtype", dtype, valid_types, prim_name); + auto tensor_type = dtype->cast(); + MS_EXCEPTION_IF_NULL(tensor_type); + auto element = tensor_type->element(); + MS_EXCEPTION_IF_NULL(element); + auto infer_type = std::make_shared(TypeIdToType(element->type_id())); + + return std::make_shared(infer_type, infer_shape->shape()); +} +REGISTER_PRIMITIVE_EVAL_IMPL(Dropout, prim::kPrimDropout, DropoutInfer); +REGISTER_PRIMITIVE_C(kNameDropout, Dropout); +} // namespace ops +} // namespace mindspore diff --git a/mindspore/core/c_ops/dropout.h b/mindspore/core/ops/dropout.h similarity index 62% rename from mindspore/core/c_ops/dropout.h rename to mindspore/core/ops/dropout.h index c28368deee7..e8e19400c13 100644 --- a/mindspore/core/c_ops/dropout.h +++ b/mindspore/core/ops/dropout.h @@ -14,22 +14,30 @@ * limitations under the License. 
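DivInfer above delegates its shape work to BroadCastInferShape from ops/op_utils.h; as an illustration only (not the MindSpore implementation), the usual right-aligned broadcast rule it is assumed to apply looks like this:

#include <algorithm>
#include <cstdint>
#include <stdexcept>
#include <vector>

std::vector<int64_t> BroadcastShapes(std::vector<int64_t> x, std::vector<int64_t> y) {
  if (x.size() < y.size()) std::swap(x, y);
  y.insert(y.begin(), x.size() - y.size(), 1);  // left-pad the shorter shape with 1s
  std::vector<int64_t> out(x.size());
  for (size_t i = 0; i < x.size(); ++i) {
    if (x[i] != y[i] && x[i] != 1 && y[i] != 1) {
      throw std::invalid_argument("shapes are not broadcastable");
    }
    out[i] = std::max(x[i], y[i]);
  }
  return out;  // e.g. {2, 3, 4} and {3, 1} -> {2, 3, 4}
}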
*/ -#ifndef MINDSPORE_CORE_C_OPS_DROPOUT_H_ -#define MINDSPORE_CORE_C_OPS_DROPOUT_H_ -#include "c_ops/primitive_c.h" +#ifndef MINDSPORE_CORE_OPS_DROPOUT_H_ +#define MINDSPORE_CORE_OPS_DROPOUT_H_ +#include +#include + +#include "ops/primitive_c.h" #include "abstract/abstract_value.h" #include "utils/check_convert_utils.h" namespace mindspore { +namespace ops { constexpr auto kNameDropout = "Dropout"; class Dropout : public PrimitiveC { public: Dropout() : PrimitiveC(kNameDropout) {} ~Dropout() = default; MS_DECLARE_PARENT(Dropout, PrimitiveC); - void Init(float keep_prob = 0.5); - void set_keep_prob(float keep_prob); - float get_keep_prob(); + void Init(const float keep_prob = 0.5); + void set_keep_prob(const float keep_prob); + float get_keep_prob() const; }; +AbstractBasePtr DropoutInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args); +using PrimDropoutPtr = std::shared_ptr; +} // namespace ops } // namespace mindspore -#endif // MINDSPORE_CORE_C_OPS_DROPOUT_H_ +#endif // MINDSPORE_CORE_OPS_DROPOUT_H_ diff --git a/mindspore/core/c_ops/embedding_lookup.cc b/mindspore/core/ops/eltwise.cc similarity index 61% rename from mindspore/core/c_ops/embedding_lookup.cc rename to mindspore/core/ops/eltwise.cc index f7e08acdcd9..cd76e85945b 100644 --- a/mindspore/core/c_ops/embedding_lookup.cc +++ b/mindspore/core/ops/eltwise.cc @@ -14,20 +14,21 @@ * limitations under the License. */ -#include "c_ops/embedding_lookup.h" -#include "c_ops/op_utils.h" +#include "ops/eltwise.h" +#include "ops/op_utils.h" #include "utils/check_convert_utils.h" -#include "abstract/primitive_infer_map.h" namespace mindspore { - -void EmbeddingLookup::set_setattr_flag(const bool &setattr_flag) { - this->AddAttr(kSetattrFlag, MakeValue(setattr_flag)); +namespace ops { +void Eltwise::Init(const EltwiseMode &mode) { this->set_mode(mode); } +void Eltwise::set_mode(const EltwiseMode &mode) { + int64_t m = mode; + this->AddAttr(kMode, MakeValue(m)); } - -bool EmbeddingLookup::get_setattr_flag() const { - auto value_ptr = GetAttr(kSetattrFlag); - return GetValue(value_ptr); +EltwiseMode Eltwise::get_mode() const { + auto value_ptr = this->GetAttr(kMode); + return EltwiseMode(GetValue(value_ptr)); } -REGISTER_PRIMITIVE_C(kNameEmbeddingLookup, EmbeddingLookup); +REGISTER_PRIMITIVE_C(kNameEltwise, Eltwise); +} // namespace ops } // namespace mindspore diff --git a/mindspore/core/ops/eltwise.h b/mindspore/core/ops/eltwise.h new file mode 100644 index 00000000000..ec0347a9f2b --- /dev/null +++ b/mindspore/core/ops/eltwise.h @@ -0,0 +1,38 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
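A minimal sketch of the Dropout attribute API above; the keep_prob value is illustrative, and set_keep_prob rejects anything outside (0.0, 1.0]:

#include <memory>
#include "ops/dropout.h"

void BuildDropoutExample() {
  auto dropout = std::make_shared<mindspore::ops::Dropout>();
  dropout->Init(/*keep_prob=*/0.8f);   // Init defaults to 0.5 when no value is given
  float p = dropout->get_keep_prob();  // 0.8f
  (void)p;
}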
+ */ + +#ifndef MINDSPORE_CORE_OPS_ELTWISE_H_ +#define MINDSPORE_CORE_OPS_ELTWISE_H_ +#include "ops/primitive_c.h" +#include "abstract/abstract_value.h" +#include "utils/check_convert_utils.h" + +namespace mindspore { +namespace ops { +constexpr auto kNameEltwise = "Eltwise"; +class Eltwise : public PrimitiveC { + public: + Eltwise() : PrimitiveC(kNameEltwise) {} + ~Eltwise() = default; + MS_DECLARE_PARENT(Eltwise, PrimitiveC); + void Init(const EltwiseMode &mode); + void set_mode(const EltwiseMode &mode); + EltwiseMode get_mode() const; +}; +} // namespace ops +} // namespace mindspore + +#endif // MINDSPORE_CORE_OPS_ELTWISE_H_ diff --git a/mindspore/core/ops/elu.cc b/mindspore/core/ops/elu.cc new file mode 100644 index 00000000000..98e6bd6bcb2 --- /dev/null +++ b/mindspore/core/ops/elu.cc @@ -0,0 +1,75 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include +#include +#include +#include +#include +#include +#include "ops/elu.h" + +#include "ops/op_utils.h" +#include "utils/check_convert_utils.h" +#include "abstract/primitive_infer_map.h" + +namespace mindspore { +namespace ops { +namespace { +abstract::ShapePtr InferShape(const PrimitivePtr &primitive, const std::vector &input_args) { + MS_EXCEPTION_IF_NULL(primitive); + auto elu_prim = primitive->cast(); + MS_EXCEPTION_IF_NULL(elu_prim); + auto op_name = elu_prim->name(); + for (const auto &item : input_args) { + MS_EXCEPTION_IF_NULL(item); + } + auto in_shape = CheckAndConvertUtils::ConvertShapePtrToShape("input_shape", input_args[0]->GetShapeTrack(), op_name); + return std::make_shared(in_shape); +} + +TypePtr InferType(const PrimitivePtr &prim, const std::vector &input_args) { + if (std::any_of(input_args.begin(), input_args.end(), [](const AbstractBasePtr &a) { return a == nullptr; })) { + MS_LOG(EXCEPTION) << "nullptr"; + } + std::map types; + const std::set valid_types = {kNumberTypeFloat16, kNumberTypeFloat32, kNumberTypeFloat64}; + types.emplace("x", input_args[0]->BuildType()); + auto infer_type = CheckAndConvertUtils::CheckTensorTypeSame(types, valid_types, prim->name()); + return TypeIdToType(infer_type); +} +} // namespace +void Elu::Init(const float alpha) { this->set_alpha(alpha); } + +void Elu::set_alpha(const float alpha) { + AddAttr(kAlpha, MakeValue(CheckAndConvertUtils::CheckValue(kAlpha, alpha, kEqual, 1.0, name()))); +} + +float Elu::get_alpha() const { + auto value_ptr = this->GetAttr(kAlpha); + return GetValue(value_ptr); +} + +AbstractBasePtr EluInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args) { + return std::make_shared(InferType(primitive, input_args), + InferShape(primitive, input_args)->shape()); +} + +REGISTER_PRIMITIVE_EVAL_IMPL(Elu, prim::kPrimElu, EluInfer); +REGISTER_PRIMITIVE_C(kNameElu, Elu); +} // namespace ops +} // namespace mindspore diff --git a/mindspore/core/ops/elu.h b/mindspore/core/ops/elu.h new file mode 100644 index 00000000000..3da8c1b202d --- /dev/null +++ 
b/mindspore/core/ops/elu.h @@ -0,0 +1,43 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_CORE_OPS_ELU_H_ +#define MINDSPORE_CORE_OPS_ELU_H_ +#include +#include +#include "ops/primitive_c.h" +#include "ops/op_utils.h" +#include "abstract/abstract_value.h" +#include "utils/check_convert_utils.h" + +namespace mindspore { +namespace ops { +constexpr auto kNameElu = "Elu"; +class Elu : public PrimitiveC { + public: + Elu() : PrimitiveC(kNameElu) {} + ~Elu() = default; + MS_DECLARE_PARENT(Elu, PrimitiveC); + void Init(const float alpha = 0.0); + void set_alpha(const float alpha); + float get_alpha() const; +}; +AbstractBasePtr EluInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args); +using PrimElu = std::shared_ptr; +} // namespace ops +} // namespace mindspore +#endif // MINDSPORE_CORE_OPS_ELU_H_ diff --git a/mindspore/core/ops/embedding_lookup.cc b/mindspore/core/ops/embedding_lookup.cc new file mode 100644 index 00000000000..e35da3296c5 --- /dev/null +++ b/mindspore/core/ops/embedding_lookup.cc @@ -0,0 +1,87 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
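A sketch of the Elu attribute API above. Note that set_alpha validates alpha with kEqual against 1.0, so as written only alpha == 1.0 passes, and the 0.0 default in Elu::Init appears not to satisfy that check; the value below is chosen to respect it.

#include <memory>
#include "ops/elu.h"

void BuildEluExample() {
  auto elu = std::make_shared<mindspore::ops::Elu>();
  elu->set_alpha(1.0f);            // the only value accepted by the kEqual-to-1.0 check above
  float alpha = elu->get_alpha();  // 1.0f
  (void)alpha;
}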
+ */ + +#include +#include "ops/embedding_lookup.h" +#include "ops/op_utils.h" +#include "utils/check_convert_utils.h" +#include "abstract/primitive_infer_map.h" + +namespace mindspore { +namespace ops { +void EmbeddingLookup::Init(const bool setattr_flag) { this->set_setattr_flag(setattr_flag); } + +void EmbeddingLookup::set_setattr_flag(const bool setattr_flag) { + this->AddAttr(kSetattrFlag, MakeValue(setattr_flag)); +} + +bool EmbeddingLookup::get_setattr_flag() const { + auto value_ptr = GetAttr(kSetattrFlag); + return GetValue(value_ptr); +} + +AbstractBasePtr EmbeddingLookupInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args) { + MS_EXCEPTION_IF_NULL(primitive); + auto prim_name = primitive->name(); + CheckAndConvertUtils::CheckInteger("input number", input_args.size(), kEqual, 3, prim_name); + for (const auto &item : input_args) { + MS_EXCEPTION_IF_NULL(item); + } + auto params = input_args[0]->cast(); + MS_EXCEPTION_IF_NULL(params); + auto indices = input_args[1]->cast(); + MS_EXCEPTION_IF_NULL(indices); + const std::set int_valid_types = {kNumberTypeInt8, kNumberTypeInt16, kNumberTypeInt32, kNumberTypeInt64}; + CheckAndConvertUtils::CheckTensorTypeValid("indices type", indices->BuildType(), int_valid_types, prim_name); + MS_EXCEPTION_IF_NULL(input_args[2]->BuildType()); + auto offset_type = input_args[2]->BuildType()->type_id(); + if (int_valid_types.find(offset_type) == int_valid_types.end()) { + MS_LOG(EXCEPTION) << "offset must be int."; + } + + MS_EXCEPTION_IF_NULL(params->shape()); + auto params_shp = params->shape()->shape(); + MS_EXCEPTION_IF_NULL(indices->shape()); + auto indices_shp = indices->shape()->shape(); + ShapeVector shape; + shape.insert(shape.end(), indices_shp.begin(), indices_shp.end()); + shape.insert(shape.end(), params_shp.begin() + 1, params_shp.end()); + auto indices_max_shape = indices->shape()->max_shape(); + ShapeVector max_shape; + if (!indices_max_shape.empty()) { + max_shape.insert(max_shape.end(), indices_max_shape.begin(), indices_max_shape.end()); + max_shape.insert(max_shape.end(), params_shp.begin() + 1, params_shp.end()); + } else { + max_shape = shape; + } + auto indices_min_shape = indices->shape()->min_shape(); + ShapeVector min_shape; + if (!indices_min_shape.empty()) { + min_shape.insert(min_shape.end(), indices_min_shape.begin(), indices_min_shape.end()); + min_shape.insert(min_shape.end(), params_shp.begin() + 1, params_shp.end()); + } else { + min_shape = shape; + } + + return std::make_shared(params->element(), + std::make_shared(shape, min_shape, max_shape)); +} + +REGISTER_PRIMITIVE_EVAL_IMPL(EmbeddingLookup, prim::kPrimEmbeddingLookup, EmbeddingLookupInfer); +REGISTER_PRIMITIVE_C(kNameEmbeddingLookup, EmbeddingLookup); +} // namespace ops +} // namespace mindspore diff --git a/mindspore/core/c_ops/embedding_lookup.h b/mindspore/core/ops/embedding_lookup.h similarity index 64% rename from mindspore/core/c_ops/embedding_lookup.h rename to mindspore/core/ops/embedding_lookup.h index 9c358d3e163..9997232fc1f 100644 --- a/mindspore/core/c_ops/embedding_lookup.h +++ b/mindspore/core/ops/embedding_lookup.h @@ -14,27 +14,32 @@ * limitations under the License. 
*/ -#ifndef MINDSPORE_CORE_C_OPS_EMBEDDINGLOOKUP_H_ -#define MINDSPORE_CORE_C_OPS_EMBEDDINGLOOKUP_H_ +#ifndef MINDSPORE_CORE_OPS_EMBEDDING_LOOKUP_H_ +#define MINDSPORE_CORE_OPS_EMBEDDING_LOOKUP_H_ #include #include #include #include -#include "c_ops/primitive_c.h" +#include "ops/primitive_c.h" #include "abstract/abstract_value.h" #include "utils/check_convert_utils.h" namespace mindspore { +namespace ops { constexpr auto kNameEmbeddingLookup = "EmbeddingLookup"; class EmbeddingLookup : public PrimitiveC { public: - EmbeddingLookup() : PrimitiveC(kNameEmbeddingLookup) { InitIOName({"params", "indices"}, {"offset"}); } + EmbeddingLookup() : PrimitiveC(kNameEmbeddingLookup) { InitIOName({"params", "indices", "offset"}, {"output"}); } ~EmbeddingLookup() = default; MS_DECLARE_PARENT(EmbeddingLookup, PrimitiveC); - void set_setattr_flag(const bool &setattr_flag = true); + void Init(const bool setattr_flag = true); + void set_setattr_flag(const bool setattr_flag); bool get_setattr_flag() const; - // void Init() {} }; +AbstractBasePtr EmbeddingLookupInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args); +using PrimEmbeddingLookupPtr = std::shared_ptr; +} // namespace ops } // namespace mindspore -#endif // MINDSPORE_CORE_C_OPS_EMBEDDINGLOOKUP_H_ +#endif // MINDSPORE_CORE_OPS_EMBEDDING_LOOKUP_H_ diff --git a/mindspore/core/ops/equal.cc b/mindspore/core/ops/equal.cc new file mode 100644 index 00000000000..8e2f44c031a --- /dev/null +++ b/mindspore/core/ops/equal.cc @@ -0,0 +1,58 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
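// [Editor's note - illustrative sketch, not part of the patch] Shape rule implemented by
// EmbeddingLookupInfer above: output_shape = indices_shape + params_shape[1:], with the
// min/max shapes carried over from the indices tensor when dynamic-shape info is present.
// Hypothetical example: params (100, 16) and indices (4, 5) give an output of (4, 5, 16).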
+ */ + +#include +#include +#include +#include +#include +#include "ops/equal.h" +#include "ops/op_utils.h" +#include "utils/check_convert_utils.h" +#include "abstract/primitive_infer_map.h" + +namespace mindspore { +namespace ops { +namespace { +abstract::ShapePtr InferShape(const PrimitivePtr &primitive, const std::vector &input_args) { + MS_EXCEPTION_IF_NULL(primitive); + auto equal_prim = primitive->cast(); + MS_EXCEPTION_IF_NULL(equal_prim); + auto op_name = equal_prim->name(); + return BroadCastInferShape(op_name, input_args); +} + +TypePtr InferType(const PrimitivePtr &prim, const std::vector &input_args) { + if (std::any_of(input_args.begin(), input_args.end(), [](const AbstractBasePtr &a) { return a == nullptr; })) { + MS_LOG(EXCEPTION) << "nullptr"; + } + std::map types; + types.emplace("x", input_args[0]->BuildType()); + types.emplace("y", input_args[1]->BuildType()); + auto infer_type = CheckAndConvertUtils::CheckTensorTypeSame(types, common_valid_types, prim->name()); + return TypeIdToType(infer_type); +} +} // namespace + +AbstractBasePtr EqualInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args) { + return std::make_shared(InferType(primitive, input_args), + InferShape(primitive, input_args)->shape()); +} +REGISTER_PRIMITIVE_EVAL_IMPL(Equal, prim::kPrimEqual, EqualInfer); +REGISTER_PRIMITIVE_C(kNameEqual, Equal); +} // namespace ops +} // namespace mindspore diff --git a/mindspore/core/c_ops/equal.h b/mindspore/core/ops/equal.h similarity index 69% rename from mindspore/core/c_ops/equal.h rename to mindspore/core/ops/equal.h index 57009213aee..e5f86c19f2c 100644 --- a/mindspore/core/c_ops/equal.h +++ b/mindspore/core/ops/equal.h @@ -14,13 +14,17 @@ * limitations under the License. */ -#ifndef MINDSPORE_CORE_C_OPS_EQUAL_H_ -#define MINDSPORE_CORE_C_OPS_EQUAL_H_ -#include "c_ops/primitive_c.h" +#ifndef MINDSPORE_CORE_OPS_EQUAL_H_ +#define MINDSPORE_CORE_OPS_EQUAL_H_ +#include +#include + +#include "ops/primitive_c.h" #include "abstract/abstract_value.h" #include "utils/check_convert_utils.h" namespace mindspore { +namespace ops { constexpr auto kNameEqual = "Equal"; class Equal : public PrimitiveC { public: @@ -29,6 +33,11 @@ class Equal : public PrimitiveC { MS_DECLARE_PARENT(Equal, PrimitiveC); void Init() {} }; + +AbstractBasePtr EqualInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args); +using PrimEqualPtr = std::shared_ptr; +} // namespace ops } // namespace mindspore -#endif // MINDSPORE_CORE_C_OPS_EQUAL_H_ +#endif // MINDSPORE_CORE_OPS_EQUAL_H_ diff --git a/mindspore/core/ops/exp.cc b/mindspore/core/ops/exp.cc new file mode 100644 index 00000000000..b8e714df63c --- /dev/null +++ b/mindspore/core/ops/exp.cc @@ -0,0 +1,54 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#include +#include +#include +#include +#include "ops/exp.h" +#include "ops/op_utils.h" + +namespace mindspore { +namespace ops { +namespace { +abstract::ShapePtr InferShape(const PrimitivePtr &primitive, const std::vector &input_args) { + MS_EXCEPTION_IF_NULL(primitive); + auto x = input_args[0]->BuildShape(); + auto shape_element = x->cast(); + MS_EXCEPTION_IF_NULL(shape_element); + return shape_element; +} + +TypePtr InferType(const PrimitivePtr &prim, const std::vector &input_args) { + MS_EXCEPTION_IF_NULL(prim); + CheckAndConvertUtils::CheckInteger("input number", input_args.size(), kEqual, 1, prim->name()); + for (const auto &item : input_args) { + MS_EXCEPTION_IF_NULL(item); + } + std::map types; + types.emplace("x", input_args[0]->BuildType()); + auto infer_type = CheckAndConvertUtils::CheckTensorTypeSame(types, common_valid_types, prim->name()); + return TypeIdToType(infer_type); +} +} // namespace +AbstractBasePtr ExpInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args) { + return std::make_shared(InferType(primitive, input_args), + InferShape(primitive, input_args)->shape()); +} +REGISTER_PRIMITIVE_EVAL_IMPL(Exp, prim::kPrimExp, ExpInfer); +REGISTER_PRIMITIVE_C(kNameExp, Exp); +} // namespace ops +} // namespace mindspore diff --git a/mindspore/core/c_ops/exp.h b/mindspore/core/ops/exp.h similarity index 64% rename from mindspore/core/c_ops/exp.h rename to mindspore/core/ops/exp.h index 0236997bf0f..975223cfd22 100644 --- a/mindspore/core/c_ops/exp.h +++ b/mindspore/core/ops/exp.h @@ -14,21 +14,32 @@ * limitations under the License. */ -#ifndef MINDSPORE_CORE_C_OPS_EXP_H_ -#define MINDSPORE_CORE_C_OPS_EXP_H_ -#include "c_ops/primitive_c.h" +#ifndef MINDSPORE_CORE_OPS_EXP_H_ +#define MINDSPORE_CORE_OPS_EXP_H_ +#include +#include +#include + +#include "ops/primitive_c.h" #include "abstract/abstract_value.h" #include "utils/check_convert_utils.h" namespace mindspore { +namespace ops { constexpr auto kNameExp = "Exp"; class Exp : public PrimitiveC { public: Exp() : PrimitiveC(kNameExp) { InitIOName({"x"}, {"y"}); } + explicit Exp(const std::string k_name) : PrimitiveC(k_name) { InitIOName({"x"}, {"y"}); } ~Exp() = default; MS_DECLARE_PARENT(Exp, PrimitiveC); void Init() {} }; + +AbstractBasePtr ExpInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args); +using PrimExpPtr = std::shared_ptr; +} // namespace ops } // namespace mindspore -#endif // MINDSPORE_CORE_C_OPS_EXP_H_ +#endif // MINDSPORE_CORE_OPS_EXP_H_ diff --git a/mindspore/core/ops/expand_dims.cc b/mindspore/core/ops/expand_dims.cc new file mode 100644 index 00000000000..ac5555c77a0 --- /dev/null +++ b/mindspore/core/ops/expand_dims.cc @@ -0,0 +1,61 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include +#include +#include +#include +#include +#include +#include "ops/expand_dims.h" +#include "utils/check_convert_utils.h" +#include "abstract/primitive_infer_map.h" +#include "ops/control_depend.h" +#include "utils/log_adapter.h" + +namespace mindspore { +namespace ops { +AbstractBasePtr ExpandDimsInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args) { + MS_EXCEPTION_IF_NULL(primitive); + auto expand_dims_prim = primitive->cast(); + MS_EXCEPTION_IF_NULL(expand_dims_prim); + auto prim_name = expand_dims_prim->name(); + CheckAndConvertUtils::CheckInteger("input numbers", input_args.size(), kEqual, 2, prim_name); + for (const auto &item : input_args) { + MS_EXCEPTION_IF_NULL(item); + } + // Infer shape + auto x_shape = CheckAndConvertUtils::ConvertShapePtrToShape("x_shape", input_args[0]->BuildShape(), prim_name); + auto dim_val = GetValue(input_args[1]->BuildValue()); + auto rank = x_shape.size(); + CheckAndConvertUtils::CheckInRange("axis", dim_val, kIncludeBoth, {-rank - 1, rank}, prim_name); + if (dim_val < 0) { + dim_val += x_shape.size() + 1; + } + auto out_shape = x_shape; + out_shape.insert(out_shape.begin() + dim_val, 1, 1); + + // Infer type + auto x_type = input_args[0]->BuildType()->cast()->element(); + std::set valid_x_type = {TypeIdToType(kObjectTypeTensorType)}; + CheckAndConvertUtils::CheckSubClass("x_type", x_type, valid_x_type, prim_name); + return std::make_shared(x_type, out_shape); +} +REGISTER_PRIMITIVE_EVAL_IMPL(ExpandDims, prim::kPrimExpandDims, ExpandDimsInfer); +REGISTER_PRIMITIVE_C(kNameExpandDims, ExpandDims); +} // namespace ops +} // namespace mindspore diff --git a/mindspore/core/c_ops/expand_dims.h b/mindspore/core/ops/expand_dims.h similarity index 68% rename from mindspore/core/c_ops/expand_dims.h rename to mindspore/core/ops/expand_dims.h index ba935122ac2..084d38d7b88 100644 --- a/mindspore/core/c_ops/expand_dims.h +++ b/mindspore/core/ops/expand_dims.h @@ -14,13 +14,16 @@ * limitations under the License. */ -#ifndef MINDSPORE_CORE_C_OPS_EXPANDDIMS_H_ -#define MINDSPORE_CORE_C_OPS_EXPANDDIMS_H_ -#include "c_ops/primitive_c.h" +#ifndef MINDSPORE_CORE_OPS_EXPAND_DIMS_H_ +#define MINDSPORE_CORE_OPS_EXPAND_DIMS_H_ +#include +#include +#include "ops/primitive_c.h" #include "abstract/abstract_value.h" #include "utils/check_convert_utils.h" namespace mindspore { +namespace ops { constexpr auto kNameExpandDims = "ExpandDims"; class ExpandDims : public PrimitiveC { public: @@ -29,6 +32,10 @@ class ExpandDims : public PrimitiveC { MS_DECLARE_PARENT(ExpandDims, PrimitiveC); void Init() {} }; +AbstractBasePtr ExpandDimsInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args); +using PrimExpandDims = std::shared_ptr; +} // namespace ops } // namespace mindspore -#endif // MINDSPORE_CORE_C_OPS_EXPANDDIMS_H_ +#endif // MINDSPORE_CORE_OPS_EXPAND_DIMS_H_ diff --git a/mindspore/core/ops/fake_quant_with_min_max_vars.cc b/mindspore/core/ops/fake_quant_with_min_max_vars.cc new file mode 100644 index 00000000000..8f54b7b49b9 --- /dev/null +++ b/mindspore/core/ops/fake_quant_with_min_max_vars.cc @@ -0,0 +1,91 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
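// [Editor's note - illustrative sketch, not part of the patch] Axis handling in
// ExpandDimsInfer above: the axis is range-checked against [-rank - 1, rank] and a
// negative axis is shifted by rank + 1 before the size-1 dimension is inserted.
// Hypothetical examples: x_shape (2, 3) with axis -1 becomes (2, 3, 1); axis 0 gives (1, 2, 3).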
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include
+#include
+#include
+#include
+#include "ops/fake_quant_with_min_max_vars.h"
+#include "ops/op_utils.h"
+#include "utils/check_convert_utils.h"
+#include "abstract/primitive_infer_map.h"
+
+namespace mindspore {
+namespace ops {
+namespace {
+abstract::ShapePtr InferShape(const PrimitivePtr &primitive, const std::vector &input_args) {
+  MS_EXCEPTION_IF_NULL(primitive);
+  auto fake_prim = primitive->cast();
+  MS_EXCEPTION_IF_NULL(fake_prim);
+  auto prim_name = fake_prim->name();
+  auto in_shape = CheckAndConvertUtils::ConvertShapePtrToShape("x_shape", input_args[0]->BuildShape(), prim_name);
+  auto min_shape = CheckAndConvertUtils::ConvertShapePtrToShape("min_shape", input_args[1]->BuildShape(), prim_name);
+  auto max_shape = CheckAndConvertUtils::ConvertShapePtrToShape("max_shape", input_args[2]->BuildShape(), prim_name);
+  CheckAndConvertUtils::CheckInteger("x_rank", in_shape.size(), kGreaterEqual, 1, prim_name);
+  CheckAndConvertUtils::Check("min_shape", min_shape, kEqual, "max_shape", max_shape, prim_name);
+  CheckAndConvertUtils::CheckInteger("min_shape", min_shape.size(), kEqual, 1, prim_name);
+  int64_t shape_val = 1;
+  for (size_t i = 0; i < in_shape.size(); i++) {
+    shape_val = shape_val * in_shape[i];
+    if (min_shape[0] > 1 && min_shape[0] != shape_val) {
+      MS_EXCEPTION(ValueError) << "For " + prim_name + " the shape of \'min\' cannot broadcast to the shape of \'x\'";
+    }
+  }
+  return std::make_shared(in_shape);
+}
+
+TypePtr InferType(const PrimitivePtr &prim, const std::vector &input_args) {
+  const std::set valid_types = {kNumberTypeFloat16, kNumberTypeFloat32};
+  if (std::any_of(input_args.begin(), input_args.end(), [](AbstractBasePtr a) { return a == nullptr; })) {
+    MS_LOG(EXCEPTION) << "nullptr";
+  }
+  std::map types;
+  types.emplace("x", input_args[0]->BuildType());
+  types.emplace("min", input_args[1]->BuildType());
+  types.emplace("max", input_args[2]->BuildType());
+  auto infer_type = CheckAndConvertUtils::CheckTensorTypeSame(types, valid_types, prim->name());
+  return TypeIdToType(infer_type);
+}
+}  // namespace
+void FakeQuantWithMinMaxVars::Init(const bool narrow_range, const int64_t num_bits) {
+  this->set_narrow_range(narrow_range);
+  this->set_num_bits(num_bits);
+}
+
+void FakeQuantWithMinMaxVars::set_narrow_range(const bool narrow_range) {
+  this->AddAttr(kNarrowRange, MakeValue(narrow_range));
+}
+
+bool FakeQuantWithMinMaxVars::get_narrow_range() const {
+  auto value_ptr = this->GetAttr(kNarrowRange);
+  return GetValue(value_ptr);
+}
+
+void FakeQuantWithMinMaxVars::set_num_bits(const int64_t num_bits) { this->AddAttr(kNumBits, MakeValue(num_bits)); }
+
+int64_t FakeQuantWithMinMaxVars::get_num_bits() const {
+  auto value_ptr = this->GetAttr(kNumBits);
+  return GetValue(value_ptr);
+}
+AbstractBasePtr FakeQuantWithMinMaxVarsInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive,
+                                             const std::vector &input_args) {
+  return std::make_shared(InferType(primitive, input_args),
+                                          InferShape(primitive, input_args)->shape());
+}
+REGISTER_PRIMITIVE_EVAL_IMPL(FakeQuantWithMinMaxVars, prim::kPrimFakeQuantWithMinMaxVars,
FakeQuantWithMinMaxVarsInfer); +REGISTER_PRIMITIVE_C(kNameFakeQuantWithMinMaxVars, FakeQuantWithMinMaxVars); +} // namespace ops +} // namespace mindspore diff --git a/mindspore/core/c_ops/fake_quant_with_min_max_vars.h b/mindspore/core/ops/fake_quant_with_min_max_vars.h similarity index 65% rename from mindspore/core/c_ops/fake_quant_with_min_max_vars.h rename to mindspore/core/ops/fake_quant_with_min_max_vars.h index 7af9f959a26..fb7091a69d5 100644 --- a/mindspore/core/c_ops/fake_quant_with_min_max_vars.h +++ b/mindspore/core/ops/fake_quant_with_min_max_vars.h @@ -13,29 +13,33 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -#ifndef MINDSPORE_CORE_C_OPS_FAKEQUANTWITHMINMAXVARS_H_ -#define MINDSPORE_CORE_C_OPS_FAKEQUANTWITHMINMAXVARS_H_ +#ifndef MINDSPORE_CORE_OPS_FAKE_QUANT_WITH_MIN_MAX_VARS_H_ +#define MINDSPORE_CORE_OPS_FAKE_QUANT_WITH_MIN_MAX_VARS_H_ +#include #include -#include "c_ops/primitive_c.h" +#include "ops/primitive_c.h" #include "abstract/abstract_value.h" #include "utils/check_convert_utils.h" namespace mindspore { +namespace ops { constexpr auto kNameFakeQuantWithMinMaxVars = "FakeQuantWithMinMaxVars"; class FakeQuantWithMinMaxVars : public PrimitiveC { public: FakeQuantWithMinMaxVars() : PrimitiveC(kNameFakeQuantWithMinMaxVars) {} ~FakeQuantWithMinMaxVars() = default; MS_DECLARE_PARENT(FakeQuantWithMinMaxVars, PrimitiveC); - void Init(const bool &narrow_range, int64_t num_bits); - void set_narrow_range(const bool &narrow_range); - void set_num_bits(int64_t num_bits); + void Init(const bool narrow_range = false, const int64_t num_bits = 8); + void set_narrow_range(const bool narrow_range); + void set_num_bits(const int64_t num_bits); bool get_narrow_range() const; int64_t get_num_bits() const; }; - +AbstractBasePtr FakeQuantWithMinMaxVarsInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args); using PrimFakeQuantWithMinMaxVarsPtr = std::shared_ptr; +} // namespace ops } // namespace mindspore -#endif // MINDSPORE_CORE_C_OPS_FAKEQUANTWITHMINMAXVARS_H_ +#endif // MINDSPORE_CORE_OPS_FAKE_QUANT_WITH_MIN_MAX_VARS_H_ diff --git a/mindspore/core/ops/fake_quant_with_min_max_vars_per_channel.cc b/mindspore/core/ops/fake_quant_with_min_max_vars_per_channel.cc new file mode 100644 index 00000000000..2f361f21b54 --- /dev/null +++ b/mindspore/core/ops/fake_quant_with_min_max_vars_per_channel.cc @@ -0,0 +1,77 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
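// [Editor's note - illustrative sketch, not part of the patch] Setting the attributes that
// FakeQuantWithMinMaxVars exposes above; the object construction is hypothetical.
//
//   mindspore::ops::FakeQuantWithMinMaxVars fq;
//   fq.Init(/*narrow_range=*/false, /*num_bits=*/8);  // the defaults declared in the header
//   bool narrow = fq.get_narrow_range();              // reads kNarrowRange
//   int64_t bits = fq.get_num_bits();                 // reads kNumBits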
+ */ + +#include "ops/fake_quant_with_min_max_vars_per_channel.h" +#include "ops/op_utils.h" + +namespace mindspore { +namespace ops { +void FakeQuantWithMinMaxVarsPerChannel::Init(const int64_t num_bits, const bool narrow_range) { + this->set_num_bits(num_bits); + this->set_narrow_range(narrow_range); +} +void FakeQuantWithMinMaxVarsPerChannel::set_num_bits(const int64_t num_bits) { + CheckAndConvertUtils::CheckInteger(kNumBits, num_bits, kGreaterThan, 0, this->name()); + this->AddAttr(kNumBits, MakeValue(num_bits)); +} +void FakeQuantWithMinMaxVarsPerChannel::set_narrow_range(const bool narrow_range) { + this->AddAttr(kNarrowRange, MakeValue(narrow_range)); +} +int64_t FakeQuantWithMinMaxVarsPerChannel::get_num_bits() const { + auto value_ptr = GetAttr(kNumBits); + return GetValue(value_ptr); +} +bool FakeQuantWithMinMaxVarsPerChannel::get_narrow_range() const { + auto value_ptr = GetAttr(kNarrowRange); + return GetValue(value_ptr); +} + +AbstractBasePtr FakeQuantWithMinMaxVarsPerChannelInfer(const abstract::AnalysisEnginePtr &, + const PrimitivePtr &primitive, + const std::vector &input_args) { + MS_EXCEPTION_IF_NULL(primitive); + auto FakeQuantWithMinMaxVarsPerChannel_prim = primitive->cast(); + MS_EXCEPTION_IF_NULL(FakeQuantWithMinMaxVarsPerChannel_prim); + auto op_name = FakeQuantWithMinMaxVarsPerChannel_prim->name(); + auto x_shape = CheckAndConvertUtils::ConvertShapePtrToShape("x_shape", input_args[0]->BuildShape(), op_name); + auto min_shape = CheckAndConvertUtils::ConvertShapePtrToShape("min_shape", input_args[1]->BuildShape(), op_name); + auto max_shape = CheckAndConvertUtils::ConvertShapePtrToShape("max_shape", input_args[2]->BuildShape(), op_name); + CheckAndConvertUtils::CheckInteger("x rank", (int64_t)x_shape.size(), kGreaterThan, 1, op_name); + CheckAndConvertUtils::Check("min shape", min_shape, kEqual, "max shape", max_shape, op_name); + CheckAndConvertUtils::CheckInteger("min shape", (int64_t)min_shape.size(), kEqual, 1, op_name); + CheckAndConvertUtils::Check("min shape", min_shape[0], kEqual, "x shape", x_shape[x_shape.size() - 1], op_name); + + auto x_type = input_args[0]->BuildType(); + auto min_type = input_args[1]->BuildType(); + auto max_type = input_args[2]->BuildType(); + std::vector type_name = {"x", "min", "max"}; + std::vector type = {x_type, min_type, max_type}; + for (int64_t i = 0; i < 3; i++) { + CheckAndConvertUtils::CheckTensorTypeValid(type_name[i], type[i], {kNumberTypeFloat16, kNumberTypeFloat32}, + op_name); + } + auto tensor_type = x_type->cast(); + MS_EXCEPTION_IF_NULL(tensor_type); + auto data_type = tensor_type->element(); + MS_EXCEPTION_IF_NULL(data_type); + return std::make_shared(data_type, x_shape); +} + +REGISTER_PRIMITIVE_EVAL_IMPL(FakeQuantWithMinMaxVarsPerChannel, prim::kPrimFakeQuantWithMinMaxVarsPerChannel, + FakeQuantWithMinMaxVarsPerChannelInfer); +REGISTER_PRIMITIVE_C(kNameFakeQuantWithMinMaxVarsPerChannel, FakeQuantWithMinMaxVarsPerChannel); +} // namespace ops +} // namespace mindspore diff --git a/mindspore/core/ops/fake_quant_with_min_max_vars_per_channel.h b/mindspore/core/ops/fake_quant_with_min_max_vars_per_channel.h new file mode 100644 index 00000000000..ebfae11f5ca --- /dev/null +++ b/mindspore/core/ops/fake_quant_with_min_max_vars_per_channel.h @@ -0,0 +1,49 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_CORE_OPS_FAKE_QUANT_WITH_MIN_MAX_VARS_PER_CHANNEL_H_ +#define MINDSPORE_CORE_OPS_FAKE_QUANT_WITH_MIN_MAX_VARS_PER_CHANNEL_H_ +#include +#include +#include +#include +#include "ops/primitive_c.h" +#include "abstract/abstract_value.h" +#include "utils/check_convert_utils.h" + +namespace mindspore { +namespace ops { +constexpr auto kNameFakeQuantWithMinMaxVarsPerChannel = "FakeQuantWithMinMaxVarsPerChannel"; +class FakeQuantWithMinMaxVarsPerChannel : public PrimitiveC { + public: + FakeQuantWithMinMaxVarsPerChannel() : PrimitiveC(kNameFakeQuantWithMinMaxVarsPerChannel) {} + ~FakeQuantWithMinMaxVarsPerChannel() = default; + MS_DECLARE_PARENT(FakeQuantWithMinMaxVarsPerChannel, PrimitiveC); + void Init(const int64_t num_bits = 8, const bool narrow_range = false); + void set_num_bits(const int64_t num_bits); + void set_narrow_range(const bool narrow_range); + int64_t get_num_bits() const; + bool get_narrow_range() const; +}; + +AbstractBasePtr FakeQuantWithMinMaxVarsPerChannelInfer(const abstract::AnalysisEnginePtr &, + const PrimitivePtr &primitive, + const std::vector &input_args); +using PrimFakeQuantWithMinMaxVarsPerChannelPtr = std::shared_ptr; +} // namespace ops +} // namespace mindspore + +#endif // MINDSPORE_CORE_OPS_FAKE_QUANT_WITH_MIN_MAX_VARS_PER_CHANNEL_H_ diff --git a/mindspore/core/c_ops/reshape.cc b/mindspore/core/ops/fft_imag.cc similarity index 64% rename from mindspore/core/c_ops/reshape.cc rename to mindspore/core/ops/fft_imag.cc index b10333ccf9f..dfad6034a17 100644 --- a/mindspore/core/c_ops/reshape.cc +++ b/mindspore/core/ops/fft_imag.cc @@ -14,34 +14,38 @@ * limitations under the License. 
*/ -#include "c_ops/reshape.h" -#include -#include +#include "ops/fft_imag.h" #include -#include -#include #include "utils/check_convert_utils.h" #include "abstract/primitive_infer_map.h" namespace mindspore { +namespace ops { namespace { abstract::ShapePtr InferShape(const PrimitivePtr &primitive, const std::vector &input_args) { - // to do - return nullptr; + MS_EXCEPTION_IF_NULL(primitive); + auto FftImag_prim = primitive->cast(); + MS_EXCEPTION_IF_NULL(FftImag_prim); + auto prim_name = FftImag_prim->name(); + auto in_shape = CheckAndConvertUtils::ConvertShapePtrToShape("in_shape", input_args[0]->BuildShape(), prim_name); + in_shape.pop_back(); + return std::make_shared(in_shape); } TypePtr InferType(const PrimitivePtr &prim, const std::vector &input_args) { - // to do - return nullptr; + for (const auto &item : input_args) { + MS_EXCEPTION_IF_NULL(item); + } + return TypeIdToType(kNumberTypeFloat32); } } // namespace -AbstractBasePtr ReshapeInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, +AbstractBasePtr FftImagInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, const std::vector &input_args) { return std::make_shared(InferType(primitive, input_args), InferShape(primitive, input_args)->shape()); } - -REGISTER_PRIMITIVE_EVAL_IMPL(Reshape, prim::kPrimReshape, ReshapeInfer); -REGISTER_PRIMITIVE_C(kNameReshape, Reshape); +REGISTER_PRIMITIVE_EVAL_IMPL(FftImag, prim::kPrimFftImag, FftImagInfer); +REGISTER_PRIMITIVE_C(kNameFftImag, FftImag); +} // namespace ops } // namespace mindspore diff --git a/mindspore/core/c_ops/fft_imag.h b/mindspore/core/ops/fft_imag.h similarity index 71% rename from mindspore/core/c_ops/fft_imag.h rename to mindspore/core/ops/fft_imag.h index 4a055a26411..c0a3d2301a3 100644 --- a/mindspore/core/c_ops/fft_imag.h +++ b/mindspore/core/ops/fft_imag.h @@ -14,17 +14,18 @@ * limitations under the License. */ -#ifndef MINDSPORE_CORE_C_OPS_FFTIMAG_H_ -#define MINDSPORE_CORE_C_OPS_FFTIMAG_H_ +#ifndef MINDSPORE_CORE_OPS_FFT_IMAG_H_ +#define MINDSPORE_CORE_OPS_FFT_IMAG_H_ #include #include #include #include -#include "c_ops/primitive_c.h" +#include "ops/primitive_c.h" #include "abstract/abstract_value.h" #include "utils/check_convert_utils.h" namespace mindspore { +namespace ops { constexpr auto kNameFftImag = "FftImag"; class FftImag : public PrimitiveC { public: @@ -33,6 +34,10 @@ class FftImag : public PrimitiveC { MS_DECLARE_PARENT(FftImag, PrimitiveC); void Init() {} }; +AbstractBasePtr FftImagInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args); +using PrimFftImagPtr = std::shared_ptr; +} // namespace ops } // namespace mindspore -#endif // MINDSPORE_CORE_C_OPS_FFTIMAG_H_ +#endif // MINDSPORE_CORE_OPS_FFT_IMAG_H_ diff --git a/mindspore/core/ops/fft_real.cc b/mindspore/core/ops/fft_real.cc new file mode 100644 index 00000000000..c9cfc6d5f8b --- /dev/null +++ b/mindspore/core/ops/fft_real.cc @@ -0,0 +1,43 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include +#include + +#include "ops/fft_real.h" +#include "ops/op_utils.h" +#include "utils/check_convert_utils.h" +#include "abstract/primitive_infer_map.h" + +namespace mindspore { +namespace ops { +AbstractBasePtr FftRealInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args) { + MS_EXCEPTION_IF_NULL(primitive); + auto prim_name = primitive->name(); + CheckAndConvertUtils::CheckInteger("input number", input_args.size(), kEqual, 1, prim_name); + for (const auto &item : input_args) { + MS_EXCEPTION_IF_NULL(item); + } + auto out_dtype = kFloat32; + auto out_shape = CheckAndConvertUtils::ConvertShapePtrToShape("x_shape", input_args[0]->BuildShape(), prim_name); + out_shape.pop_back(); + return std::make_shared(out_dtype, std::make_shared(out_shape)); +} +REGISTER_PRIMITIVE_EVAL_IMPL(FftReal, prim::kPrimFftReal, FftRealInfer); +REGISTER_PRIMITIVE_C(kNameFftReal, FftReal); +} // namespace ops +} // namespace mindspore diff --git a/mindspore/core/ops/fft_real.h b/mindspore/core/ops/fft_real.h new file mode 100644 index 00000000000..5aee6082196 --- /dev/null +++ b/mindspore/core/ops/fft_real.h @@ -0,0 +1,43 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_CORE_OPS_FFT_REAL_H_ +#define MINDSPORE_CORE_OPS_FFT_REAL_H_ +#include +#include + +#include "ops/primitive_c.h" +#include "abstract/abstract_value.h" +#include "utils/check_convert_utils.h" + +namespace mindspore { +namespace ops { +constexpr auto kNameFftReal = "FftReal"; +class FftReal : public PrimitiveC { + public: + FftReal() : PrimitiveC(kNameFftReal) {} + ~FftReal() = default; + MS_DECLARE_PARENT(FftReal, PrimitiveC); + void Init() {} +}; + +AbstractBasePtr FftRealInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args); +using PrimFftRealPtr = std::shared_ptr; +} // namespace ops +} // namespace mindspore + +#endif // MINDSPORE_CORE_OPS_FFT_REAL_H_ diff --git a/mindspore/core/ops/fill.cc b/mindspore/core/ops/fill.cc new file mode 100644 index 00000000000..f90781c951a --- /dev/null +++ b/mindspore/core/ops/fill.cc @@ -0,0 +1,46 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "ops/fill.h" +#include "ops/op_utils.h" +#include "utils/check_convert_utils.h" + +namespace mindspore { +namespace ops { +AbstractBasePtr FillInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args) { + MS_EXCEPTION_IF_NULL(primitive); + auto prim_name = primitive->name(); + CheckAndConvertUtils::CheckInteger("input number", input_args.size(), kEqual, 3, prim_name); + for (const auto &item : input_args) { + MS_EXCEPTION_IF_NULL(item); + } + auto input_dtype = input_args[0]->cast(); + MS_EXCEPTION_IF_NULL(input_dtype); + auto dtype_value = input_dtype->BuildValue(); + MS_EXCEPTION_IF_NULL(dtype_value); + auto dtype = dtype_value->cast(); + MS_EXCEPTION_IF_NULL(dtype); + auto valid_types = common_valid_types; + valid_types.insert(kNumberTypeBool); + CheckAndConvertUtils::CheckTypeSame("output datatype", dtype, valid_types, prim_name); + auto out_shape = GetValue>(input_args[1]->BuildValue()); + return std::make_shared(dtype, std::make_shared(out_shape)); +} +REGISTER_PRIMITIVE_EVAL_IMPL(Fill, prim::kPrimFill, FillInfer); +REGISTER_PRIMITIVE_C(kNameFill, Fill); +} // namespace ops +} // namespace mindspore diff --git a/mindspore/core/c_ops/fill.h b/mindspore/core/ops/fill.h similarity index 68% rename from mindspore/core/c_ops/fill.h rename to mindspore/core/ops/fill.h index e3b3794ba31..c983c5a54d6 100644 --- a/mindspore/core/c_ops/fill.h +++ b/mindspore/core/ops/fill.h @@ -14,13 +14,17 @@ * limitations under the License. */ -#ifndef MINDSPORE_CORE_C_OPS_FILL_H_ -#define MINDSPORE_CORE_C_OPS_FILL_H_ -#include "c_ops/primitive_c.h" +#ifndef MINDSPORE_CORE_OPS_FILL_H_ +#define MINDSPORE_CORE_OPS_FILL_H_ +#include +#include + +#include "ops/primitive_c.h" #include "abstract/abstract_value.h" #include "utils/check_convert_utils.h" namespace mindspore { +namespace ops { constexpr auto kNameFill = "Fill"; class Fill : public PrimitiveC { public: @@ -29,6 +33,11 @@ class Fill : public PrimitiveC { MS_DECLARE_PARENT(Fill, PrimitiveC); void Init() {} }; + +AbstractBasePtr FillInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args); +using PrimFillPtr = std::shared_ptr; +} // namespace ops } // namespace mindspore -#endif // MINDSPORE_CORE_C_OPS_FILL_H_ +#endif // MINDSPORE_CORE_OPS_FILL_H_ diff --git a/mindspore/core/ops/flatten.cc b/mindspore/core/ops/flatten.cc new file mode 100644 index 00000000000..6ed4feb8883 --- /dev/null +++ b/mindspore/core/ops/flatten.cc @@ -0,0 +1,59 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include +#include "ops/flatten.h" +#include "utils/check_convert_utils.h" + +namespace mindspore { +namespace ops { +namespace { +abstract::ShapePtr InferShape(const PrimitivePtr &primitive, const std::vector &input_args) { + MS_EXCEPTION_IF_NULL(primitive); + auto flatten_prim = primitive->cast(); + MS_EXCEPTION_IF_NULL(flatten_prim); + auto prim_name = flatten_prim->name(); + CheckAndConvertUtils::CheckInteger("input args size", input_args.size(), kGreaterEqual, 1, prim_name); + auto x_shape = CheckAndConvertUtils::ConvertShapePtrToShape("x_shape", input_args[0]->BuildShape(), prim_name); + auto prod = 1; + int64_t size = x_shape.size(); + for (int64_t i = 1; i < size; i++) { + prod = prod * x_shape[i]; + } + std::vector out_shape = {x_shape[0], prod}; + return std::make_shared(out_shape); +} + +TypePtr InferType(const PrimitivePtr &prim, const std::vector &input_args) { + for (const auto &item : input_args) { + MS_EXCEPTION_IF_NULL(item); + } + auto infer_type = input_args[0]->BuildType()->cast()->element(); + const std::set valid_types = {TypeIdToType(kObjectTypeTensorType)}; + CheckAndConvertUtils::CheckSubClass("infer type", input_args[0]->BuildType(), valid_types, prim->name()); + return infer_type; +} +} // namespace + +AbstractBasePtr FlattenInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args) { + return std::make_shared(InferType(primitive, input_args), + InferShape(primitive, input_args)->shape()); +} +REGISTER_PRIMITIVE_EVAL_IMPL(Flatten, prim::kPrimFlatten, FlattenInfer); +REGISTER_PRIMITIVE_C(kNameFlatten, Flatten); +} // namespace ops +} // namespace mindspore diff --git a/mindspore/core/c_ops/flatten.h b/mindspore/core/ops/flatten.h similarity index 68% rename from mindspore/core/c_ops/flatten.h rename to mindspore/core/ops/flatten.h index 6bbc6a87254..164e7ccda6a 100644 --- a/mindspore/core/c_ops/flatten.h +++ b/mindspore/core/ops/flatten.h @@ -14,13 +14,17 @@ * limitations under the License. */ -#ifndef MINDSPORE_CORE_C_OPS_FLATTEN_H_ -#define MINDSPORE_CORE_C_OPS_FLATTEN_H_ -#include "c_ops/primitive_c.h" +#ifndef MINDSPORE_CORE_OPS_FLATTEN_H_ +#define MINDSPORE_CORE_OPS_FLATTEN_H_ +#include +#include + +#include "ops/primitive_c.h" #include "abstract/abstract_value.h" #include "utils/check_convert_utils.h" namespace mindspore { +namespace ops { constexpr auto kNameFlatten = "Flatten"; class Flatten : public PrimitiveC { public: @@ -29,6 +33,10 @@ class Flatten : public PrimitiveC { MS_DECLARE_PARENT(Flatten, PrimitiveC); void Init() {} }; +AbstractBasePtr FlattenInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args); +using PrimFlattenPtr = std::shared_ptr; +} // namespace ops } // namespace mindspore -#endif // MINDSPORE_CORE_C_OPS_FLATTEN_H_ +#endif // MINDSPORE_CORE_OPS_FLATTEN_H_ diff --git a/mindspore/core/ops/floor.cc b/mindspore/core/ops/floor.cc new file mode 100644 index 00000000000..d3129e4196f --- /dev/null +++ b/mindspore/core/ops/floor.cc @@ -0,0 +1,60 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
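// [Editor's note - illustrative sketch, not part of the patch] Shape rule implemented by
// Flatten's InferShape above: the leading (batch) dimension is kept and the remaining
// dimensions are folded into one. Hypothetical example: x_shape (8, 3, 4, 5) gives
// prod = 3 * 4 * 5 = 60 and out_shape (8, 60). Note that prod is declared with auto as a
// plain int, so very large trailing dimensions could overflow before landing in the shape.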
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "ops/floor.h" +#include +#include +#include +#include +#include +#include "utils/check_convert_utils.h" +#include "abstract/primitive_infer_map.h" + +namespace mindspore { +namespace ops { +namespace { +abstract::ShapePtr InferShape(const PrimitivePtr &primitive, const std::vector &input_args) { + MS_EXCEPTION_IF_NULL(primitive); + auto floor_prim = primitive->cast(); + MS_EXCEPTION_IF_NULL(floor_prim); + auto prim_name = floor_prim->name(); + for (const auto &item : input_args) { + MS_EXCEPTION_IF_NULL(item); + } + auto in_shape = CheckAndConvertUtils::ConvertShapePtrToShape("x_shape", input_args[0]->GetShapeTrack(), prim_name); + return std::make_shared(in_shape); +} + +TypePtr InferType(const PrimitivePtr &prim, const std::vector &input_args) { + const std::set valid_types = {kNumberTypeFloat16, kNumberTypeFloat32, kNumberTypeFloat64}; + if (std::any_of(input_args.begin(), input_args.end(), [](AbstractBasePtr a) { return a == nullptr; })) { + MS_LOG(EXCEPTION) << "nullptr"; + } + std::map types; + types.emplace("x", input_args[0]->BuildType()); + auto infer_type = CheckAndConvertUtils::CheckTensorTypeSame(types, valid_types, prim->name()); + return TypeIdToType(infer_type); +} +} // namespace +AbstractBasePtr FloorInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args) { + return std::make_shared(InferType(primitive, input_args), + InferShape(primitive, input_args)->shape()); +} +REGISTER_PRIMITIVE_EVAL_IMPL(Floor, prim::kPrimFloor, FloorInfer); +REGISTER_PRIMITIVE_C(kNameFloor, Floor); +} // namespace ops +} // namespace mindspore diff --git a/mindspore/core/c_ops/floor.h b/mindspore/core/ops/floor.h similarity index 72% rename from mindspore/core/c_ops/floor.h rename to mindspore/core/ops/floor.h index 2b12ccb9a82..67f8c222f7a 100644 --- a/mindspore/core/c_ops/floor.h +++ b/mindspore/core/ops/floor.h @@ -14,17 +14,18 @@ * limitations under the License. 
*/ -#ifndef MINDSPORE_CORE_C_OPS_FLOOR_H_ -#define MINDSPORE_CORE_C_OPS_FLOOR_H_ +#ifndef MINDSPORE_CORE_OPS_FLOOR_H_ +#define MINDSPORE_CORE_OPS_FLOOR_H_ #include #include #include #include -#include "c_ops/primitive_c.h" +#include "ops/primitive_c.h" #include "abstract/abstract_value.h" #include "utils/check_convert_utils.h" namespace mindspore { +namespace ops { constexpr auto kNameFloor = "Floor"; class Floor : public PrimitiveC { public: @@ -33,6 +34,10 @@ class Floor : public PrimitiveC { MS_DECLARE_PARENT(Floor, PrimitiveC); void Init() {} }; +AbstractBasePtr FloorInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args); +using PrimFLoorPtr = std::shared_ptr; +} // namespace ops } // namespace mindspore -#endif // MINDSPORE_CORE_C_OPS_FLOOR_H_ +#endif // MINDSPORE_CORE_OPS_FLOOR_H_ diff --git a/mindspore/core/c_ops/floor_div.cc b/mindspore/core/ops/floor_div.cc similarity index 91% rename from mindspore/core/c_ops/floor_div.cc rename to mindspore/core/ops/floor_div.cc index e23149e99c5..448d9c7ce92 100644 --- a/mindspore/core/c_ops/floor_div.cc +++ b/mindspore/core/ops/floor_div.cc @@ -13,8 +13,10 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -#include "c_ops/floor_div.h" +#include "ops/floor_div.h" namespace mindspore { +namespace ops { REGISTER_PRIMITIVE_C(kNameFloorDiv, FloorDiv); +} // namespace ops } // namespace mindspore diff --git a/mindspore/core/c_ops/floor_div.h b/mindspore/core/ops/floor_div.h similarity index 84% rename from mindspore/core/c_ops/floor_div.h rename to mindspore/core/ops/floor_div.h index 2f707afa236..66a0ed94322 100644 --- a/mindspore/core/c_ops/floor_div.h +++ b/mindspore/core/ops/floor_div.h @@ -14,13 +14,14 @@ * limitations under the License. */ -#ifndef MINDSPORE_CORE_C_OPS_FLOORDIV_H_ -#define MINDSPORE_CORE_C_OPS_FLOORDIV_H_ -#include "c_ops/primitive_c.h" +#ifndef MINDSPORE_CORE_OPS_FLOOR_DIV_H_ +#define MINDSPORE_CORE_OPS_FLOOR_DIV_H_ +#include "ops/primitive_c.h" #include "abstract/abstract_value.h" #include "utils/check_convert_utils.h" namespace mindspore { +namespace ops { constexpr auto kNameFloorDiv = "FloorDiv"; class FloorDiv : public PrimitiveC { public: @@ -29,6 +30,7 @@ class FloorDiv : public PrimitiveC { MS_DECLARE_PARENT(FloorDiv, PrimitiveC); void Init() {} }; +} // namespace ops } // namespace mindspore -#endif // MINDSPORE_CORE_C_OPS_FLOORDIV_H_ +#endif // MINDSPORE_CORE_OPS_FLOOR_DIV_H_ diff --git a/mindspore/core/c_ops/floor_mod.cc b/mindspore/core/ops/floor_mod.cc similarity index 91% rename from mindspore/core/c_ops/floor_mod.cc rename to mindspore/core/ops/floor_mod.cc index dc8764ed0e0..33f7425bc2c 100644 --- a/mindspore/core/c_ops/floor_mod.cc +++ b/mindspore/core/ops/floor_mod.cc @@ -13,8 +13,10 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -#include "c_ops/floor_mod.h" +#include "ops/floor_mod.h" namespace mindspore { +namespace ops { REGISTER_PRIMITIVE_C(kNameFloorMod, FloorMod); +} // namespace ops } // namespace mindspore diff --git a/mindspore/core/c_ops/floor_mod.h b/mindspore/core/ops/floor_mod.h similarity index 84% rename from mindspore/core/c_ops/floor_mod.h rename to mindspore/core/ops/floor_mod.h index 3e0aff92f45..0410873412c 100644 --- a/mindspore/core/c_ops/floor_mod.h +++ b/mindspore/core/ops/floor_mod.h @@ -14,13 +14,14 @@ * limitations under the License. 
*/ -#ifndef MINDSPORE_CORE_C_OPS_FLOORMOD_H_ -#define MINDSPORE_CORE_C_OPS_FLOORMOD_H_ -#include "c_ops/primitive_c.h" +#ifndef MINDSPORE_CORE_OPS_FLOOR_MOD_H_ +#define MINDSPORE_CORE_OPS_FLOOR_MOD_H_ +#include "ops/primitive_c.h" #include "abstract/abstract_value.h" #include "utils/check_convert_utils.h" namespace mindspore { +namespace ops { constexpr auto kNameFloorMod = "FloorMod"; class FloorMod : public PrimitiveC { public: @@ -29,6 +30,7 @@ class FloorMod : public PrimitiveC { MS_DECLARE_PARENT(FloorMod, PrimitiveC); void Init() {} }; +} // namespace ops } // namespace mindspore -#endif // MINDSPORE_CORE_C_OPS_FLOORMOD_H_ +#endif // MINDSPORE_CORE_OPS_FLOOR_MOD_H_ diff --git a/mindspore/core/ops/fused_batch_norm.cc b/mindspore/core/ops/fused_batch_norm.cc new file mode 100644 index 00000000000..ca527080dc9 --- /dev/null +++ b/mindspore/core/ops/fused_batch_norm.cc @@ -0,0 +1,52 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include + +#include "ops/fused_batch_norm.h" +#include "ops/op_utils.h" + +namespace mindspore { +namespace ops { +void FusedBatchNorm::Init(const int64_t mode, const float epsilon, const float momentum) { + this->set_mode(mode); + this->set_epsilon(epsilon); + this->set_momentum(momentum); +} + +void FusedBatchNorm::set_mode(const int64_t mode) { this->AddAttr(kMode, MakeValue(mode)); } + +void FusedBatchNorm::set_epsilon(const float epsilon) { this->AddAttr(kEpsilon, MakeValue(epsilon)); } + +void FusedBatchNorm::set_momentum(const float momentum) { this->AddAttr(kMomentum, MakeValue(momentum)); } + +int64_t FusedBatchNorm::get_mode() const { + auto value_ptr = this->GetAttr(kMode); + return GetValue(value_ptr); +} + +float FusedBatchNorm::get_epsilon() const { + auto value_ptr = this->GetAttr(kEpsilon); + return GetValue(value_ptr); +} + +float FusedBatchNorm::get_momentum() const { + auto value_ptr = this->GetAttr(kMomentum); + return GetValue(value_ptr); +} +REGISTER_PRIMITIVE_C(kNameFusedBatchNorm, FusedBatchNorm); +} // namespace ops +} // namespace mindspore diff --git a/mindspore/core/ops/fused_batch_norm.h b/mindspore/core/ops/fused_batch_norm.h new file mode 100644 index 00000000000..0642ab73198 --- /dev/null +++ b/mindspore/core/ops/fused_batch_norm.h @@ -0,0 +1,49 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef MINDSPORE_CORE_OPS_FUSED_BATCH_NORM_H_ +#define MINDSPORE_CORE_OPS_FUSED_BATCH_NORM_H_ +#include +#include +#include +#include +#include "ops/primitive_c.h" +#include "abstract/abstract_value.h" +#include "utils/check_convert_utils.h" + +namespace mindspore { +namespace ops { +constexpr auto kNameFusedBatchNorm = "FusedBatchNorm"; +class FusedBatchNorm : public PrimitiveC { + public: + FusedBatchNorm() : PrimitiveC(kNameFusedBatchNorm) { + InitIOName({"x", "scale", "b", "mean", "variance"}, + {"y", "running_mean", "running_variance", "save_mean", "save_inv_variance"}); + } + ~FusedBatchNorm() = default; + MS_DECLARE_PARENT(FusedBatchNorm, PrimitiveC); + void Init(const int64_t mode = 0, const float epsilon = 1e-5, const float momentum = 0.1); + void set_mode(const int64_t mode); + void set_epsilon(const float epsilon); + void set_momentum(const float momentum); + int64_t get_mode() const; + float get_epsilon() const; + float get_momentum() const; +}; +} // namespace ops +} // namespace mindspore + +#endif // MINDSPORE_CORE_OPS_FUSED_BATCH_NORM_H_ diff --git a/mindspore/core/ops/fusion/activation.cc b/mindspore/core/ops/fusion/activation.cc new file mode 100644 index 00000000000..d270fa59a60 --- /dev/null +++ b/mindspore/core/ops/fusion/activation.cc @@ -0,0 +1,66 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
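// [Editor's note - illustrative sketch, not part of the patch] Using the attribute API of
// FusedBatchNorm defined above; the construction is hypothetical.
//
//   mindspore::ops::FusedBatchNorm bn;
//   bn.Init(/*mode=*/0, /*epsilon=*/1e-5f, /*momentum=*/0.1f);  // matches the declared defaults
//   float eps = bn.get_epsilon();                               // reads kEpsilon back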
+ */ + +#include "ops/fusion/activation.h" +#include +#include +#include +#include +#include "ops/op_utils.h" + +namespace mindspore { +namespace ops { +void Activation::set_alpha(const float alpha) { this->AddAttr(kAlpha, MakeValue(alpha)); } + +void Activation::set_min_val(const float min_val) { this->AddAttr(kMinVal, MakeValue(min_val)); } + +void Activation::set_max_val(const float max_val) { this->AddAttr(kMaxVal, MakeValue(max_val)); } + +void Activation::set_activation_type(const ActivationType &activation_type) { + int64_t swi; + swi = activation_type; + this->AddAttr(kActivationType, MakeValue(swi)); +} + +float Activation::get_alpha() const { + auto value_ptr = this->GetAttr(kAlpha); + return GetValue(value_ptr); +} + +float Activation::get_min_val() const { + auto value_ptr = this->GetAttr(kMinVal); + return GetValue(value_ptr); +} + +float Activation::get_max_val() const { + auto value_ptr = this->GetAttr(kMaxVal); + return GetValue(value_ptr); +} + +ActivationType Activation::get_activation_type() const { + auto value_ptr = GetAttr(kActivationType); + return ActivationType(GetValue(value_ptr)); +} +void Activation::Init(const float alpha, const float min_val, const float max_val, + const ActivationType &activation_type) { + this->set_alpha(alpha); + this->set_min_val(min_val); + this->set_max_val(max_val); + this->set_activation_type(activation_type); +} +REGISTER_PRIMITIVE_C(kNameActivation, Activation); +} // namespace ops +} // namespace mindspore diff --git a/mindspore/core/ops/fusion/activation.h b/mindspore/core/ops/fusion/activation.h new file mode 100644 index 00000000000..b0d030a3f60 --- /dev/null +++ b/mindspore/core/ops/fusion/activation.h @@ -0,0 +1,45 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef MINDSPORE_CORE_OPS_ACTIVATION_H_ +#define MINDSPORE_CORE_OPS_ACTIVATION_H_ +#include "ops/primitive_c.h" +#include "abstract/abstract_value.h" +#include "utils/check_convert_utils.h" + +namespace mindspore { +namespace ops { +constexpr auto kNameActivation = "Activation"; +class Activation : public PrimitiveC { + public: + Activation() : PrimitiveC(kNameActivation) {} + ~Activation() = default; + MS_DECLARE_PARENT(Activation, PrimitiveC); + void Init(const float alpha = 0.2, const float min_val = -1.0, const float max_val = 1.0, + const ActivationType &activation_type = NO_ACTIVATION); + void set_alpha(const float alpha); + void set_min_val(const float min_val); + void set_max_val(const float max_val); + void set_activation_type(const ActivationType &activation_type); + float get_alpha() const; + float get_min_val() const; + float get_max_val() const; + ActivationType get_activation_type() const; +}; +} // namespace ops +} // namespace mindspore + +#endif // MINDSPORE_CORE_OPS_ACTIVATION_H_ diff --git a/mindspore/core/ops/fusion/add_fusion.cc b/mindspore/core/ops/fusion/add_fusion.cc new file mode 100644 index 00000000000..cdd05e84bee --- /dev/null +++ b/mindspore/core/ops/fusion/add_fusion.cc @@ -0,0 +1,68 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
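// [Editor's note - illustrative sketch, not part of the patch] The fused Activation
// primitive above stores its activation type as an int64 attribute and converts it back in
// get_activation_type(). Construction below is hypothetical; Init() uses the defaults
// declared in the header (alpha 0.2, min_val -1.0, max_val 1.0, NO_ACTIVATION).
//
//   mindspore::ops::Activation act;
//   act.Init();                             // writes kAlpha, kMinVal, kMaxVal, kActivationType
//   auto type = act.get_activation_type();  // ActivationType recovered from the int64 attribute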
+ */ + +#include "ops/fusion/add_fusion.h" +#include +#include +#include +#include +#include + +#include "ops/op_utils.h" + +namespace mindspore { +namespace ops { +void AddFusion::set_activation_type(const ActivationType activation_type) { + int64_t swi; + swi = activation_type; + this->AddAttr(kActivationType, MakeValue(swi)); +} +ActivationType AddFusion::get_activation_type() const { + auto value_ptr = GetAttr(kActivationType); + return ActivationType(GetValue(value_ptr)); +} +void AddFusion::Init(const ActivationType activation_type) { this->set_activation_type(activation_type); } + +namespace { +abstract::ShapePtr InferShape(const PrimitivePtr &primitive, const std::vector &input_args) { + MS_EXCEPTION_IF_NULL(primitive); + auto add_prim = primitive->cast(); + MS_EXCEPTION_IF_NULL(add_prim); + auto op_name = add_prim->name(); + return BroadCastInferShape(op_name, input_args); +} + +TypePtr InferType(const PrimitivePtr &prim, const std::vector &input_args) { + if (std::any_of(input_args.begin(), input_args.end(), [](const AbstractBasePtr &a) { return a == nullptr; })) { + MS_LOG(EXCEPTION) << "nullptr"; + } + std::map types; + types.emplace("x", input_args[0]->BuildType()); + types.emplace("y", input_args[1]->BuildType()); + auto infer_type = CheckAndConvertUtils::CheckTensorTypeSame(types, common_valid_types, prim->name()); + return TypeIdToType(infer_type); +} +} // namespace + +AbstractBasePtr AddFusionInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args) { + return std::make_shared(InferType(primitive, input_args), + InferShape(primitive, input_args)->shape()); +} +REGISTER_PRIMITIVE_EVAL_IMPL(AddFusion, prim::kPrimAddFusion, AddFusionInfer); +REGISTER_PRIMITIVE_C(kNameAddFusion, AddFusion); +} // namespace ops +} // namespace mindspore diff --git a/mindspore/core/ops/fusion/add_fusion.h b/mindspore/core/ops/fusion/add_fusion.h new file mode 100644 index 00000000000..bb7da9dfdd8 --- /dev/null +++ b/mindspore/core/ops/fusion/add_fusion.h @@ -0,0 +1,45 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef MINDSPORE_CORE_OPS_ADD_FUSION_H_ +#define MINDSPORE_CORE_OPS_ADD_FUSION_H_ +#include +#include + +#include "ops/add.h" +#include "abstract/abstract_value.h" +#include "utils/check_convert_utils.h" + +namespace mindspore { +namespace ops { +constexpr auto kNameAddFusion = "AddFusion"; +class AddFusion : public Add { + public: + AddFusion() : Add(kNameAddFusion) { InitIOName({"x", "y"}, {"output"}); } + ~AddFusion() = default; + MS_DECLARE_PARENT(AddFusion, Add); + void Init(const ActivationType activation_type); + void set_activation_type(const ActivationType activation_type); + ActivationType get_activation_type() const; +}; + +AbstractBasePtr AddFusionInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args); +using PrimAddFusionPtr = std::shared_ptr; +} // namespace ops +} // namespace mindspore + +#endif // MINDSPORE_CORE_OPS_ADD_FUSION_H_ diff --git a/mindspore/core/ops/fusion/adder_fusion.cc b/mindspore/core/ops/fusion/adder_fusion.cc new file mode 100644 index 00000000000..98d759f826b --- /dev/null +++ b/mindspore/core/ops/fusion/adder_fusion.cc @@ -0,0 +1,50 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "ops/fusion/adder_fusion.h" +#include "ops/op_utils.h" + +namespace mindspore { +namespace ops { +void AdderFusion::Init(const int64_t in_channel, const int64_t out_channel, const std::vector &kernel_size, + const PadMode &pad_mode, const std::vector &stride, + const std::vector &pad_list, const std::vector &dilation, const int64_t group, + const Format &format, const ActivationType activation_type) { + set_in_channel(in_channel); + set_out_channel(out_channel); + set_kernel_size(kernel_size); + set_pad_mode(pad_mode); + set_stride(stride); + set_pad_list(pad_list); + set_dilation(dilation); + set_group(group); + set_format(format); + set_activation_type(activation_type); +} + +void AdderFusion::set_activation_type(const ActivationType activation_type) { + int64_t swi = activation_type; + this->AddAttr(kActivationType, MakeValue(swi)); +} + +ActivationType AdderFusion::get_activation_type() const { + auto value_ptr = GetAttr(kActivationType); + return ActivationType(GetValue(value_ptr)); +} + +REGISTER_PRIMITIVE_C(kNameAdderFusion, AdderFusion); +} // namespace ops +} // namespace mindspore diff --git a/mindspore/core/ops/fusion/adder_fusion.h b/mindspore/core/ops/fusion/adder_fusion.h new file mode 100644 index 00000000000..5d5ec8ff349 --- /dev/null +++ b/mindspore/core/ops/fusion/adder_fusion.h @@ -0,0 +1,46 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_CORE_OPS_ADDER_FUSION_H_ +#define MINDSPORE_CORE_OPS_ADDER_FUSION_H_ + +#include +#include +#include +#include +#include "ops/adder.h" +#include "abstract/abstract_value.h" +#include "utils/check_convert_utils.h" + +namespace mindspore { +namespace ops { +constexpr auto kNameAdderFusion = "AdderFusion"; +class AdderFusion : public Adder { + public: + AdderFusion() : Adder(kNameAdderFusion) {} + MS_DECLARE_PARENT(AdderFusion, Adder); + void Init(const int64_t in_channel, const int64_t out_channel, const std::vector &kernel_size, + const PadMode &pad_mode, const std::vector &stride, const std::vector &pad_list, + const std::vector &dilation, const int64_t group, const Format &format, + const ActivationType activation_type); + void set_activation_type(const ActivationType activation_type); + + ActivationType get_activation_type() const; +}; +} // namespace ops +} // namespace mindspore + +#endif // MINDSPORE_CORE_OPS_ADDER_FUSION_H_ diff --git a/mindspore/core/ops/fusion/arg_max_fusion.cc b/mindspore/core/ops/fusion/arg_max_fusion.cc new file mode 100644 index 00000000000..35ac8bdc9fc --- /dev/null +++ b/mindspore/core/ops/fusion/arg_max_fusion.cc @@ -0,0 +1,40 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "ops/fusion/arg_max_fusion.h" + +namespace mindspore { +namespace ops { +void ArgMaxFusion::Init(const bool keep_dims, const bool out_max_value, const int64_t top_k, const int64_t axis) { + set_axis(axis); + set_keep_dims(keep_dims); + set_out_max_value(out_max_value); + set_top_k(top_k); +} + +void ArgMaxFusion::set_keep_dims(const bool keep_dims) { this->AddAttr(kKeepDims, MakeValue(keep_dims)); } +void ArgMaxFusion::set_out_max_value(const bool out_max_value) { + this->AddAttr(kOutMaxValue, MakeValue(out_max_value)); +} +void ArgMaxFusion::set_top_k(const int64_t top_k) { this->AddAttr(kTopK, MakeValue(top_k)); } + +bool ArgMaxFusion::get_keep_dims() const { return GetValue(GetAttr(kKeepDims)); } +bool ArgMaxFusion::get_out_max_value() const { return GetValue(GetAttr(kOutMaxValue)); } +int64_t ArgMaxFusion::get_top_k() const { return GetValue(GetAttr(kTopK)); } + +REGISTER_PRIMITIVE_C(kNameArgMaxFusion, ArgMaxFusion); +} // namespace ops +} // namespace mindspore diff --git a/mindspore/core/ops/fusion/arg_max_fusion.h b/mindspore/core/ops/fusion/arg_max_fusion.h new file mode 100644 index 00000000000..b730d759f91 --- /dev/null +++ b/mindspore/core/ops/fusion/arg_max_fusion.h @@ -0,0 +1,48 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_CORE_OPS_ARGMAX_FUSION_H_ +#define MINDSPORE_CORE_OPS_ARGMAX_FUSION_H_ +#include +#include + +#include "ops/arg_max.h" + +namespace mindspore { +namespace ops { +constexpr auto kNameArgMaxFusion = "ArgMaxFusion"; +class ArgMaxFusion : public ArgMax { + public: + ArgMaxFusion() : ArgMax(kNameArgMaxFusion) { InitIOName({"x"}, {"output"}); } + ~ArgMaxFusion() = default; + MS_DECLARE_PARENT(ArgMaxFusion, ArgMax); + void Init(const bool keep_dims, const bool out_max_value, const int64_t top_k, const int64_t axis = -1); + + void set_keep_dims(const bool keep_dims); + void set_out_max_value(const bool out_max_value); + void set_top_k(const int64_t top_k); + + bool get_keep_dims() const; + bool get_out_max_value() const; + int64_t get_top_k() const; +}; +AbstractBasePtr ArgMaxFusionInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args); +using PrimArgMaxFusion = std::shared_ptr; +} // namespace ops +} // namespace mindspore + +#endif // MINDSPORE_CORE_OPS_ARGMAX_FUSION_H_ diff --git a/mindspore/core/ops/fusion/arg_min_fusion.cc b/mindspore/core/ops/fusion/arg_min_fusion.cc new file mode 100644 index 00000000000..d72ebe8f31a --- /dev/null +++ b/mindspore/core/ops/fusion/arg_min_fusion.cc @@ -0,0 +1,45 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "ops/fusion/arg_min_fusion.h" + +namespace mindspore { +namespace ops { +void ArgMinFusion::Init(bool keep_dims, bool out_max_value, int64_t top_k, int64_t axis) { + set_axis(axis); + set_keep_dims(keep_dims); + set_out_max_value(out_max_value); + set_top_k(top_k); +} + +void ArgMinFusion::set_keep_dims(const bool keep_dims) { this->AddAttr(kKeepDims, MakeValue(keep_dims)); } +void ArgMinFusion::set_out_max_value(bool out_max_value) { AddAttr(kOutMaxValue, MakeValue(out_max_value)); } +void ArgMinFusion::set_top_k(int64_t top_k) { this->AddAttr(kTopK, MakeValue(top_k)); } + +bool ArgMinFusion::get_keep_dims() const { return GetValue(GetAttr(kKeepDims)); } +bool ArgMinFusion::get_out_max_value() const { + auto value_ptr = GetAttr(kOutMaxValue); + return GetValue(value_ptr); +} + +int64_t ArgMinFusion::get_top_k() const { + auto value_ptr = GetAttr(kTopK); + return GetValue(value_ptr); +} + +REGISTER_PRIMITIVE_C(kNameArgMinFusion, ArgMinFusion); +} // namespace ops +} // namespace mindspore diff --git a/mindspore/core/ops/fusion/arg_min_fusion.h b/mindspore/core/ops/fusion/arg_min_fusion.h new file mode 100644 index 00000000000..8e9cd28b17c --- /dev/null +++ b/mindspore/core/ops/fusion/arg_min_fusion.h @@ -0,0 +1,47 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+#ifndef MINDSPORE_CORE_OPS_ARGMIN_FUSION_H_
+#define MINDSPORE_CORE_OPS_ARGMIN_FUSION_H_
+#include <memory>
+#include <vector>
+
+#include "ops/arg_min.h"
+
+namespace mindspore {
+namespace ops {
+constexpr auto kNameArgMinFusion = "ArgMinFusion";
+class ArgMinFusion : public ArgMin {
+ public:
+  ArgMinFusion() : ArgMin(kNameArgMinFusion) { InitIOName({"x"}, {"output"}); }
+  ~ArgMinFusion() = default;
+  MS_DECLARE_PARENT(ArgMinFusion, ArgMin);
+  void Init(bool keep_dims, bool out_max_value, int64_t top_k, int64_t axis = -1);
+  void set_keep_dims(const bool keep_dims);
+  void set_out_max_value(bool out_max_value);
+  void set_top_k(int64_t top_k);
+
+  bool get_keep_dims() const;
+  bool get_out_max_value() const;
+  int64_t get_top_k() const;
+};
+AbstractBasePtr ArgMinFusionInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive,
+                                  const std::vector<AbstractBasePtr> &input_args);
+using PrimArgMinFusion = std::shared_ptr<ArgMinFusion>;
+} // namespace ops
+} // namespace mindspore
+
+#endif // MINDSPORE_CORE_OPS_ARGMIN_FUSION_H_
diff --git a/mindspore/core/ops/fusion/avg_pool_fusion.cc b/mindspore/core/ops/fusion/avg_pool_fusion.cc
new file mode 100644
index 00000000000..887d3e9d7b7
--- /dev/null
+++ b/mindspore/core/ops/fusion/avg_pool_fusion.cc
@@ -0,0 +1,111 @@
+/**
+ * Copyright 2020 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
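As a quick illustration of the attribute round-trip that ArgMinFusion (and its ArgMaxFusion counterpart above) exposes, a hedged sketch follows. Nothing here is part of the patch; the helper function name and the argument values are invented for the example.

// Illustrative sketch: configure an ArgMinFusion primitive and query the attributes back.
#include <memory>
#include "ops/fusion/arg_min_fusion.h"

namespace mindspore {
namespace ops {
void ArgMinFusionUsageSketch() {
  auto arg_min = std::make_shared<ArgMinFusion>();
  // keep_dims = false, out_max_value = true, top_k = 1; axis defaults to -1.
  arg_min->Init(false, true, 1);
  bool keep_dims = arg_min->get_keep_dims();  // reads kKeepDims back as a bool
  int64_t top_k = arg_min->get_top_k();       // reads kTopK back as an int64_t
  (void)keep_dims;
  (void)top_k;
}
} // namespace ops
} // namespace mindspore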
+ */ + +#include "ops/fusion/avg_pool_fusion.h" + +namespace mindspore { +namespace ops { + +void AvgPoolFusion::Init(const std::vector &kernel_size, const std::vector &stride, + const PadMode &pad_mode, const Format &format, const std::vector &pad, + const RoundMode &round_mode, const bool global, const ActivationType activation_type) { + this->set_pad_mode(pad_mode); + this->set_kernel_size(kernel_size); + this->set_strides(stride); + this->set_format(format); + this->set_pad(pad); + this->set_round_mode(round_mode); + this->set_global(global); + this->set_activation_type(activation_type); +} + +void AvgPoolFusion::set_global(const bool global) { AddAttr(kGlobal, MakeValue(global)); } + +void AvgPoolFusion::set_activation_type(ActivationType activation_type) { + int64_t swi; + swi = activation_type; + this->AddAttr(kActivationType, MakeValue(swi)); +} + +bool AvgPoolFusion::get_global() const { + auto value_ptr = GetAttr(kGlobal); + return GetValue(value_ptr); +} + +ActivationType AvgPoolFusion::get_activation_type() const { + auto value_ptr = GetAttr(kActivationType); + return ActivationType(GetValue(value_ptr)); +} + +namespace { +abstract::ShapePtr InferShape(const PrimitivePtr &primitive, const std::vector &input_args) { + MS_EXCEPTION_IF_NULL(primitive); + auto pool_prim = primitive->cast(); + MS_EXCEPTION_IF_NULL(pool_prim); + auto op_name = pool_prim->name(); + auto in_shape = CheckAndConvertUtils::ConvertShapePtrToShape("x_shape", input_args[0]->GetShapeTrack(), op_name); + if (pool_prim->get_format() == NHWC) { + in_shape = {in_shape[0], in_shape[3], in_shape[1], in_shape[2]}; + } + CheckAndConvertUtils::CheckInteger("x_rank", in_shape.size(), kEqual, 4, op_name); + auto kernel_size = pool_prim->get_kernel_size(); + auto pad_mode = pool_prim->get_pad_mode(); + auto batch = in_shape[0]; + auto channel = in_shape[1]; + auto in_h = in_shape[2]; + auto in_w = in_shape[3]; + + auto strides = pool_prim->get_strides(); + auto kernel_h = kernel_size[2]; + auto kernel_w = kernel_size[3]; + auto stride_h = strides[2]; + auto stride_w = strides[3]; + int64_t out_h = -1; + int64_t out_w = -1; + if (pad_mode == VALID) { + out_h = ceil((in_h - (kernel_h - 1)) / stride_h); + out_w = ceil((in_w - (kernel_w - 1)) / stride_w); + } else if (pad_mode == SAME) { + out_h = ceil(in_h / stride_h); + out_w = ceil(in_w / stride_w); + } + std::vector out_shape = {batch, channel, out_h, out_w}; + if (pool_prim->get_format() == NHWC) { + out_shape = {batch, out_h, out_w, channel}; + } + if (std::any_of(out_shape.begin(), out_shape.end(), [](int64_t a) { return a <= 0; })) { + MS_LOG(EXCEPTION) << "Kernel size is not valid."; + } + return std::make_shared(out_shape); +} + +TypePtr InferType(const PrimitivePtr &prim, const std::vector &input_args) { + if (std::any_of(input_args.begin(), input_args.end(), [](AbstractBasePtr a) { return a == nullptr; })) { + MS_LOG(EXCEPTION) << "nullptr"; + } + return input_args[0]->BuildType(); +} +} // namespace + +AbstractBasePtr AvgPoolFusionInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args) { + return std::make_shared(InferType(primitive, input_args), + InferShape(primitive, input_args)->shape()); +} +REGISTER_PRIMITIVE_EVAL_IMPL(AvgPoolFusion, prim::kPrimAvgPool, AvgPoolFusionInfer); +REGISTER_PRIMITIVE_C(kNameAvgPoolFusion, AvgPoolFusion); +} // namespace ops +} // namespace mindspore diff --git a/mindspore/core/ops/fusion/avg_pool_fusion.h b/mindspore/core/ops/fusion/avg_pool_fusion.h new file mode 100644 index 
00000000000..af78fe2a808 --- /dev/null +++ b/mindspore/core/ops/fusion/avg_pool_fusion.h @@ -0,0 +1,50 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_CORE_OPS_AVG_POOL_FUSION_H_ +#define MINDSPORE_CORE_OPS_AVG_POOL_FUSION_H_ +#include +#include + +#include "ops/avg_pool.h" +#include "ops/op_utils.h" +#include "utils/check_convert_utils.h" + +namespace mindspore { +namespace ops { +constexpr auto kNameAvgPoolFusion = "AvgPoolFusion"; +class AvgPoolFusion : public AvgPool { + public: + AvgPoolFusion() : AvgPool(kNameAvgPoolFusion) { InitIOName({"x"}, {"output"}); } + ~AvgPoolFusion() = default; + MS_DECLARE_PARENT(AvgPoolFusion, AvgPool); + void Init(const std::vector &kernel_size = {1}, const std::vector &stride = {1}, + const PadMode &pad_mode = VALID, const Format &format = NCHW, + const std::vector &pad = {0, 0, 0, 0}, const RoundMode &round_mode = FLOOR, + const bool global = false, const ActivationType activation_type = NO_ACTIVATION); + void set_global(const bool global); + void set_activation_type(const ActivationType activation_type); + bool get_global() const; + ActivationType get_activation_type() const; +}; + +AbstractBasePtr AvgPoolFusionInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args); +using PrimAvgPoolFusionPtr = std::shared_ptr; +} // namespace ops +} // namespace mindspore + +#endif // MINDSPORE_CORE_OPS_AVG_POOL_FUSION_H_ diff --git a/mindspore/core/ops/fusion/conv2d_backprop_filter_fusion.cc b/mindspore/core/ops/fusion/conv2d_backprop_filter_fusion.cc new file mode 100644 index 00000000000..f6f6e08ae57 --- /dev/null +++ b/mindspore/core/ops/fusion/conv2d_backprop_filter_fusion.cc @@ -0,0 +1,66 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include + +#include "ops/fusion/conv2d_backprop_filter_fusion.h" +#include "ops/op_utils.h" + +namespace mindspore { +namespace ops { +void Conv2DBackpropFilterFusion::Init(const int64_t in_channel, const int64_t out_channel, + const std::vector &kernel_size, const PadMode &pad_mode, + const std::vector &pad_list, const int64_t mode, + const std::vector &stride, const std::vector &dilation, + const int64_t group, const Format &format, const ActivationType activation_type) { + set_out_channel(out_channel); + set_kernel_size(kernel_size); + set_pad_mode(pad_mode); + set_pad_list(pad_list); + set_mode(mode); + if (stride.size() == 4) { + set_stride({stride[2], stride[3]}); + } else { + set_stride(stride); + } + set_dilation(dilation); + set_group(group); + set_format(format); + set_activation_type(activation_type); +} + +void Conv2DBackpropFilterFusion::set_activation_type(const ActivationType activation_type) { + int64_t swi = activation_type; + this->AddAttr(kActivationType, MakeValue(swi)); +} + +void Conv2DBackpropFilterFusion::set_in_channel(const int64_t in_channel) { + this->AddAttr(kInChannel, MakeValue(in_channel)); +} + +ActivationType Conv2DBackpropFilterFusion::get_activation_type() const { + auto value_ptr = GetAttr(kActivationType); + return ActivationType(GetValue(value_ptr)); +} + +int64_t Conv2DBackpropFilterFusion::get_in_channel() const { + auto value_ptr = GetAttr(kInChannel); + return GetValue(value_ptr); +} + +REGISTER_PRIMITIVE_C(kNameConv2DBackpropFilterFusion, Conv2DBackpropFilterFusion); +} // namespace ops +} // namespace mindspore diff --git a/mindspore/core/ops/fusion/conv2d_backprop_filter_fusion.h b/mindspore/core/ops/fusion/conv2d_backprop_filter_fusion.h new file mode 100644 index 00000000000..552e428e4c2 --- /dev/null +++ b/mindspore/core/ops/fusion/conv2d_backprop_filter_fusion.h @@ -0,0 +1,50 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef MINDSPORE_CORE_OPS_CONV2D_BACKPROP_FILTER_FUSION_H_ +#define MINDSPORE_CORE_OPS_CONV2D_BACKPROP_FILTER_FUSION_H_ +#include +#include + +#include "ops/grad/conv2d_backprop_filter.h" +#include "abstract/abstract_value.h" +#include "utils/check_convert_utils.h" + +namespace mindspore { +namespace ops { +constexpr auto kNameConv2DBackpropFilterFusion = "Conv2DBackpropFilterFusion"; +class Conv2DBackpropFilterFusion : public Conv2DBackpropFilter { + public: + Conv2DBackpropFilterFusion() : Conv2DBackpropFilter(kNameConv2DBackpropFilterFusion) { + InitIOName({"out_backprop", "input", "filter_sizes"}, {"output"}); + } + ~Conv2DBackpropFilterFusion() = default; + MS_DECLARE_PARENT(Conv2DBackpropFilterFusion, Conv2DBackpropFilter); + void Init(const int64_t in_channel, const int64_t out_channel, const std::vector &kernel_size, + const PadMode &pad_mode = VALID, const std::vector &pad_list = {0, 0, 0, 0}, + const int64_t mode = 1, const std::vector &stride = {1, 1}, + const std::vector &dilation = {1, 1, 1, 1}, const int64_t group = 1, const Format &format = NCHW, + const ActivationType activation_type = NO_ACTIVATION); + void set_activation_type(const ActivationType activation_type); + void set_in_channel(const int64_t in_channel); + + ActivationType get_activation_type() const; + int64_t get_in_channel() const; +}; +} // namespace ops +} // namespace mindspore + +#endif // MINDSPORE_CORE_OPS_CONV2D_BACKPROP_FILTER_FUSION_H_ diff --git a/mindspore/core/ops/fusion/conv2d_backprop_input_fusion.cc b/mindspore/core/ops/fusion/conv2d_backprop_input_fusion.cc new file mode 100644 index 00000000000..088c73dc2dc --- /dev/null +++ b/mindspore/core/ops/fusion/conv2d_backprop_input_fusion.cc @@ -0,0 +1,62 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include +#include + +#include "ops/fusion/conv2d_backprop_input_fusion.h" +#include "ops/op_utils.h" + +namespace mindspore { +namespace ops { +void Conv2DBackpropInputFusion::Init(int64_t in_channel, int64_t out_channel, const std::vector &kernel_size, + int64_t mode, const PadMode &pad_mode, const std::vector &pad, + const std::vector &stride, const std::vector &dilation, + int64_t group, const Format &format, const std::vector &pad_list, + const ActivationType &activation_type) { + set_in_channel(in_channel); + set_out_channel(out_channel); + set_kernel_size(kernel_size); + set_mode(mode); + set_pad_mode(pad_mode); + set_pad(pad); + set_stride(stride); + set_dilation(dilation); + set_group(group); + set_format(format); + set_pad_list(pad_list); + this->set_activation_type(activation_type); +} + +void Conv2DBackpropInputFusion::set_in_channel(int64_t in_channel) { this->AddAttr(kInChannel, MakeValue(in_channel)); } + +void Conv2DBackpropInputFusion::set_activation_type(const ActivationType &activation_type) { + int64_t swi; + swi = activation_type; + this->AddAttr(kActivationType, MakeValue(swi)); +} +int64_t Conv2DBackpropInputFusion::get_in_channel() const { + auto value_ptr = GetAttr(kInChannel); + return GetValue(value_ptr); +} + +ActivationType Conv2DBackpropInputFusion::get_activation_type() const { + auto value_ptr = GetAttr(kActivationType); + return ActivationType(GetValue(value_ptr)); +} +REGISTER_PRIMITIVE_C(kNameConv2DBackpropInputFusion, Conv2DBackpropInputFusion); +} // namespace ops +} // namespace mindspore diff --git a/mindspore/core/ops/fusion/conv2d_backprop_input_fusion.h b/mindspore/core/ops/fusion/conv2d_backprop_input_fusion.h new file mode 100644 index 00000000000..a2e175d979a --- /dev/null +++ b/mindspore/core/ops/fusion/conv2d_backprop_input_fusion.h @@ -0,0 +1,44 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef MINDSPORE_CORE_OPS_CONV2D_BACKPROP_INPUT_FUSION_H_ +#define MINDSPORE_CORE_OPS_CONV2D_BACKPROP_INPUT_FUSION_H_ +#include +#include "ops/grad/conv2d_backprop_input.h" +#include "ops/op_utils.h" +#include "utils/check_convert_utils.h" + +namespace mindspore { +namespace ops { +constexpr auto kNameConv2DBackpropInputFusion = "Conv2DBackpropInputFusion"; +class Conv2DBackpropInputFusion : public Conv2DBackpropInput { + public: + Conv2DBackpropInputFusion() : Conv2DBackpropInput(kNameConv2DBackpropInputFusion) {} + MS_DECLARE_PARENT(Conv2DBackpropInputFusion, Conv2DBackpropInput); + void Init(int64_t in_channel, int64_t out_channel, const std::vector &kernel_size, int64_t mode = 1, + const PadMode &pad_mode = VALID, const std::vector &pad = {0, 0, 0, 0}, + const std::vector &stride = {1, 1, 1, 1}, const std::vector &dilation = {1, 1, 1, 1}, + int64_t group = 1, const Format &format = NCHW, const std::vector &pad_list = {0, 0, 0, 0}, + const ActivationType &activation_type = NO_ACTIVATION); + void set_in_channel(int64_t in_channel); + void set_activation_type(const ActivationType &activation_type); + int64_t get_in_channel() const; + ActivationType get_activation_type() const; +}; +} // namespace ops +} // namespace mindspore + +#endif // MINDSPORE_CORE_OPS_CONV2D_BACKPROP_INPUT_FUSION_H_ diff --git a/mindspore/core/ops/fusion/conv2d_fusion.cc b/mindspore/core/ops/fusion/conv2d_fusion.cc new file mode 100644 index 00000000000..b516c8734da --- /dev/null +++ b/mindspore/core/ops/fusion/conv2d_fusion.cc @@ -0,0 +1,62 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#include +#include + +#include "ops/fusion/conv2d_fusion.h" +#include "ops/op_utils.h" + +namespace mindspore { +namespace ops { +void Conv2DFusion::Init(int64_t in_channel, int64_t out_channel, const std::vector &kernel_size, int64_t mode, + const PadMode &pad_mode, const std::vector &pad, const std::vector &stride, + const std::vector &dilation, int64_t group, const Format &format, + const std::vector &pad_list, const ActivationType &activation_type) { + this->set_in_channel(in_channel); + this->set_out_channel(out_channel); + this->set_kernel_size(kernel_size); + this->set_mode(mode); + this->set_pad_mode(pad_mode); + this->set_pad(pad); + this->set_stride(stride); + this->set_dilation(dilation); + this->set_group(group); + this->set_format(format); + this->set_pad_list(pad_list); + this->set_activation_type(activation_type); +} +void Conv2DFusion::set_in_channel(const int64_t in_channel) { this->AddAttr(kInChannel, MakeValue(in_channel)); } +void Conv2DFusion::set_pad_list(const std::vector &pad_list) { this->AddAttr(kPadList, MakeValue(pad_list)); } +void Conv2DFusion::set_activation_type(const ActivationType &activation_type) { + int64_t swi; + swi = activation_type; + this->AddAttr(kActivationType, MakeValue(swi)); +} +int64_t Conv2DFusion::get_in_channel() const { + auto value_ptr = GetAttr(kInChannel); + return GetValue(value_ptr); +} +std::vector Conv2DFusion::get_pad_list() const { + auto value_ptr = GetAttr(kPadList); + return GetValue>(value_ptr); +} +ActivationType Conv2DFusion::get_activation_type() const { + auto value_ptr = GetAttr(kActivationType); + return ActivationType(GetValue(value_ptr)); +} +REGISTER_PRIMITIVE_C(kNameConv2DFusion, Conv2DFusion); +} // namespace ops +} // namespace mindspore diff --git a/mindspore/core/ops/fusion/conv2d_fusion.h b/mindspore/core/ops/fusion/conv2d_fusion.h new file mode 100644 index 00000000000..1a267e967e4 --- /dev/null +++ b/mindspore/core/ops/fusion/conv2d_fusion.h @@ -0,0 +1,47 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef MINDSPORE_CORE_OPS_CONV2D_FUSION_H_ +#define MINDSPORE_CORE_OPS_CONV2D_FUSION_H_ +#include + +#include "ops/conv2d.h" +#include "ops/op_utils.h" +#include "utils/check_convert_utils.h" + +namespace mindspore { +namespace ops { +constexpr auto kNameConv2DFusion = "Conv2DFusion"; +class Conv2DFusion : public Conv2D { + public: + Conv2DFusion() : Conv2D(kNameConv2DFusion) {} + MS_DECLARE_PARENT(Conv2DFusion, Conv2D); + void Init(int64_t in_channel, int64_t out_channel, const std::vector &kernel_size, int64_t mode = 1, + const PadMode &pad_mode = VALID, const std::vector &pad = {0, 0, 0, 0}, + const std::vector &stride = {1, 1, 1, 1}, const std::vector &dilation = {1, 1, 1, 1}, + int64_t group = 1, const Format &format = NCHW, const std::vector &pad_list = {0, 0, 0, 0}, + const ActivationType &activation_type = NO_ACTIVATION); + void set_in_channel(const int64_t in_channel); + void set_pad_list(const std::vector &pad_list); + void set_activation_type(const ActivationType &activation_type); + int64_t get_in_channel() const; + std::vector get_pad_list() const; + ActivationType get_activation_type() const; +}; +} // namespace ops +} // namespace mindspore + +#endif // MINDSPORE_CORE_OPS_CONV2D_FUSION_H_ diff --git a/mindspore/core/ops/fusion/conv2d_transpose_fusion.cc b/mindspore/core/ops/fusion/conv2d_transpose_fusion.cc new file mode 100644 index 00000000000..db7dd323814 --- /dev/null +++ b/mindspore/core/ops/fusion/conv2d_transpose_fusion.cc @@ -0,0 +1,69 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+#include "ops/fusion/conv2d_transpose_fusion.h"
+#include "ops/op_utils.h"
+
+namespace mindspore {
+namespace ops {
+void Conv2dTransposeFusion::Init(int64_t in_channel, int64_t out_channel, const std::vector<int64_t> &kernel_size,
+                                 int64_t mode, const PadMode &pad_mode, const std::vector<int64_t> &pad,
+                                 const std::vector<int64_t> &stride, const std::vector<int64_t> &dilation,
+                                 int64_t group, const Format &format, const std::vector<int64_t> &pad_list,
+                                 const ActivationType activation_type) {
+  set_in_channel(in_channel);
+  set_out_channel(out_channel);
+  set_kernel_size(kernel_size);
+  set_mode(mode);
+  set_pad_mode(pad_mode);
+  set_pad(pad);
+  set_stride(stride);
+  set_dilation(dilation);
+  set_group(group);
+  set_format(format);
+  set_pad_list(pad_list);
+  set_activation_type(activation_type);
+}
+
+void Conv2dTransposeFusion::set_kernel_size(const std::vector<int64_t> &kernel_size) {
+  CheckAndConvertUtils::CheckInteger(kKernelSize, kernel_size.size(), kEqual, 2, name());
+  for (int64_t item : kernel_size) {
+    CheckAndConvertUtils::CheckInteger(kKernelSize, item, kGreaterEqual, 1, name());
+  }
+  AddAttr(kKernelSize, MakeValue(kernel_size));
+}
+
+void Conv2dTransposeFusion::set_dilation(const std::vector<int64_t> &dilation) {
+  CheckAndConvertUtils::CheckInteger(kDilation, dilation.size(), kEqual, 2, name());
+  for (int64_t item : dilation) {
+    CheckAndConvertUtils::CheckInteger(kDilation, item, kGreaterEqual, 1, name());
+  }
+  AddAttr(kDilation, MakeValue(dilation));
+}
+
+void Conv2dTransposeFusion::set_activation_type(const ActivationType activation_type) {
+  int64_t swi = activation_type;
+  this->AddAttr(kActivationType, MakeValue(swi));
+}
+
+ActivationType Conv2dTransposeFusion::get_activation_type() const {
+  auto value_ptr = GetAttr(kActivationType);
+  return ActivationType(GetValue<int64_t>(value_ptr));
+}
+
+REGISTER_PRIMITIVE_C(kNameConv2dTransposeFusion, Conv2dTransposeFusion);
+} // namespace ops
+} // namespace mindspore
diff --git a/mindspore/core/ops/fusion/conv2d_transpose_fusion.h b/mindspore/core/ops/fusion/conv2d_transpose_fusion.h
new file mode 100644
index 00000000000..63882124253
--- /dev/null
+++ b/mindspore/core/ops/fusion/conv2d_transpose_fusion.h
@@ -0,0 +1,48 @@
+/**
+ * Copyright 2020 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
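To make the two-element kernel_size and dilation convention enforced above concrete, here is a hedged sketch of driving Conv2dTransposeFusion::Init. It is not part of the patch; the helper function name and the channel, kernel, and stride values are arbitrary examples.

// Illustrative sketch: a 3x3, stride-2 transposed-convolution fusion primitive.
#include <memory>
#include "ops/fusion/conv2d_transpose_fusion.h"

namespace mindspore {
namespace ops {
void Conv2dTransposeFusionUsageSketch() {
  auto deconv = std::make_shared<Conv2dTransposeFusion>();
  // in_channel = 16, out_channel = 32; kernel_size and dilation must each hold
  // exactly two positive entries, which the setters above check via CheckInteger.
  deconv->Init(16, 32, {3, 3}, 1, VALID, {0, 0, 0, 0}, {2, 2}, {1, 1});
  ActivationType act = deconv->get_activation_type();  // NO_ACTIVATION, since Init's default was used
  (void)act;
}
} // namespace ops
} // namespace mindspore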
+ */ + +#ifndef MINDSPORE_CORE_OPS_CONV2D_TRANSPOSE_FUSION_H_ +#define MINDSPORE_CORE_OPS_CONV2D_TRANSPOSE_FUSION_H_ +#include + +#include "ops/conv2d_transpose.h" +#include "abstract/abstract_value.h" +#include "utils/check_convert_utils.h" + +namespace mindspore { +namespace ops { +constexpr auto kNameConv2dTransposeFusion = "Conv2dTransposeFusion"; +class Conv2dTransposeFusion : public Conv2dTranspose { + public: + Conv2dTransposeFusion() : Conv2dTranspose(kNameConv2dTransposeFusion) { + InitIOName({"out_backprop", "filter", "input_sizes"}, {"output"}); + } + ~Conv2dTransposeFusion() = default; + MS_DECLARE_PARENT(Conv2dTransposeFusion, Conv2dTranspose); + void Init(int64_t in_channel, int64_t out_channel, const std::vector &kernel_size, int64_t mode = 1, + const PadMode &pad_mode = VALID, const std::vector &pad = {0, 0, 0, 0}, + const std::vector &stride = {1, 1}, const std::vector &dilation = {1, 1}, + int64_t group = 1, const Format &format = NCHW, const std::vector &pad_list = {0, 0, 0, 0}, + const ActivationType activation_type = NO_ACTIVATION); + void set_kernel_size(const std::vector &kernel_size); + void set_dilation(const std::vector &dilation); + void set_activation_type(const ActivationType activation_type); + ActivationType get_activation_type() const; +}; +} // namespace ops +} // namespace mindspore + +#endif // MINDSPORE_CORE_OPS_CONV2D_TRANSPOSE_FUSION_H_ diff --git a/mindspore/core/ops/fusion/depthwise_conv2d_fusion.cc b/mindspore/core/ops/fusion/depthwise_conv2d_fusion.cc new file mode 100644 index 00000000000..52b3d56ff72 --- /dev/null +++ b/mindspore/core/ops/fusion/depthwise_conv2d_fusion.cc @@ -0,0 +1,75 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "ops/fusion/depthwise_conv2d_fusion.h" +#include +#include "ops/op_utils.h" + +namespace mindspore { +namespace ops { +void DepthWiseConv2DFusion::Init(const int64_t channel_multiplier, const std::vector &kernel_size, + const int64_t mode, const PadMode &pad_mode, const std::vector &pad, + const std::vector &stride, const std::vector &dilation, + const int64_t group, const ActivationType &activation_type) { + auto prim_name = this->name(); + this->set_format(NCHW); + this->AddAttr("offset_a", MakeValue(0)); + this->set_mode(CheckAndConvertUtils::CheckInteger("mode", mode, kEqual, 3, prim_name)); + + this->set_kernel_size(CheckAndConvertUtils::CheckPositiveVector(kKernelSize, kernel_size, prim_name)); + auto strides = CheckAndConvertUtils::CheckPositiveVector(kStride, stride, this->name(), false, false); + if (strides[0] != strides[1]) { + MS_EXCEPTION(ValueError) << "The height and width of stride should be equal, but got height " << strides[0] + << ", width " << strides[1]; + } + this->set_stride(strides); + auto dilations = CheckAndConvertUtils::CheckPositiveVector(kDilation, dilation, this->name(), false, false); + if (dilations[0] != dilations[1]) { + MS_EXCEPTION(ValueError) << "The height and width of dilation should be equal, but got height " << dilations[0] + << ", width " << dilations[1]; + } + this->set_dilation(dilations); + this->set_pad_mode(pad_mode); + + CheckAndConvertUtils::CheckInteger("pad_size", pad.size(), kEqual, 4, prim_name); + if (pad_mode == PAD) { + for (auto item : pad) { + CheckAndConvertUtils::Check("pad_item", item, kGreaterEqual, "zeros_list", 0, prim_name); + } + } else { + CheckAndConvertUtils::Check(kPad, pad, kEqual, "zeros_list", {0, 0, 0, 0}, prim_name); + } + this->set_pad(CheckAndConvertUtils::CheckPositiveVector(kPad, pad, this->name(), true, true)); + + this->set_out_channel( + CheckAndConvertUtils::CheckInteger("channel_multiplier", channel_multiplier, kGreaterThan, 0, prim_name)); + this->set_group(CheckAndConvertUtils::CheckInteger("group", group, kGreaterThan, 0, prim_name)); + this->set_activation_type(activation_type); +} + +void DepthWiseConv2DFusion::set_activation_type(const ActivationType &activation_type) { + int64_t swi; + swi = activation_type; + this->AddAttr(kActivationType, MakeValue(swi)); +} + +ActivationType DepthWiseConv2DFusion::get_activation_type() const { + auto value_ptr = GetAttr(kActivationType); + return ActivationType(GetValue(value_ptr)); +} +REGISTER_PRIMITIVE_C(kNameDepthWiseConv2DFusion, DepthWiseConv2DFusion); +} // namespace ops +} // namespace mindspore diff --git a/mindspore/core/ops/fusion/depthwise_conv2d_fusion.h b/mindspore/core/ops/fusion/depthwise_conv2d_fusion.h new file mode 100644 index 00000000000..43e3a8be4fb --- /dev/null +++ b/mindspore/core/ops/fusion/depthwise_conv2d_fusion.h @@ -0,0 +1,41 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef MINDSPORE_CORE_OPS_DEPTHWISE_CONV2D_FUSION_H_ +#define MINDSPORE_CORE_OPS_DEPTHWISE_CONV2D_FUSION_H_ +#include + +#include "ops/depthwise_conv2d.h" +#include "ops/op_utils.h" +#include "utils/check_convert_utils.h" + +namespace mindspore { +namespace ops { +constexpr auto kNameDepthWiseConv2DFusion = "DepthWiseConv2DFusion"; +class DepthWiseConv2DFusion : public DepthWiseConv2D { + public: + MS_DECLARE_PARENT(DepthWiseConv2DFusion, DepthWiseConv2D); + void Init(const int64_t out_channel, const std::vector &kernel_size, const int64_t mode = 1, + const PadMode &pad_mode = VALID, const std::vector &pad = {0, 0, 0, 0}, + const std::vector &stride = {1, 1, 1, 1}, const std::vector &dilation = {1, 1, 1, 1}, + const int64_t group = 1, const ActivationType &activation_type = NO_ACTIVATION); + void set_activation_type(const ActivationType &activation_type); + ActivationType get_activation_type() const; +}; +} // namespace ops +} // namespace mindspore + +#endif // MINDSPORE_CORE_OPS_DEPTHWISE_CONV2D_FUSION_H_ diff --git a/mindspore/core/ops/fusion/div_fusion.cc b/mindspore/core/ops/fusion/div_fusion.cc new file mode 100644 index 00000000000..4fc79cd025c --- /dev/null +++ b/mindspore/core/ops/fusion/div_fusion.cc @@ -0,0 +1,37 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "ops/fusion/div_fusion.h" +#include +#include "ops/op_utils.h" + +namespace mindspore { +namespace ops { +void DivFusion::Init(const ActivationType &activation_type) { this->set_activation_type(activation_type); } + +void DivFusion::set_activation_type(const ActivationType &activation_type) { + int64_t swi; + swi = activation_type; + this->AddAttr(kActivationType, MakeValue(swi)); +} + +ActivationType DivFusion::get_activation_type() const { + auto value_ptr = GetAttr(kActivationType); + return ActivationType(GetValue(value_ptr)); +} +REGISTER_PRIMITIVE_C(kNameDivFusion, DivFusion); +} // namespace ops +} // namespace mindspore diff --git a/mindspore/core/ops/fusion/div_fusion.h b/mindspore/core/ops/fusion/div_fusion.h new file mode 100644 index 00000000000..dec9dde7d65 --- /dev/null +++ b/mindspore/core/ops/fusion/div_fusion.h @@ -0,0 +1,37 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef MINDSPORE_CORE_OPS_DIV_FUSION_H_ +#define MINDSPORE_CORE_OPS_DIV_FUSION_H_ +#include "ops/div.h" +#include "ops/op_utils.h" +#include "utils/check_convert_utils.h" + +namespace mindspore { +namespace ops { +constexpr auto kNameDivFusion = "DivFusion"; +class DivFusion : public Div { + public: + DivFusion() : Div(kNameDivFusion) {} + MS_DECLARE_PARENT(DivFusion, Div); + void Init(const ActivationType &activation_type = NO_ACTIVATION); + void set_activation_type(const ActivationType &activation_type); + ActivationType get_activation_type() const; +}; +} // namespace ops +} // namespace mindspore + +#endif // MINDSPORE_CORE_OPS_DIV_FUSION_H_ diff --git a/mindspore/core/c_ops/dropout.cc b/mindspore/core/ops/fusion/embedding_lookup_fusion.cc similarity index 58% rename from mindspore/core/c_ops/dropout.cc rename to mindspore/core/ops/fusion/embedding_lookup_fusion.cc index a149128acf6..934321bdb02 100644 --- a/mindspore/core/c_ops/dropout.cc +++ b/mindspore/core/ops/fusion/embedding_lookup_fusion.cc @@ -14,19 +14,18 @@ * limitations under the License. */ -#include "c_ops/dropout.h" -#include "c_ops/op_utils.h" -#include "utils/check_convert_utils.h" +#include "ops/fusion/embedding_lookup_fusion.h" +#include +#include "ops/op_utils.h" namespace mindspore { -void Dropout::Init(float keep_prob) { this->set_keep_prob(keep_prob); } -void Dropout::set_keep_prob(float keep_prob) { - CheckAndConvertUtils::CheckInRange(kKeepProb, keep_prob, kIncludeRight, {0.0, 1.0}, this->name()); - this->AddAttr(kKeepProb, MakeValue(keep_prob)); -} -float Dropout::get_keep_prob() { - auto value_ptr = this->GetAttr(kKeepProb); +namespace ops { +void EmbeddingLookupFusion::set_max_norm(const float max_norm) { this->AddAttr(kMaxNorm, MakeValue(max_norm)); } +float EmbeddingLookupFusion::get_max_norm() const { + auto value_ptr = GetAttr(kMaxNorm); return GetValue(value_ptr); } -REGISTER_PRIMITIVE_C(kNameDropout, Dropout); +void EmbeddingLookupFusion::Init(const float max_norm) { this->set_max_norm(max_norm); } +REGISTER_PRIMITIVE_C(kNameEmbeddingLookupFusion, EmbeddingLookupFusion); +} // namespace ops } // namespace mindspore diff --git a/mindspore/core/ops/fusion/embedding_lookup_fusion.h b/mindspore/core/ops/fusion/embedding_lookup_fusion.h new file mode 100644 index 00000000000..478da8413b7 --- /dev/null +++ b/mindspore/core/ops/fusion/embedding_lookup_fusion.h @@ -0,0 +1,44 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef MINDSPORE_CORE_OPS_EMBEDDING_LOOKUP_FUSION_H_ +#define MINDSPORE_CORE_OPS_EMBEDDING_LOOKUP_FUSION_H_ +#include +#include +#include +#include +#include "ops/primitive_c.h" +#include "abstract/abstract_value.h" +#include "utils/check_convert_utils.h" + +namespace mindspore { +namespace ops { +constexpr auto kNameEmbeddingLookupFusion = "EmbeddingLookupFusion"; +class EmbeddingLookupFusion : public PrimitiveC { + public: + EmbeddingLookupFusion() : PrimitiveC(kNameEmbeddingLookupFusion) { + InitIOName({"params", "indices", "offset"}, {"output"}); + } + ~EmbeddingLookupFusion() = default; + MS_DECLARE_PARENT(EmbeddingLookupFusion, PrimitiveC); + void Init(const float max_norm = 0.0); + void set_max_norm(const float max_norm); + float get_max_norm() const; +}; +} // namespace ops +} // namespace mindspore + +#endif // MINDSPORE_CORE_OPS_EMBEDDING_LOOKUP_FUSION_H_ diff --git a/mindspore/core/ops/fusion/exp_fusion.cc b/mindspore/core/ops/fusion/exp_fusion.cc new file mode 100644 index 00000000000..02d65ff43d4 --- /dev/null +++ b/mindspore/core/ops/fusion/exp_fusion.cc @@ -0,0 +1,53 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "ops/fusion/exp_fusion.h" +#include +#include +#include +#include +#include "ops/op_utils.h" + +namespace mindspore { +namespace ops { +void ExpFusion::Init(const float base, const float scale, const float shift) { + this->set_base(base); + this->set_scale(scale); + this->set_shift(shift); +} + +void ExpFusion::set_base(const float base) { this->AddAttr(kBase, MakeValue(base)); } + +void ExpFusion::set_scale(const float scale) { this->AddAttr(kScale, MakeValue(scale)); } + +void ExpFusion::set_shift(const float shift) { this->AddAttr(kShift, MakeValue(shift)); } + +float ExpFusion::get_base() const { + auto value_ptr = GetAttr(kBase); + return GetValue(value_ptr); +} +float ExpFusion::get_scale() const { + auto value_ptr = GetAttr(kScale); + return GetValue(value_ptr); +} +float ExpFusion::get_shift() const { + auto value_ptr = GetAttr(kShift); + return GetValue(value_ptr); +} + +REGISTER_PRIMITIVE_C(kNameExpFusion, ExpFusion); +} // namespace ops +} // namespace mindspore diff --git a/mindspore/core/ops/fusion/exp_fusion.h b/mindspore/core/ops/fusion/exp_fusion.h new file mode 100644 index 00000000000..c05683c5741 --- /dev/null +++ b/mindspore/core/ops/fusion/exp_fusion.h @@ -0,0 +1,42 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_CORE_OPS_EXP_FUSION_H_ +#define MINDSPORE_CORE_OPS_EXP_FUSION_H_ +#include "ops/exp.h" +#include "abstract/abstract_value.h" +#include "utils/check_convert_utils.h" + +namespace mindspore { +namespace ops { +constexpr auto kNameExpFusion = "ExpFusion"; +class ExpFusion : public Exp { + public: + ExpFusion() : Exp(kNameExpFusion) { InitIOName({"x"}, {"y"}); } + ~ExpFusion() = default; + MS_DECLARE_PARENT(ExpFusion, Exp); + void Init(const float base = -1.0, const float scale = 1.0, const float shift = 0.0); + void set_base(const float base); + void set_scale(const float scale); + void set_shift(const float shift); + float get_base() const; + float get_scale() const; + float get_shift() const; +}; +} // namespace ops +} // namespace mindspore + +#endif // MINDSPORE_CORE_OPS_EXP_FUSION_H_ diff --git a/mindspore/core/ops/fusion/full_connection.cc b/mindspore/core/ops/fusion/full_connection.cc new file mode 100644 index 00000000000..b51213a0cd6 --- /dev/null +++ b/mindspore/core/ops/fusion/full_connection.cc @@ -0,0 +1,116 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "ops/fusion/full_connection.h" +#include +#include "ops/op_utils.h" + +namespace mindspore { +namespace ops { +void FullConnection::set_has_bias(const bool has_bias) { this->AddAttr(kHasBias, MakeValue(has_bias)); } +bool FullConnection::get_has_bias() const { + auto value_ptr = GetAttr(kHasBias); + return GetValue(value_ptr); +} + +void FullConnection::set_axis(const int64_t axis) { this->AddAttr(kAxis, MakeValue(axis)); } +int64_t FullConnection::get_axis() const { + auto value_ptr = GetAttr(kAxis); + return GetValue(value_ptr); +} + +void FullConnection::set_use_axis(const bool use_axis) { this->AddAttr(kUseAxis, MakeValue(use_axis)); } +bool FullConnection::get_use_axis() const { + auto value_ptr = GetAttr(kUseAxis); + return GetValue(value_ptr); +} + +void FullConnection::set_activation_type(const ActivationType &activation_type) { + int64_t swi; + swi = activation_type; + this->AddAttr(kActivationType, MakeValue(swi)); +} +ActivationType FullConnection::get_activation_type() const { + auto value_ptr = GetAttr(kActivationType); + return ActivationType(GetValue(value_ptr)); +} +void FullConnection::Init(const bool has_bias, const int64_t axis, const bool use_axis, + const ActivationType &activation_type) { + this->set_has_bias(has_bias); + this->set_axis(axis); + this->set_use_axis(use_axis); + this->set_activation_type(activation_type); +} +AbstractBasePtr FullConnectionInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args) { + MS_EXCEPTION_IF_NULL(primitive); + auto full_prim = primitive->cast(); + MS_EXCEPTION_IF_NULL(full_prim); + auto prim_name = full_prim->name(); + MS_EXCEPTION_IF_NULL(input_args[0]); + MS_EXCEPTION_IF_NULL(input_args[1]); + auto input0 = input_args[0]; + auto input1 = input_args[1]; + auto input0_shape = CheckAndConvertUtils::ConvertShapePtrToShape("input0_shape", input0->BuildShape(), prim_name); + auto input1_shape = CheckAndConvertUtils::ConvertShapePtrToShape("input1_shape", input1->BuildShape(), prim_name); + auto prim_axis = full_prim->get_axis(); + if (full_prim->get_has_bias()) { + CheckAndConvertUtils::CheckInteger("input_args.size()", input_args.size(), kEqual, 3, prim_name); + } else { + CheckAndConvertUtils::CheckInteger("input_args.size()", input_args.size(), kEqual, 2, prim_name); + } + if (full_prim->get_use_axis() && (prim_axis < 1 || prim_axis > (int64_t)input0_shape.size())) { + MS_EXCEPTION(ValueError) << "Full Connection axis invalid"; + } + int64_t new_k = 1; + if (full_prim->get_use_axis()) { + for (size_t t = prim_axis; t < input0_shape.size(); t++) { + new_k *= input0_shape[t]; + } + if (new_k != input1_shape[1]) { + MS_EXCEPTION(ValueError) << "Input1 size invalid"; + } + } else { + new_k = input1_shape[1]; + } + if (full_prim->get_has_bias()) { + auto input2_shape = + CheckAndConvertUtils::ConvertShapePtrToShape("input2_shape", input_args[2]->BuildShape(), prim_name); + if (input2_shape[0] != input1_shape[0]) { + MS_EXCEPTION(ValueError) << "Bias size invalid"; + } + } + std::vector out_shape = {(int64_t)input0_shape.size()}; + if (full_prim->get_use_axis()) { + out_shape.resize(prim_axis + 1); + out_shape[prim_axis] = input1_shape[0]; + } else { + int64_t total = 1; + for (size_t i = 0; i < input0_shape.size(); i++) { + total *= input0_shape[i]; + } + out_shape.resize(2); + auto batch_size = total / new_k; + out_shape[0] = batch_size; + out_shape[1] = input1_shape[0]; + } + auto input0_type = input_args[0]->BuildType()->cast()->element(); + return 
diff --git a/mindspore/core/ops/fusion/full_connection.h b/mindspore/core/ops/fusion/full_connection.h
new file mode 100644
index 00000000000..18e22bae33f
--- /dev/null
+++ b/mindspore/core/ops/fusion/full_connection.h
@@ -0,0 +1,51 @@
+/**
+ * Copyright 2020 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef MINDSPORE_CORE_OPS_FULL_CONNECTION_FUSION_H_
+#define MINDSPORE_CORE_OPS_FULL_CONNECTION_FUSION_H_
+#include <map>
+#include <vector>
+#include <string>
+#include <memory>
+#include "ops/primitive_c.h"
+#include "abstract/abstract_value.h"
+#include "utils/check_convert_utils.h"
+
+namespace mindspore {
+namespace ops {
+constexpr auto kNameFullConnection = "FullConnection";
+class FullConnection : public PrimitiveC {
+ public:
+  FullConnection() : PrimitiveC(kNameFullConnection) { InitIOName({"x1", "x2", "b"}, {"output"}); }
+  ~FullConnection() = default;
+  MS_DECLARE_PARENT(FullConnection, PrimitiveC);
+  void Init(const bool has_bias, const int64_t axis, const bool use_axis, const ActivationType &activation_type);
+  void set_has_bias(const bool has_bias);
+  void set_axis(const int64_t axis);
+  void set_use_axis(const bool use_axis);
+  void set_activation_type(const ActivationType &activation_type);
+  bool get_has_bias() const;
+  int64_t get_axis() const;
+  bool get_use_axis() const;
+  ActivationType get_activation_type() const;
+};
+AbstractBasePtr FullConnectionInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive,
+                                    const std::vector<AbstractBasePtr> &input_args);
+using PrimFullConnectionPtr = std::shared_ptr<FullConnection>;
+}  // namespace ops
+}  // namespace mindspore
+
+#endif  // MINDSPORE_CORE_OPS_FULL_CONNECTION_FUSION_H_
diff --git a/mindspore/core/ops/fusion/l2_normalize_fusion.cc b/mindspore/core/ops/fusion/l2_normalize_fusion.cc
new file mode 100644
index 00000000000..3c8198fb442
--- /dev/null
+++ b/mindspore/core/ops/fusion/l2_normalize_fusion.cc
@@ -0,0 +1,42 @@
+/**
+ * Copyright 2020 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +#include "ops/fusion/l2_normalize_fusion.h" +#include +#include "ops/op_utils.h" + +namespace mindspore { +namespace ops { +void L2NormalizeFusion::Init(const std::vector &axis, const float epsilon, + const ActivationType &activation_type) { + this->set_axis(axis); + this->set_epsilon(epsilon); + this->set_activation_type(activation_type); +} + +void L2NormalizeFusion::set_activation_type(const ActivationType &activation_type) { + int64_t swi; + swi = activation_type; + this->AddAttr(kActivationType, MakeValue(swi)); +} + +ActivationType L2NormalizeFusion::get_activation_type() const { + auto value_ptr = GetAttr(kActivationType); + return ActivationType(GetValue(value_ptr)); +} +REGISTER_PRIMITIVE_C(kNameL2NormalizeFusion, L2NormalizeFusion); +} // namespace ops +} // namespace mindspore diff --git a/mindspore/core/ops/fusion/l2_normalize_fusion.h b/mindspore/core/ops/fusion/l2_normalize_fusion.h new file mode 100644 index 00000000000..56dea08c94b --- /dev/null +++ b/mindspore/core/ops/fusion/l2_normalize_fusion.h @@ -0,0 +1,40 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_CORE_OPS_L2_NORMALIZE_FUSION_H_ +#define MINDSPORE_CORE_OPS_L2_NORMALIZE_FUSION_H_ +#include + +#include "ops/l2_normalize.h" +#include "ops/op_utils.h" +#include "utils/check_convert_utils.h" + +namespace mindspore { +namespace ops { +constexpr auto kNameL2NormalizeFusion = "L2NormalizeFusion"; +class L2NormalizeFusion : public L2Normalize { + public: + L2NormalizeFusion() : L2Normalize(kNameL2NormalizeFusion) {} + MS_DECLARE_PARENT(L2NormalizeFusion, L2Normalize); + void Init(const std::vector &axis, const float epsilon = 1e-4, + const ActivationType &activation_type = NO_ACTIVATION); + void set_activation_type(const ActivationType &activation_type); + ActivationType get_activation_type() const; +}; +} // namespace ops +} // namespace mindspore + +#endif // MINDSPORE_CORE_OPS_L2_NORMALIZE_FUSION_H_ diff --git a/mindspore/core/ops/fusion/layer_norm_fusion.cc b/mindspore/core/ops/fusion/layer_norm_fusion.cc new file mode 100644 index 00000000000..5d9d3a2f28c --- /dev/null +++ b/mindspore/core/ops/fusion/layer_norm_fusion.cc @@ -0,0 +1,38 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "ops/fusion/layer_norm_fusion.h" + +namespace mindspore { +namespace ops { +void LayerNormFusion::Init(const int64_t begin_norm_axis, const int64_t begin_params_axis, const float epsilon, + const bool elementwise_affine) { + this->set_begin_norm_axis(begin_norm_axis); + this->set_begin_params_axis(begin_params_axis); + this->set_epsilon(epsilon); + this->set_elementwise_affine(elementwise_affine); +} + +void LayerNormFusion::set_elementwise_affine(const bool elementwise_affine) { + AddAttr(kElementwiseAffine, MakeValue(elementwise_affine)); +} + +bool LayerNormFusion::get_elementwise_affine() const { + auto value_ptr = GetAttr(kElementwiseAffine); + return GetValue(value_ptr); +} +} // namespace ops +} // namespace mindspore diff --git a/mindspore/core/ops/fusion/layer_norm_fusion.h b/mindspore/core/ops/fusion/layer_norm_fusion.h new file mode 100644 index 00000000000..7e230dbe15f --- /dev/null +++ b/mindspore/core/ops/fusion/layer_norm_fusion.h @@ -0,0 +1,46 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_CORE_OPS_LAYER_NORM_FUSION_H_ +#define MINDSPORE_CORE_OPS_LAYER_NORM_FUSION_H_ +#include +#include + +#include "ops/layer_norm.h" +#include "ops/op_utils.h" +#include "utils/check_convert_utils.h" + +namespace mindspore { +namespace ops { +constexpr auto kNameLayerNormFusion = "LayerNormFusion"; +class LayerNormFusion : public LayerNorm { + public: + LayerNormFusion() : LayerNorm(kNameLayerNormFusion) {} + ~LayerNormFusion() = default; + MS_DECLARE_PARENT(LayerNormFusion, LayerNorm); + void Init(const int64_t begin_norm_axis = 1, const int64_t begin_params_axis = 1, const float epsilon = 1e-7, + const bool elementwise_affine = false); + void set_elementwise_affine(const bool elementwise_affine); + bool get_elementwise_affine() const; +}; + +AbstractBasePtr LayerNormFusionInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args); +using PrimLayerNormFusionPtr = std::shared_ptr; +} // namespace ops +} // namespace mindspore + +#endif // MINDSPORE_CORE_OPS_LAYER_NORM_FUSION_H_ diff --git a/mindspore/core/ops/fusion/max_pool_fusion.cc b/mindspore/core/ops/fusion/max_pool_fusion.cc new file mode 100644 index 00000000000..c8e687342cc --- /dev/null +++ b/mindspore/core/ops/fusion/max_pool_fusion.cc @@ -0,0 +1,109 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+#include "ops/fusion/max_pool_fusion.h"
+
+namespace mindspore {
+namespace ops {
+void MaxPoolFusion::Init(const std::vector<int64_t> &kernel_size, const std::vector<int64_t> &stride,
+                         const PadMode &pad_mode, const Format &format, const std::vector<int64_t> &pad,
+                         const RoundMode &round_mode, const bool global, const ActivationType activation_type) {
+  this->set_pad_mode(pad_mode);
+  this->set_kernel_size(kernel_size);
+  this->set_strides(stride);
+  this->set_format(format);
+  this->set_pad(pad);
+  this->set_round_mode(round_mode);
+  this->set_global(global);
+  this->set_activation_type(activation_type);
+}
+
+void MaxPoolFusion::set_global(const bool global) { AddAttr(kGlobal, MakeValue(global)); }
+
+void MaxPoolFusion::set_activation_type(ActivationType activation_type) {
+  int64_t swi = activation_type;
+  this->AddAttr(kActivationType, MakeValue(swi));
+}
+
+bool MaxPoolFusion::get_global() const {
+  auto value_ptr = GetAttr(kGlobal);
+  return GetValue<bool>(value_ptr);
+}
+
+ActivationType MaxPoolFusion::get_activation_type() const {
+  auto value_ptr = GetAttr(kActivationType);
+  return ActivationType(GetValue<int64_t>(value_ptr));
+}
+
+namespace {
+abstract::ShapePtr InferShape(const PrimitivePtr &primitive, const std::vector<AbstractBasePtr> &input_args) {
+  MS_EXCEPTION_IF_NULL(primitive);
+  auto pool_prim = primitive->cast<PrimMaxPoolFusionPtr>();
+  MS_EXCEPTION_IF_NULL(pool_prim);
+  auto op_name = pool_prim->name();
+  auto in_shape = CheckAndConvertUtils::ConvertShapePtrToShape("x_shape", input_args[0]->GetShapeTrack(), op_name);
+  if (pool_prim->get_format() == NHWC) {
+    in_shape = {in_shape[0], in_shape[3], in_shape[1], in_shape[2]};
+  }
+  CheckAndConvertUtils::CheckInteger("x_rank", in_shape.size(), kEqual, 4, op_name);
+  auto kernel_size = pool_prim->get_kernel_size();
+  auto pad_mode = pool_prim->get_pad_mode();
+  auto batch = in_shape[0];
+  auto channel = in_shape[1];
+  auto in_h = in_shape[2];
+  auto in_w = in_shape[3];
+
+  auto strides = pool_prim->get_strides();
+  auto kernel_h = kernel_size[2];
+  auto kernel_w = kernel_size[3];
+  auto stride_h = strides[2];
+  auto stride_w = strides[3];
+  int64_t out_h = -1;
+  int64_t out_w = -1;
+  if (pad_mode == VALID) {
+    out_h = ceil((in_h - (kernel_h - 1)) / stride_h);
+    out_w = ceil((in_w - (kernel_w - 1)) / stride_w);
+  } else if (pad_mode == SAME) {
+    out_h = ceil(in_h / stride_h);
+    out_w = ceil(in_w / stride_w);
+  }
+  std::vector<int64_t> out_shape = {batch, channel, out_h, out_w};
+  if (pool_prim->get_format() == NHWC) {
+    out_shape = {batch, out_h, out_w, channel};
+  }
+  if (std::any_of(out_shape.begin(), out_shape.end(), [](int64_t a) { return a <= 0; })) {
+    MS_LOG(EXCEPTION) << "Kernel size is not valid.";
+  }
+  return std::make_shared<abstract::Shape>(out_shape);
+}
+
+TypePtr InferType(const PrimitivePtr &prim, const std::vector<AbstractBasePtr> &input_args) {
+  if (std::any_of(input_args.begin(), input_args.end(), [](AbstractBasePtr a) { return a == nullptr; })) {
+    MS_LOG(EXCEPTION) << "nullptr";
+  }
+  return input_args[0]->BuildType();
+}
+}  // namespace
+
+AbstractBasePtr MaxPoolFusionInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive,
+                                   const std::vector<AbstractBasePtr> &input_args) {
+  return std::make_shared<abstract::AbstractTensor>(InferType(primitive, input_args),
+                                                    InferShape(primitive, input_args)->shape());
+}
+REGISTER_PRIMITIVE_EVAL_IMPL(MaxPoolFusion, prim::kPrimMaxPool, MaxPoolFusionInfer);
+}  // namespace ops
+}  // namespace mindspore
diff --git a/mindspore/core/ops/fusion/max_pool_fusion.h b/mindspore/core/ops/fusion/max_pool_fusion.h
new file mode 100644
index 00000000000..affce94dba6
--- /dev/null
+++ 
b/mindspore/core/ops/fusion/max_pool_fusion.h @@ -0,0 +1,50 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_CORE_OPS_MAX_POOL_FUSION_H_ +#define MINDSPORE_CORE_OPS_MAX_POOL_FUSION_H_ +#include +#include + +#include "ops/max_pool.h" +#include "ops/op_utils.h" +#include "utils/check_convert_utils.h" + +namespace mindspore { +namespace ops { +constexpr auto kNameMaxPoolFusion = "MaxPoolFusion"; +class MaxPoolFusion : public MaxPool { + public: + MaxPoolFusion() : MaxPool(kNameMaxPoolFusion) { InitIOName({"x"}, {"output"}); } + ~MaxPoolFusion() = default; + MS_DECLARE_PARENT(MaxPoolFusion, MaxPool); + void Init(const std::vector &kernel_size = {1}, const std::vector &stride = {1}, + const PadMode &pad_mode = VALID, const Format &format = NCHW, + const std::vector &pad = {0, 0, 0, 0}, const RoundMode &round_mode = FLOOR, + const bool global = false, const ActivationType activation_type = NO_ACTIVATION); + void set_global(const bool global); + void set_activation_type(const ActivationType activation_type); + bool get_global() const; + ActivationType get_activation_type() const; +}; + +AbstractBasePtr MaxPoolFusionInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args); +using PrimMaxPoolFusionPtr = std::shared_ptr; +} // namespace ops +} // namespace mindspore + +#endif // MINDSPORE_CORE_OPS_MAX_POOL_FUSION_H_ diff --git a/mindspore/core/ops/fusion/mul_fusion.cc b/mindspore/core/ops/fusion/mul_fusion.cc new file mode 100644 index 00000000000..429686439c2 --- /dev/null +++ b/mindspore/core/ops/fusion/mul_fusion.cc @@ -0,0 +1,38 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "ops/fusion/mul_fusion.h" +#include +#include +#include +#include +#include "ops/op_utils.h" + +namespace mindspore { +namespace ops { +void MulFusion::set_activation_type(const ActivationType &activation_type) { + int64_t swi; + swi = activation_type; + this->AddAttr(kActivationType, MakeValue(swi)); +} +ActivationType MulFusion::get_activation_type() const { + auto value_ptr = GetAttr(kActivationType); + return ActivationType(GetValue(value_ptr)); +} +void MulFusion::Init(const ActivationType &activation_type) { this->set_activation_type(activation_type); } +REGISTER_PRIMITIVE_C(kNameMulFusion, MulFusion); +} // namespace ops +} // namespace mindspore diff --git a/mindspore/core/ops/fusion/mul_fusion.h b/mindspore/core/ops/fusion/mul_fusion.h new file mode 100644 index 00000000000..50ef505900d --- /dev/null +++ b/mindspore/core/ops/fusion/mul_fusion.h @@ -0,0 +1,38 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_CORE_OPS_MUL_FUSION_H_ +#define MINDSPORE_CORE_OPS_MUL_FUSION_H_ +#include "ops/mul.h" +#include "abstract/abstract_value.h" +#include "utils/check_convert_utils.h" + +namespace mindspore { +namespace ops { +constexpr auto kNameMulFusion = "MulFusion"; +class MulFusion : public Mul { + public: + MulFusion() : Mul(kNameMulFusion) { InitIOName({"x", "y"}, {"output"}); } + ~MulFusion() = default; + MS_DECLARE_PARENT(MulFusion, Mul); + void Init(const ActivationType &activation_type); + void set_activation_type(const ActivationType &activation_type); + ActivationType get_activation_type() const; +}; +} // namespace ops +} // namespace mindspore + +#endif // MINDSPORE_CORE_OPS_MUL_FUSION_H_ diff --git a/mindspore/core/ops/fusion/pad_fusion.cc b/mindspore/core/ops/fusion/pad_fusion.cc new file mode 100644 index 00000000000..be0643e9310 --- /dev/null +++ b/mindspore/core/ops/fusion/pad_fusion.cc @@ -0,0 +1,52 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "ops/fusion/pad_fusion.h" +#include +#include +#include +#include +#include "ops/op_utils.h" + +namespace mindspore { +namespace ops { +void PadFusion::Init(const PaddingMode &padding_mode, const float constant_value) { + this->set_padding_mode(padding_mode); + this->set_constant_value(constant_value); +} + +void PadFusion::set_padding_mode(const PaddingMode &padding_mode) { + int64_t swi; + swi = padding_mode; + this->AddAttr(kPaddingMode, MakeValue(swi)); +} + +void PadFusion::set_constant_value(const float constant_value) { + this->AddAttr(kConstantValue, MakeValue(constant_value)); +} + +PaddingMode PadFusion::get_padding_mode() const { + auto value_ptr = GetAttr(kPaddingMode); + return PaddingMode(GetValue(value_ptr)); +} +float PadFusion::get_constant_value() const { + auto value_ptr = GetAttr(kConstantValue); + return GetValue(value_ptr); +} + +REGISTER_PRIMITIVE_C(kNamePadFusion, PadFusion); +} // namespace ops +} // namespace mindspore diff --git a/mindspore/core/ops/fusion/pad_fusion.h b/mindspore/core/ops/fusion/pad_fusion.h new file mode 100644 index 00000000000..b9dd577b53e --- /dev/null +++ b/mindspore/core/ops/fusion/pad_fusion.h @@ -0,0 +1,40 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_CORE_OPS_PAD_FUSION_H_ +#define MINDSPORE_CORE_OPS_PAD_FUSION_H_ +#include "ops/pad.h" +#include "abstract/abstract_value.h" +#include "utils/check_convert_utils.h" + +namespace mindspore { +namespace ops { +constexpr auto kNamePadFusion = "PadFusion"; +class PadFusion : public Pad { + public: + PadFusion() : Pad(kNamePadFusion) { InitIOName({"x"}, {"y"}); } + ~PadFusion() = default; + MS_DECLARE_PARENT(PadFusion, Pad); + void Init(const PaddingMode &padding_mode, const float constant_value); + void set_padding_mode(const PaddingMode &padding_mode); + void set_constant_value(const float constant_value); + PaddingMode get_padding_mode() const; + float get_constant_value() const; +}; +} // namespace ops +} // namespace mindspore + +#endif // MINDSPORE_CORE_OPS_PAD_FUSION_H_ diff --git a/mindspore/core/c_ops/arg_min.cc b/mindspore/core/ops/fusion/partial_fusion.cc similarity index 57% rename from mindspore/core/c_ops/arg_min.cc rename to mindspore/core/ops/fusion/partial_fusion.cc index b7f112cae88..709e28a9e16 100644 --- a/mindspore/core/c_ops/arg_min.cc +++ b/mindspore/core/ops/fusion/partial_fusion.cc @@ -14,25 +14,19 @@ * limitations under the License. 
*/ -#include "c_ops/arg_min.h" +#include "ops/fusion/partial_fusion.h" +#include "ops/op_utils.h" namespace mindspore { -void ArgMin::Init(bool keep_dims, int64_t axis) { - set_axis(axis); - set_keep_dims(keep_dims); +namespace ops { +void PartialFusion::Init(const int64_t sub_graph_index) { this->set_sub_graph_index(sub_graph_index); } +void PartialFusion::set_sub_graph_index(const int64_t sub_graph_index) { + this->AddAttr(kSubGraphIndex, MakeValue(sub_graph_index)); } - -void ArgMin::set_axis(int64_t axis) { this->AddAttr(kAxis, MakeValue(axis)); } -void ArgMin::set_keep_dims(bool keep_dims) { this->AddAttr(kOutputType, MakeValue(keep_dims)); } - -int64_t ArgMin::get_axis() { - auto value_ptr = GetAttr(kAxis); +int64_t PartialFusion::get_sub_graph_index() const { + auto value_ptr = GetAttr(kSubGraphIndex); return GetValue(value_ptr); } - -bool ArgMin::get_keep_dims() { - auto value_ptr = GetAttr(kKeepDims); - return GetValue(value_ptr); -} -REGISTER_PRIMITIVE_C(kNameArgMin, ArgMin); +REGISTER_PRIMITIVE_C(kNamePartialFusion, PartialFusion); +} // namespace ops } // namespace mindspore diff --git a/mindspore/core/ops/fusion/partial_fusion.h b/mindspore/core/ops/fusion/partial_fusion.h new file mode 100644 index 00000000000..df4544cee42 --- /dev/null +++ b/mindspore/core/ops/fusion/partial_fusion.h @@ -0,0 +1,38 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_CORE_OPS_PARTIAL_FUSION_H_ +#define MINDSPORE_CORE_OPS_PARTIAL_FUSION_H_ +#include "ops/primitive_c.h" +#include "abstract/abstract_value.h" +#include "utils/check_convert_utils.h" + +namespace mindspore { +namespace ops { +constexpr auto kNamePartialFusion = "PartialFusion"; +class PartialFusion : public PrimitiveC { + public: + PartialFusion() : PrimitiveC(kNamePartialFusion) {} + ~PartialFusion() = default; + MS_DECLARE_PARENT(PartialFusion, PrimitiveC); + void Init(const int64_t sub_graph_index); + void set_sub_graph_index(const int64_t sub_graph_index); + int64_t get_sub_graph_index() const; +}; +} // namespace ops +} // namespace mindspore + +#endif // MINDSPORE_CORE_OPS_PARTIAL_FUSION_H_ diff --git a/mindspore/core/ops/fusion/pow_fusion.cc b/mindspore/core/ops/fusion/pow_fusion.cc new file mode 100644 index 00000000000..e350c40518b --- /dev/null +++ b/mindspore/core/ops/fusion/pow_fusion.cc @@ -0,0 +1,66 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+#include <map>
+#include <memory>
+#include <string>
+#include <vector>
+#include "ops/fusion/pow_fusion.h"
+#include "ops/op_utils.h"
+
+namespace mindspore {
+namespace ops {
+void PowFusion::Init(const float &scale, const float &shift) {
+  this->set_scale(scale);
+  this->set_shift(shift);
+}
+
+void PowFusion::set_scale(const float &scale) { this->AddAttr(kScale, MakeValue(scale)); }
+void PowFusion::set_shift(const float &shift) { this->AddAttr(kShift, MakeValue(shift)); }
+
+float PowFusion::get_scale() const { return GetValue<float>(GetAttr(kScale)); }
+float PowFusion::get_shift() const { return GetValue<float>(GetAttr(kShift)); }
+
+namespace {
+abstract::ShapePtr InferShape(const PrimitivePtr &primitive, const std::vector<AbstractBasePtr> &input_args) {
+  MS_EXCEPTION_IF_NULL(primitive);
+  auto op_name = primitive->name();
+  return BroadCastInferShape(op_name, input_args);
+}
+
+TypePtr InferType(const PrimitivePtr &prim, const std::vector<AbstractBasePtr> &input_args) {
+  for (const auto &item : input_args) {
+    MS_EXCEPTION_IF_NULL(item);
+  }
+  std::map<std::string, TypePtr> types;
+  types.emplace("x", input_args[0]->BuildType());
+  types.emplace("y", input_args[1]->BuildType());
+  auto infer_type = CheckAndConvertUtils::CheckTensorTypeSame(types, common_valid_types, prim->name());
+  return TypeIdToType(infer_type);
+}
+}  // namespace
+
+AbstractBasePtr PowFusionInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive,
+                               const std::vector<AbstractBasePtr> &input_args) {
+  return std::make_shared<abstract::AbstractTensor>(InferType(primitive, input_args),
+                                                    InferShape(primitive, input_args)->shape());
+}
+REGISTER_PRIMITIVE_EVAL_IMPL(PowFusion, prim::kPrimPowFusion, PowFusionInfer);
+REGISTER_PRIMITIVE_C(kNamePowFusion, PowFusion);
+}  // namespace ops
+}  // namespace mindspore
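PowFusionInfer delegates its output shape to BroadCastInferShape from ops/op_utils. For readers unfamiliar with that helper, the standalone sketch below illustrates the right-aligned broadcasting rule it applies to the two Pow inputs; it is an illustration only, not the MindSpore implementation, and omits dynamic-shape and error-reporting details.

// Framework-free sketch of NumPy-style shape broadcasting, as used for Pow(x, y).
#include <algorithm>
#include <cstdint>
#include <iostream>
#include <stdexcept>
#include <vector>

std::vector<int64_t> BroadcastShapes(std::vector<int64_t> x, std::vector<int64_t> y) {
  // Left-pad the shorter shape with 1s so both ranks match.
  while (x.size() < y.size()) x.insert(x.begin(), 1);
  while (y.size() < x.size()) y.insert(y.begin(), 1);
  std::vector<int64_t> out(x.size());
  for (size_t i = 0; i < x.size(); ++i) {
    if (x[i] != y[i] && x[i] != 1 && y[i] != 1) {
      throw std::invalid_argument("shapes cannot be broadcast together");
    }
    out[i] = std::max(x[i], y[i]);  // the non-1 extent wins
  }
  return out;
}

int main() {
  for (auto dim : BroadcastShapes({8, 1, 4}, {3, 1})) std::cout << dim << ' ';  // prints "8 3 4"
  std::cout << std::endl;
  return 0;
}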
diff --git a/mindspore/core/ops/fusion/pow_fusion.h b/mindspore/core/ops/fusion/pow_fusion.h
new file mode 100644
index 00000000000..2e78dd2d0ae
--- /dev/null
+++ b/mindspore/core/ops/fusion/pow_fusion.h
@@ -0,0 +1,40 @@
+/**
+ * Copyright 2020 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef MINDSPORE_CORE_OPS_POW_FUSION_H_
+#define MINDSPORE_CORE_OPS_POW_FUSION_H_
+#include "ops/pow.h"
+#include "abstract/abstract_value.h"
+#include "utils/check_convert_utils.h"
+
+namespace mindspore {
+namespace ops {
+constexpr auto kNamePowFusion = "PowFusion";
+class PowFusion : public Pow {
+ public:
+  PowFusion() : Pow(kNamePowFusion) {}
+  ~PowFusion() = default;
+  MS_DECLARE_PARENT(PowFusion, Pow);
+  void Init(const float &scale, const float &shift);
+  void set_scale(const float &scale);
+  void set_shift(const float &shift);
+  float get_scale() const;
+  float get_shift() const;
+};
+}  // namespace ops
+}  // namespace mindspore
+
+#endif  // MINDSPORE_CORE_OPS_POW_FUSION_H_
diff --git a/mindspore/core/ops/fusion/prelu_fusion.cc b/mindspore/core/ops/fusion/prelu_fusion.cc
new file mode 100644
index 00000000000..e8fd3b2b1d8
--- /dev/null
+++ b/mindspore/core/ops/fusion/prelu_fusion.cc
@@ -0,0 +1,48 @@
+/**
+ * Copyright 2020 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ops/fusion/prelu_fusion.h"
+#include <map>
+#include <memory>
+#include <string>
+#include <vector>
+#include "ops/op_utils.h"
+
+namespace mindspore {
+namespace ops {
+void PReLUFusion::Init(const bool channel_shared, const std::vector<float> &slope) {
+  this->set_channel_shared(channel_shared);
+  this->set_slope(slope);
+}
+
+void PReLUFusion::set_channel_shared(const bool channel_shared) {
+  this->AddAttr(kChannelShared, MakeValue(channel_shared));
+}
+
+void PReLUFusion::set_slope(const std::vector<float> &slope) { this->AddAttr(kSlope, MakeValue(slope)); }
+
+bool PReLUFusion::get_channel_shared() const {
+  auto value_ptr = GetAttr(kChannelShared);
+  return GetValue<bool>(value_ptr);
+}
+std::vector<float> PReLUFusion::get_slope() const {
+  auto value_ptr = GetAttr(kSlope);
+  return GetValue<std::vector<float>>(value_ptr);
+}
+
+REGISTER_PRIMITIVE_C(kNamePReLUFusion, PReLUFusion);
+}  // namespace ops
+}  // namespace mindspore
diff --git a/mindspore/core/ops/fusion/prelu_fusion.h b/mindspore/core/ops/fusion/prelu_fusion.h
new file mode 100644
index 00000000000..8f96b316d79
--- /dev/null
+++ b/mindspore/core/ops/fusion/prelu_fusion.h
@@ -0,0 +1,42 @@
+/**
+ * Copyright 2020 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +#ifndef MINDSPORE_CORE_OPS_PRELU_FUSION_H_ +#define MINDSPORE_CORE_OPS_PRELU_FUSION_H_ +#include + +#include "ops/prelu.h" +#include "abstract/abstract_value.h" +#include "utils/check_convert_utils.h" + +namespace mindspore { +namespace ops { +constexpr auto kNamePReLUFusion = "PReLUFusion"; +class PReLUFusion : public PReLU { + public: + PReLUFusion() : PReLU(kNamePReLUFusion) {} + ~PReLUFusion() = default; + MS_DECLARE_PARENT(PReLUFusion, PReLU); + void Init(const bool channel_shared, const std::vector &slope); + void set_channel_shared(const bool channel_shared); + void set_slope(const std::vector &slope); + bool get_channel_shared() const; + std::vector get_slope() const; +}; +} // namespace ops +} // namespace mindspore + +#endif // MINDSPORE_CORE_OPS_PRELU_FUSION_H_ diff --git a/mindspore/core/ops/fusion/reduce_fusion.cc b/mindspore/core/ops/fusion/reduce_fusion.cc new file mode 100644 index 00000000000..4ea0b83d7a8 --- /dev/null +++ b/mindspore/core/ops/fusion/reduce_fusion.cc @@ -0,0 +1,74 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include +#include +#include +#include +#include +#include "ops/fusion/reduce_fusion.h" +#include "ops/op_utils.h" +#include "utils/check_convert_utils.h" +#include "abstract/primitive_infer_map.h" + +namespace mindspore { +namespace ops { + +void ReduceFusion::set_keep_dims(const bool keep_dims) { this->AddAttr(kKeepDims, MakeValue(keep_dims)); } + +void ReduceFusion::set_mode(const ReduceMode mode) { + int64_t swi; + swi = mode; + this->AddAttr(kMode, MakeValue(swi)); +} + +void ReduceFusion::set_reduce_to_end(const bool reduce_to_end) { + this->AddAttr(kReduceToEnd, MakeValue(reduce_to_end)); +} + +void ReduceFusion::set_coeff(const float coeff) { this->AddAttr(kCoeff, MakeValue(coeff)); } + +bool ReduceFusion::get_keep_dims() const { + auto value_ptr = GetAttr(kKeepDims); + return GetValue(value_ptr); +} + +ReduceMode ReduceFusion::get_mode() const { + auto value_ptr = GetAttr(kMode); + return ReduceMode(GetValue(value_ptr)); +} + +bool ReduceFusion::get_reduce_to_end() const { + auto value_ptr = GetAttr(kReduceToEnd); + return GetValue(value_ptr); +} + +float ReduceFusion::get_coeff() const { + auto value_ptr = GetAttr(kCoeff); + return GetValue(value_ptr); +} + +void ReduceFusion::Init(const bool keep_dims, const ReduceMode mode, const bool reduce_to_end, const float coeff) { + this->set_keep_dims(keep_dims); + this->set_mode(mode); + this->set_reduce_to_end(reduce_to_end); + this->set_coeff(coeff); +} + +REGISTER_PRIMITIVE_EVAL_IMPL(ReduceFusion, prim::kPrimReduceMean, ReduceInfer); +REGISTER_PRIMITIVE_C(kNameReduceFusion, ReduceFusion); +} // namespace ops +} // namespace mindspore diff --git a/mindspore/core/ops/fusion/reduce_fusion.h b/mindspore/core/ops/fusion/reduce_fusion.h new file mode 100644 index 00000000000..64b8d0029bf --- /dev/null +++ b/mindspore/core/ops/fusion/reduce_fusion.h @@ -0,0 +1,52 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * 
+ * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_CORE_OPS_REDUCE_FUSION_H_ +#define MINDSPORE_CORE_OPS_REDUCE_FUSION_H_ +#include +#include +#include +#include +#include "ops/reduce.h" +#include "abstract/abstract_value.h" +#include "utils/check_convert_utils.h" + +namespace mindspore { +namespace ops { +constexpr auto kNameReduceFusion = "ReduceFusion"; +class ReduceFusion : public Reduce { + public: + ReduceFusion() : Reduce(kNameReduceFusion) {} + ~ReduceFusion() = default; + MS_DECLARE_PARENT(ReduceFusion, PrimitiveC); + void Init(const bool keep_dims = false, const ReduceMode mode = ReduceMode::Reduce_Mean, + const bool reduce_to_end = false, const float coeff = 1.0); + void set_keep_dims(const bool keep_dims); + void set_mode(const ReduceMode mode); + void set_reduce_to_end(const bool reduce_to_end); + void set_coeff(const float coeff); + bool get_keep_dims() const; + ReduceMode get_mode() const; + bool get_reduce_to_end() const; + float get_coeff() const; +}; +AbstractBasePtr ReduceFusionInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args); +using PrimReduceFusiuonPtr = std::shared_ptr; +} // namespace ops +} // namespace mindspore + +#endif // MINDSPORE_CORE_OPS_REDUCE_FUSION_H_ diff --git a/mindspore/core/ops/fusion/scale_fusion.cc b/mindspore/core/ops/fusion/scale_fusion.cc new file mode 100644 index 00000000000..23fa6243e9d --- /dev/null +++ b/mindspore/core/ops/fusion/scale_fusion.cc @@ -0,0 +1,40 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "ops/fusion/scale_fusion.h" +#include +#include "ops/op_utils.h" + +namespace mindspore { +namespace ops { +void ScaleFusion::Init(const int64_t axis, const ActivationType &activation_type) { + this->set_axis(axis); + this->set_activation_type(activation_type); +} + +void ScaleFusion::set_activation_type(const ActivationType &activation_type) { + int64_t swi; + swi = activation_type; + this->AddAttr(kActivationType, MakeValue(swi)); +} + +ActivationType ScaleFusion::get_activation_type() const { + auto value_ptr = GetAttr(kActivationType); + return ActivationType(GetValue(value_ptr)); +} +REGISTER_PRIMITIVE_C(kNameScaleFusion, ScaleFusion); +} // namespace ops +} // namespace mindspore diff --git a/mindspore/core/ops/fusion/scale_fusion.h b/mindspore/core/ops/fusion/scale_fusion.h new file mode 100644 index 00000000000..bc2d664b54b --- /dev/null +++ b/mindspore/core/ops/fusion/scale_fusion.h @@ -0,0 +1,37 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_CORE_OPS_SCALE_FUSION_H_ +#define MINDSPORE_CORE_OPS_SCALE_FUSION_H_ +#include "ops/scale.h" +#include "ops/op_utils.h" +#include "utils/check_convert_utils.h" + +namespace mindspore { +namespace ops { +constexpr auto kNameScaleFusion = "ScaleFusion"; +class ScaleFusion : public Scale { + public: + ScaleFusion() : Scale(kNameScaleFusion) {} + MS_DECLARE_PARENT(ScaleFusion, Scale); + void Init(const int64_t axis = -1, const ActivationType &activation_type = NO_ACTIVATION); + void set_activation_type(const ActivationType &activation_type); + ActivationType get_activation_type() const; +}; +} // namespace ops +} // namespace mindspore + +#endif // MINDSPORE_CORE_OPS_SCALE_FUSION_H_ diff --git a/mindspore/core/ops/fusion/slice_fusion.cc b/mindspore/core/ops/fusion/slice_fusion.cc new file mode 100644 index 00000000000..b26b2a59aff --- /dev/null +++ b/mindspore/core/ops/fusion/slice_fusion.cc @@ -0,0 +1,70 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "ops/fusion/slice_fusion.h" +#include +#include "ops/op_utils.h" + +namespace mindspore { +namespace ops { +void SliceFusion::Init(const std::vector &axes) { this->set_axes(axes); } + +void SliceFusion::set_axes(const std::vector &axes) { this->AddAttr(kAxes, MakeValue(axes)); } + +std::vector SliceFusion::get_axes() const { + auto value_ptr = GetAttr(kAxes); + return GetValue>(value_ptr); +} + +AbstractBasePtr SliceFusionInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args) { + MS_EXCEPTION_IF_NULL(primitive); + auto SliceFusion_prim = primitive->cast(); + MS_EXCEPTION_IF_NULL(SliceFusion_prim); + auto op_name = SliceFusion_prim->name(); + auto x_shape = CheckAndConvertUtils::ConvertShapePtrToShape("x_shape", input_args[0]->BuildShape(), op_name); + auto x_shape_len = (int64_t)x_shape.size(); + auto begin_v = input_args[1]->BuildValue(); + auto size_v = input_args[2]->BuildValue(); + auto x_type = input_args[0]->BuildType(); + MS_EXCEPTION_IF_NULL(x_type); + auto tensor_type = x_type->cast(); + MS_EXCEPTION_IF_NULL(tensor_type); + auto data_type = tensor_type->element(); + MS_EXCEPTION_IF_NULL(data_type); + if (begin_v == kAnyValue || size_v == kAnyValue) { + return std::make_shared(data_type, std::vector{}); + } + auto begin = GetValue>(begin_v); + auto size = GetValue>(size_v); + CheckAndConvertUtils::Check("len of begin", (int64_t)begin.size(), kEqual, "len x's dim", x_shape_len); + CheckAndConvertUtils::Check("len of size", (int64_t)size.size(), kEqual, "len x's dim", x_shape_len); + + for (int64_t i = 0; i < x_shape_len; i++) { + CheckAndConvertUtils::CheckInteger("input size[" + std::to_string(i) + "]", size[i], kGreaterThan, 0, ""); + if (x_shape[i] < (begin[i] + size[i])) { + auto y = begin[i] + size[i]; + MS_EXCEPTION(ValueError) << "For " + op_name + "slice shape can't bigger than origin shape " + + std::to_string(x_shape[i]) + "," + std::to_string(y); + } + } + return std::make_shared(data_type, size); +} + +REGISTER_PRIMITIVE_EVAL_IMPL(SliceFusion, prim::kPrimSliceFusion, SliceFusionInfer); +REGISTER_PRIMITIVE_C(kNameSliceFusion, SliceFusion); +} // namespace ops +} // namespace mindspore diff --git a/mindspore/core/ops/fusion/slice_fusion.h b/mindspore/core/ops/fusion/slice_fusion.h new file mode 100644 index 00000000000..32baf7fcfcc --- /dev/null +++ b/mindspore/core/ops/fusion/slice_fusion.h @@ -0,0 +1,45 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef MINDSPORE_CORE_OPS_SLICE_FUSION_H_ +#define MINDSPORE_CORE_OPS_SLICE_FUSION_H_ +#include +#include +#include +#include +#include "ops/primitive_c.h" +#include "abstract/abstract_value.h" +#include "utils/check_convert_utils.h" + +namespace mindspore { +namespace ops { +constexpr auto kNameSliceFusion = "SliceFusion"; +class SliceFusion : public PrimitiveC { + public: + SliceFusion() : PrimitiveC(kNameSliceFusion) { InitIOName({"x", "begin", "size"}, {"output"}); } + ~SliceFusion() = default; + MS_DECLARE_PARENT(SliceFusion, PrimitiveC); + void Init(const std::vector &axes); + void set_axes(const std::vector &axes); + std::vector get_axes() const; +}; +AbstractBasePtr SliceFusionInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args); +using PrimSliceFusionPtr = std::shared_ptr; +} // namespace ops +} // namespace mindspore + +#endif // MINDSPORE_CORE_OPS_SLICE_FUSION_H_ diff --git a/mindspore/core/ops/fusion/sub_fusion.cc b/mindspore/core/ops/fusion/sub_fusion.cc new file mode 100644 index 00000000000..a3cde51cba6 --- /dev/null +++ b/mindspore/core/ops/fusion/sub_fusion.cc @@ -0,0 +1,37 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "ops/fusion/sub_fusion.h" +#include +#include "ops/op_utils.h" + +namespace mindspore { +namespace ops { +void SubFusion::Init(const ActivationType &activation_type) { this->set_activation_type(activation_type); } + +void SubFusion::set_activation_type(const ActivationType &activation_type) { + int64_t swi; + swi = activation_type; + this->AddAttr(kActivationType, MakeValue(swi)); +} + +ActivationType SubFusion::get_activation_type() const { + auto value_ptr = GetAttr(kActivationType); + return ActivationType(GetValue(value_ptr)); +} +REGISTER_PRIMITIVE_C(kNameSubFusion, SubFusion); +} // namespace ops +} // namespace mindspore diff --git a/mindspore/core/ops/fusion/sub_fusion.h b/mindspore/core/ops/fusion/sub_fusion.h new file mode 100644 index 00000000000..46df3204b1a --- /dev/null +++ b/mindspore/core/ops/fusion/sub_fusion.h @@ -0,0 +1,37 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef MINDSPORE_CORE_OPS_SUB_FUSION_H_ +#define MINDSPORE_CORE_OPS_SUB_FUSION_H_ +#include "ops/sub.h" +#include "ops/op_utils.h" +#include "utils/check_convert_utils.h" + +namespace mindspore { +namespace ops { +constexpr auto kNameSubFusion = "SubFusion"; +class SubFusion : public Sub { + public: + SubFusion() : Sub(kNameSubFusion) {} + MS_DECLARE_PARENT(SubFusion, Sub); + void Init(const ActivationType &activation_type = NO_ACTIVATION); + void set_activation_type(const ActivationType &activation_type); + ActivationType get_activation_type() const; +}; +} // namespace ops +} // namespace mindspore + +#endif // MINDSPORE_CORE_OPS_SUB_FUSION_H_ diff --git a/mindspore/core/c_ops/broadcast_to.cc b/mindspore/core/ops/fusion/tile_fusion.cc similarity index 57% rename from mindspore/core/c_ops/broadcast_to.cc rename to mindspore/core/ops/fusion/tile_fusion.cc index 04a967746bf..bfab87ed5dc 100644 --- a/mindspore/core/c_ops/broadcast_to.cc +++ b/mindspore/core/ops/fusion/tile_fusion.cc @@ -14,20 +14,21 @@ * limitations under the License. */ -#include "c_ops/broadcast_to.h" +#include "ops/fusion/tile_fusion.h" +#include +#include "ops/op_utils.h" namespace mindspore { -void BroadcastTo::Init(const std::vector &shape) { set_shape(shape); } +namespace ops { +void TileFusion::Init(const std::vector &dims) { this->set_dims(dims); } -void BroadcastTo::set_shape(const std::vector &shape) { - CheckAndConvertUtils::CheckInteger(kShapeSize, shape.size(), kGreaterThan, 0, name()); - CheckAndConvertUtils::CheckPositiveVector(kShape, shape, name(), false, true); - AddAttr(kShape, MakeValue(shape)); -} +void TileFusion::set_dims(const std::vector &dims) { this->AddAttr(kDims, MakeValue(dims)); } -std::vector BroadcastTo::get_shape() const { - auto value_ptr = GetAttr(kShape); +std::vector TileFusion::get_dims() const { + auto value_ptr = GetAttr(kDims); return GetValue>(value_ptr); } -REGISTER_PRIMITIVE_C(kNameBroadcastTo, BroadcastTo); + +REGISTER_PRIMITIVE_C(kNameTileFusion, TileFusion); +} // namespace ops } // namespace mindspore diff --git a/mindspore/core/ops/fusion/tile_fusion.h b/mindspore/core/ops/fusion/tile_fusion.h new file mode 100644 index 00000000000..357cc94e11f --- /dev/null +++ b/mindspore/core/ops/fusion/tile_fusion.h @@ -0,0 +1,39 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef MINDSPORE_CORE_OPS_TILE_FUSION_H_ +#define MINDSPORE_CORE_OPS_TILE_FUSION_H_ +#include + +#include "ops/tile.h" +#include "ops/op_utils.h" +#include "utils/check_convert_utils.h" + +namespace mindspore { +namespace ops { +constexpr auto kNameTileFusion = "TileFusion"; +class TileFusion : public Tile { + public: + TileFusion() : Tile(kNameTileFusion) {} + MS_DECLARE_PARENT(TileFusion, Tile); + void Init(const std::vector &dims); + void set_dims(const std::vector &dims); + std::vector get_dims() const; +}; +} // namespace ops +} // namespace mindspore + +#endif // MINDSPORE_CORE_OPS_TILE_FUSION_H_ diff --git a/mindspore/core/c_ops/l2_normalize.cc b/mindspore/core/ops/fusion/topk_fusion.cc similarity index 53% rename from mindspore/core/c_ops/l2_normalize.cc rename to mindspore/core/ops/fusion/topk_fusion.cc index e9ef6a7d294..97693fbc9e3 100644 --- a/mindspore/core/c_ops/l2_normalize.cc +++ b/mindspore/core/ops/fusion/topk_fusion.cc @@ -14,26 +14,32 @@ * limitations under the License. */ -#include "c_ops/l2_normalize.h" +#include "ops/fusion/topk_fusion.h" +#include +#include "ops/op_utils.h" namespace mindspore { -void L2Normalize::Init(int64_t axis, float epsilon) { +namespace ops { +void TopKFusion::Init(const bool sorted, const int64_t axis, const int64_t largest) { this->set_axis(axis); - this->set_epsilon(epsilon); + this->set_largest(largest); + this->set_sorted(sorted); } -void L2Normalize::set_axis(int64_t axis) { AddAttr(kAxis, MakeValue(axis)); } +void TopKFusion::set_axis(const int64_t axis) { this->AddAttr(kAxis, MakeValue(axis)); } -void L2Normalize::set_epsilon(float epsilon) { AddAttr(kEpsilon, MakeValue(epsilon)); } +void TopKFusion::set_largest(const int64_t largest) { this->AddAttr(kLargest, MakeValue(largest)); } -int64_t L2Normalize::get_axis() { +int64_t TopKFusion::get_axis() const { auto value_ptr = GetAttr(kAxis); return GetValue(value_ptr); } -float L2Normalize::get_epsilon() { - auto value_ptr = GetAttr(kEpsilon); - return GetValue(value_ptr); +int64_t TopKFusion::get_largest() const { + auto value_ptr = GetAttr(kLargest); + return GetValue(value_ptr); } -REGISTER_PRIMITIVE_C(kNameL2Normalize, L2Normalize); + +REGISTER_PRIMITIVE_C(kNameTopKFusion, TopKFusion); +} // namespace ops } // namespace mindspore diff --git a/mindspore/core/ops/fusion/topk_fusion.h b/mindspore/core/ops/fusion/topk_fusion.h new file mode 100644 index 00000000000..c9a80c1966d --- /dev/null +++ b/mindspore/core/ops/fusion/topk_fusion.h @@ -0,0 +1,41 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef MINDSPORE_CORE_OPS_TOPK_FUSION_H_ +#define MINDSPORE_CORE_OPS_TOPK_FUSION_H_ +#include + +#include "ops/topk.h" +#include "ops/op_utils.h" +#include "utils/check_convert_utils.h" + +namespace mindspore { +namespace ops { +constexpr auto kNameTopKFusion = "TopKFusion"; +class TopKFusion : public TopK { + public: + TopKFusion() : TopK(kNameTopKFusion) {} + MS_DECLARE_PARENT(TopKFusion, TopK); + void Init(const bool sorted, const int64_t axis, const int64_t largest); + void set_axis(const int64_t axis); + void set_largest(const int64_t largest); + int64_t get_axis() const; + int64_t get_largest() const; +}; +} // namespace ops +} // namespace mindspore + +#endif // MINDSPORE_CORE_OPS_TOPK_FUSION_H_ diff --git a/mindspore/core/ops/gather.cc b/mindspore/core/ops/gather.cc new file mode 100644 index 00000000000..55fcc1b5afe --- /dev/null +++ b/mindspore/core/ops/gather.cc @@ -0,0 +1,52 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include +#include +#include "ops/gather.h" + +namespace mindspore { +namespace ops { +AbstractBasePtr GatherInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args) { + MS_EXCEPTION_IF_NULL(primitive); + auto gather_prim = primitive->cast(); + MS_EXCEPTION_IF_NULL(gather_prim); + auto prim_name = gather_prim->name(); + CheckAndConvertUtils::CheckInteger("gather_infer", input_args.size(), kEqual, 3, prim_name); + + // Infer type + auto x_type = input_args[0]->BuildType()->cast()->element(); + // auto dim_type = input_args[1]->BuildType(); + // auto index_type = input_args[2]->BuildType()->cast()->element(); + std::set valid_x_type = {TypeIdToType(kObjectTypeTensorType)}; + CheckAndConvertUtils::CheckSubClass("x_type", input_args[0]->BuildType(), valid_x_type, prim_name); + const std::set valid_index_types = {kNumberTypeInt32, kNumberTypeInt64}; + CheckAndConvertUtils::CheckTensorTypeValid("index_type", input_args[2]->BuildType(), valid_index_types, prim_name); + std::set valid_dim_type = {TypeIdToType(kNumberTypeInt32), TypeIdToType(kNumberTypeInt64)}; + CheckAndConvertUtils::CheckSubClass("dim_type", input_args[1]->BuildType(), valid_dim_type, prim_name); + + // Infer shape + auto x_shape = CheckAndConvertUtils::ConvertShapePtrToShape("x_shape", input_args[0]->BuildShape(), prim_name); + auto index_shape = CheckAndConvertUtils::ConvertShapePtrToShape("dim_shape", input_args[2]->BuildShape(), prim_name); + CheckAndConvertUtils::Check("x_rank", x_shape.size(), kEqual, "index_rank", index_shape.size(), prim_name); + + return std::make_shared(x_type, index_shape); +} +REGISTER_PRIMITIVE_EVAL_IMPL(Gather, prim::kPrimGather, GatherInfer); +REGISTER_PRIMITIVE_C(kNameGather, Gather); +} // namespace ops +} // namespace mindspore diff --git a/mindspore/core/c_ops/gather.h b/mindspore/core/ops/gather.h similarity index 72% rename from mindspore/core/c_ops/gather.h rename to mindspore/core/ops/gather.h index b01d350802b..aa1e88a4f91 100644 
--- a/mindspore/core/c_ops/gather.h +++ b/mindspore/core/ops/gather.h @@ -14,17 +14,18 @@ * limitations under the License. */ -#ifndef MINDSPORE_CORE_C_OPS_GATHER_H_ -#define MINDSPORE_CORE_C_OPS_GATHER_H_ +#ifndef MINDSPORE_CORE_OPS_GATHER_H_ +#define MINDSPORE_CORE_OPS_GATHER_H_ #include #include #include #include -#include "c_ops/primitive_c.h" +#include "ops/primitive_c.h" #include "abstract/abstract_value.h" #include "utils/check_convert_utils.h" namespace mindspore { +namespace ops { constexpr auto kNameGather = "Gather"; class Gather : public PrimitiveC { public: @@ -33,6 +34,10 @@ class Gather : public PrimitiveC { MS_DECLARE_PARENT(Gather, PrimitiveC); void Init() {} }; +AbstractBasePtr GatherInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args); +using PrimGatherPtr = std::shared_ptr; +} // namespace ops } // namespace mindspore -#endif // MINDSPORE_CORE_C_OPS_GATHER_H_ +#endif // MINDSPORE_CORE_OPS_GATHER_H_ diff --git a/mindspore/core/ops/gather_nd.cc b/mindspore/core/ops/gather_nd.cc new file mode 100644 index 00000000000..8227fdfbda3 --- /dev/null +++ b/mindspore/core/ops/gather_nd.cc @@ -0,0 +1,73 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include +#include +#include +#include +#include +#include "ops/gather_nd.h" +#include "utils/check_convert_utils.h" + +namespace mindspore { +namespace ops { +namespace { +abstract::ShapePtr InferShape(const PrimitivePtr &primitive, const std::vector &input_args) { + MS_EXCEPTION_IF_NULL(primitive); + auto gather_prim = primitive->cast(); + MS_EXCEPTION_IF_NULL(gather_prim); + auto prim_name = gather_prim->name(); + CheckAndConvertUtils::CheckInteger("input number", input_args.size(), kEqual, 2, prim_name); + for (const auto &item : input_args) { + MS_EXCEPTION_IF_NULL(item); + } + auto input_shape = CheckAndConvertUtils::ConvertShapePtrToShape("x_shape", input_args[0]->BuildShape(), prim_name); + auto indices_shape = + CheckAndConvertUtils::ConvertShapePtrToShape("indices_shape", input_args[1]->BuildShape(), prim_name); + auto input_rank = input_shape.size(); + auto indices_rank = indices_shape.size(); + CheckAndConvertUtils::CheckInteger("Input of indices data", input_rank, kGreaterEqual, + indices_shape[indices_rank - 1], prim_name); + std::vector output_shape; + for (size_t i = 0; i < indices_rank - 1; i++) { + output_shape.push_back(indices_shape[i]); + } + for (size_t i = indices_shape[indices_rank - 1]; i < input_rank; ++i) { + output_shape.push_back(input_shape[i]); + } + return std::make_shared(output_shape); +} + +TypePtr InferType(const PrimitivePtr &prim, const std::vector &input_args) { + const std::set valid_types = {kNumberTypeInt8, kNumberTypeInt16, kNumberTypeInt32, kNumberTypeInt64}; + if (std::any_of(input_args.begin(), input_args.end(), [](AbstractBasePtr a) { return a == nullptr; })) { + MS_LOG(EXCEPTION) << "nullptr"; + } + std::map types; + types.emplace("input_x", input_args[0]->BuildType()); + auto infer_type = CheckAndConvertUtils::CheckTensorTypeSame(types, valid_types, prim->name()); + return TypeIdToType(infer_type); +} +} // namespace +AbstractBasePtr GatherNdInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args) { + return std::make_shared(InferType(primitive, input_args), + InferShape(primitive, input_args)->shape()); +} +REGISTER_PRIMITIVE_EVAL_IMPL(GatherNd, prim::kPrimGatherND, GatherNdInfer); +REGISTER_PRIMITIVE_C(kNameGatherNd, GatherNd); +} // namespace ops +} // namespace mindspore diff --git a/mindspore/core/c_ops/gather_nd.h b/mindspore/core/ops/gather_nd.h similarity index 69% rename from mindspore/core/c_ops/gather_nd.h rename to mindspore/core/ops/gather_nd.h index 1ea7ad69c79..413c9d8f4e2 100644 --- a/mindspore/core/c_ops/gather_nd.h +++ b/mindspore/core/ops/gather_nd.h @@ -14,13 +14,16 @@ * limitations under the License. 
*/ -#ifndef MINDSPORE_CORE_C_OPS_GATHERND_H_ -#define MINDSPORE_CORE_C_OPS_GATHERND_H_ -#include "c_ops/primitive_c.h" +#ifndef MINDSPORE_CORE_OPS_GATHER_ND_H_ +#define MINDSPORE_CORE_OPS_GATHER_ND_H_ +#include +#include +#include "ops/primitive_c.h" #include "abstract/abstract_value.h" #include "utils/check_convert_utils.h" namespace mindspore { +namespace ops { constexpr auto kNameGatherNd = "GatherNd"; class GatherNd : public PrimitiveC { public: @@ -29,6 +32,10 @@ class GatherNd : public PrimitiveC { MS_DECLARE_PARENT(GatherNd, PrimitiveC); void Init() {} }; +AbstractBasePtr GatherNdInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args); +using PrimGatherNd = std::shared_ptr; +} // namespace ops } // namespace mindspore -#endif // MINDSPORE_CORE_C_OPS_GATHERND_H_ +#endif // MINDSPORE_CORE_OPS_GATHER_ND_H_ diff --git a/mindspore/core/ops/gelu.cc b/mindspore/core/ops/gelu.cc new file mode 100644 index 00000000000..a8ab8f6aa99 --- /dev/null +++ b/mindspore/core/ops/gelu.cc @@ -0,0 +1,58 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include +#include +#include +#include + +#include "ops/gelu.h" +#include "utils/check_convert_utils.h" +#include "abstract/primitive_infer_map.h" + +namespace mindspore { +namespace ops { +namespace { +abstract::ShapePtr GeLUInferShape(const PrimitivePtr &primitive, const std::vector &input_args) { + MS_EXCEPTION_IF_NULL(primitive); + auto gelu_prim = primitive->cast(); + MS_EXCEPTION_IF_NULL(gelu_prim); + auto prim_name = gelu_prim->name(); + auto input_shape = CheckAndConvertUtils::ConvertShapePtrToShape("input_x", input_args[0]->BuildShape(), prim_name); + return std::make_shared(input_shape); +} + +TypePtr GeLUInferType(const PrimitivePtr &prim, const std::vector &input_args) { + for (const auto &item : input_args) { + MS_EXCEPTION_IF_NULL(item); + } + const std::set valid_types = {kNumberTypeFloat16, kNumberTypeFloat32}; + std::map types; + types.emplace("input_x", input_args[0]->BuildType()); + auto infer_type = CheckAndConvertUtils::CheckTensorTypeSame(types, valid_types, prim->name()); + return TypeIdToType(infer_type); +} +} // namespace +AbstractBasePtr GeLUInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args) { + return std::make_shared(GeLUInferType(primitive, input_args), + GeLUInferShape(primitive, input_args)->shape()); +} + +REGISTER_PRIMITIVE_EVAL_IMPL(GeLU, prim::kPrimGeLU, GeLUInfer); +REGISTER_PRIMITIVE_C(kNameGeLU, GeLU); +} // namespace ops +} // namespace mindspore diff --git a/mindspore/core/ops/gelu.h b/mindspore/core/ops/gelu.h new file mode 100644 index 00000000000..b3ffa39a9e5 --- /dev/null +++ b/mindspore/core/ops/gelu.h @@ -0,0 +1,42 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
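The GatherNd shape inference above builds the output shape from all but the last dimension of the indices, followed by the input dimensions that the last indices dimension does not address. A standalone sketch of that rule, independent of the MindSpore abstract types (the helper name is illustrative):

#include <cstdint>
#include <vector>

// output = indices_shape[0 .. r-2] ++ input_shape[indices_shape[r-1] .. end]
std::vector<int64_t> GatherNdOutputShape(const std::vector<int64_t> &input_shape,
                                         const std::vector<int64_t> &indices_shape) {
  std::vector<int64_t> out(indices_shape.begin(), indices_shape.end() - 1);
  for (size_t i = static_cast<size_t>(indices_shape.back()); i < input_shape.size(); ++i) {
    out.push_back(input_shape[i]);
  }
  return out;
}
// Example: input_shape {4, 5, 6} and indices_shape {2, 2} give {2, 6}.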
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_CORE_OPS_GELU_H_ +#define MINDSPORE_CORE_OPS_GELU_H_ +#include +#include + +#include "ops/primitive_c.h" +#include "abstract/abstract_value.h" +#include "utils/check_convert_utils.h" + +namespace mindspore { +namespace ops { +constexpr auto kNameGeLU = "GeLU"; +class GeLU : public PrimitiveC { + public: + GeLU() : PrimitiveC(kNameGeLU) {} + ~GeLU() = default; + MS_DECLARE_PARENT(GeLU, PrimitiveC); + void Init() {} +}; +AbstractBasePtr GeLUInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args); +using PrimGeLUPtr = std::shared_ptr; +} // namespace ops +} // namespace mindspore + +#endif // MINDSPORE_CORE_OPS_GELU_H_ diff --git a/mindspore/core/ops/grad/activation_grad.cc b/mindspore/core/ops/grad/activation_grad.cc new file mode 100644 index 00000000000..961394ec6fa --- /dev/null +++ b/mindspore/core/ops/grad/activation_grad.cc @@ -0,0 +1,53 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "ops/grad/activation_grad.h" +#include +#include +#include +#include +#include +#include "ops/op_utils.h" +#include "utils/check_convert_utils.h" +#include "abstract/primitive_infer_map.h" + +namespace mindspore { +namespace ops { + +void ActivationGrad::Init(const ActivationType &type, const float alpha) { + this->set_type(type); + this->set_alpha(alpha); +} + +void ActivationGrad::set_type(const ActivationType &type) { + int64_t swi = type; + this->AddAttr(kActivationType, MakeValue(swi)); +} + +ActivationType ActivationGrad::get_type() const { + auto value_ptr = GetAttr(kActivationType); + return ActivationType(GetValue(value_ptr)); +} + +void ActivationGrad::set_alpha(const float alpha) { this->AddAttr(kAlpha, MakeValue(alpha)); } + +float ActivationGrad::get_alpha() const { + auto value_ptr = GetAttr(kAlpha); + return GetValue(value_ptr); +} +REGISTER_PRIMITIVE_C(kNameActivationGrad, ActivationGrad); +} // namespace ops +} // namespace mindspore diff --git a/mindspore/core/ops/grad/activation_grad.h b/mindspore/core/ops/grad/activation_grad.h new file mode 100644 index 00000000000..2fb73eba3e1 --- /dev/null +++ b/mindspore/core/ops/grad/activation_grad.h @@ -0,0 +1,45 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_CORE_OPS_ACTIVATION_GRAD_H_ +#define MINDSPORE_CORE_OPS_ACTIVATION_GRAD_H_ +#include +#include +#include +#include +#include "ops/primitive_c.h" +#include "abstract/abstract_value.h" +#include "utils/check_convert_utils.h" + +namespace mindspore { +namespace ops { +constexpr auto kNameActivationGrad = "ActivationGrad"; +class ActivationGrad : public PrimitiveC { + public: + ActivationGrad() : PrimitiveC(kNameActivationGrad) {} + ~ActivationGrad() = default; + MS_DECLARE_PARENT(ActivationGrad, PrimitiveC); + void Init(const ActivationType &type = NO_ACTIVATION, const float alpha = 0.2); + void set_type(const ActivationType &type); + void set_alpha(const float alpha); + + ActivationType get_type() const; + float get_alpha() const; +}; +} // namespace ops +} // namespace mindspore + +#endif // MINDSPORE_CORE_OPS_ACTIVATION_GRAD_H_ diff --git a/mindspore/core/ops/grad/add_grad.cc b/mindspore/core/ops/grad/add_grad.cc new file mode 100644 index 00000000000..32bc5ab10a5 --- /dev/null +++ b/mindspore/core/ops/grad/add_grad.cc @@ -0,0 +1,31 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "ops/grad/add_grad.h" +#include +#include +#include +#include +#include +#include "ops/op_utils.h" +#include "utils/check_convert_utils.h" +#include "abstract/primitive_infer_map.h" + +namespace mindspore { +namespace ops { +REGISTER_PRIMITIVE_C(kNameAddGrad, AddGrad); +} // namespace ops +} // namespace mindspore diff --git a/mindspore/core/ops/grad/add_grad.h b/mindspore/core/ops/grad/add_grad.h new file mode 100644 index 00000000000..b4d48fea529 --- /dev/null +++ b/mindspore/core/ops/grad/add_grad.h @@ -0,0 +1,40 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
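A minimal sketch of the ActivationGrad accessors defined above, assuming the headers added in this patch are available; note that the activation type is stored as an int64_t attribute, which is why the getter reconstructs the ActivationType enum from it.

#include <iostream>
#include "ops/grad/activation_grad.h"

int main() {
  mindspore::ops::ActivationGrad act_grad;
  act_grad.Init();  // defaults from the header: type = NO_ACTIVATION, alpha = 0.2
  std::cout << "type: " << static_cast<int64_t>(act_grad.get_type())
            << ", alpha: " << act_grad.get_alpha() << std::endl;
  return 0;
}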
+ */ + +#ifndef MINDSPORE_CORE_OPS_ADD_GRAD_H_ +#define MINDSPORE_CORE_OPS_ADD_GRAD_H_ +#include +#include +#include +#include +#include "ops/primitive_c.h" +#include "abstract/abstract_value.h" +#include "utils/check_convert_utils.h" + +namespace mindspore { +namespace ops { +constexpr auto kNameAddGrad = "AddGrad"; +class AddGrad : public PrimitiveC { + public: + AddGrad() : PrimitiveC(kNameAddGrad) {} + ~AddGrad() = default; + MS_DECLARE_PARENT(AddGrad, PrimitiveC); + void Init() {} +}; +} // namespace ops +} // namespace mindspore + +#endif // MINDSPORE_CORE_OPS_ADD_GRAD_H_ diff --git a/mindspore/core/ops/grad/avg_pool_grad.cc b/mindspore/core/ops/grad/avg_pool_grad.cc new file mode 100644 index 00000000000..5752bd53789 --- /dev/null +++ b/mindspore/core/ops/grad/avg_pool_grad.cc @@ -0,0 +1,38 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "ops/grad/avg_pool_grad.h" +#include "ops/op_utils.h" + +namespace mindspore { +namespace ops { +AbstractBasePtr AvgPoolGradInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args) { + MS_EXCEPTION_IF_NULL(primitive); + auto AvgPoolGrad_prim = primitive->cast(); + MS_EXCEPTION_IF_NULL(AvgPoolGrad_prim); + MS_EXCEPTION_IF_NULL(input_args[0]->BuildValue()); + auto origin_input_shape = GetValue>(input_args[0]->BuildValue()); + auto tensor_type = input_args[1]->BuildType()->cast(); + MS_EXCEPTION_IF_NULL(tensor_type); + auto element = tensor_type->element(); + return std::make_shared(element, origin_input_shape); +} + +REGISTER_PRIMITIVE_EVAL_IMPL(AvgPoolGrad, prim::kPrimAvgPoolGrad, AvgPoolGradInfer); +REGISTER_PRIMITIVE_C(kNameAvgPoolGrad, AvgPoolGrad); +} // namespace ops +} // namespace mindspore diff --git a/mindspore/core/ops/grad/avg_pool_grad.h b/mindspore/core/ops/grad/avg_pool_grad.h new file mode 100644 index 00000000000..c0e4c900010 --- /dev/null +++ b/mindspore/core/ops/grad/avg_pool_grad.h @@ -0,0 +1,45 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef MINDSPORE_CORE_OPS_AVG_POOL_GRAD_H_ +#define MINDSPORE_CORE_OPS_AVG_POOL_GRAD_H_ +#include +#include +#include +#include +#include "ops/grad/pool_grad.h" +#include "ops/primitive_c.h" +#include "abstract/abstract_value.h" +#include "utils/check_convert_utils.h" + +namespace mindspore { +namespace ops { +constexpr auto kNameAvgPoolGrad = "AvgPoolGrad"; +class AvgPoolGrad : public PoolGrad { + public: + AvgPoolGrad() : PoolGrad(kNameAvgPoolGrad) { InitIOName({"x_origin", "out_origin", "grad"}, {"output"}); } + ~AvgPoolGrad() = default; + MS_DECLARE_PARENT(AvgPoolGrad, PoolGrad); +}; + +AbstractBasePtr AvgPoolGradInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args); +using PrimAvgPoolGradPtr = std::shared_ptr; + +} // namespace ops +} // namespace mindspore + +#endif // MINDSPORE_CORE_OPS_AVG_POOL_GRAD_H_ diff --git a/mindspore/core/ops/grad/batch_norm_grad.cc b/mindspore/core/ops/grad/batch_norm_grad.cc new file mode 100644 index 00000000000..31cecb8f621 --- /dev/null +++ b/mindspore/core/ops/grad/batch_norm_grad.cc @@ -0,0 +1,73 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include +#include "ops/grad/batch_norm_grad.h" +#include "utils/check_convert_utils.h" +#include "abstract/primitive_infer_map.h" +#include "ops/op_utils.h" + +namespace mindspore { +namespace ops { +void BatchNormGrad::Init(const bool is_training, const float epsilon) { + this->set_is_training(is_training); + this->set_epsilon(epsilon); +} + +void BatchNormGrad::set_epsilon(const float epsilon) { + // CheckAndConvertUtils::CheckInRange(kEpsilon, epsilon, kIncludeRight, {0, 1}, this->name()); + this->AddAttr(kEpsilon, MakeValue(epsilon)); +} + +float BatchNormGrad::get_epsilon() const { + auto value_ptr = this->GetAttr(kEpsilon); + return GetValue(value_ptr); +} + +void BatchNormGrad::set_is_training(const bool is_training) { this->AddAttr(kIsTraining, MakeValue(is_training)); } + +bool BatchNormGrad::get_is_training() const { + auto value_ptr = this->GetAttr(kIsTraining); + return GetValue(value_ptr); +} + +AbstractBasePtr BatchNormGradInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args) { + MS_EXCEPTION_IF_NULL(primitive); + auto BatchNormGrad_prim = primitive->cast(); + MS_EXCEPTION_IF_NULL(BatchNormGrad_prim); + auto op_name = BatchNormGrad_prim->name(); + MS_EXCEPTION_IF_NULL(input_args[1]); + MS_EXCEPTION_IF_NULL(input_args[2]); + MS_EXCEPTION_IF_NULL(input_args[3]); + auto y_backprop_shape = + CheckAndConvertUtils::ConvertShapePtrToShape("y_backprop_shape", input_args[0]->BuildShape(), op_name); + auto x_shape = CheckAndConvertUtils::ConvertShapePtrToShape("x_shape", input_args[1]->BuildShape(), op_name); + CheckAndConvertUtils::Check("BatchNorm y_backprop_shape", y_backprop_shape, kEqual, "BatchNorm x_shape", x_shape); + + auto dx = input_args[1]->Broaden(); + auto dscale = input_args[2]->Broaden(); + auto reserve_1 = 
input_args[3]->Broaden();
+  auto reserve_2 = input_args[4]->Broaden();
+
+  AbstractBasePtrList rets = {dx, dscale, dscale, reserve_1, reserve_2};
+  return std::make_shared<abstract::AbstractTuple>(rets);
+}
+
+REGISTER_PRIMITIVE_EVAL_IMPL(BatchNormGrad, prim::kPrimBatchNormGrad, BatchNormGradInfer);
+REGISTER_PRIMITIVE_C(kNameBatchNormGrad, BatchNormGrad);
+}  // namespace ops
+}  // namespace mindspore
diff --git a/mindspore/core/ops/grad/batch_norm_grad.h b/mindspore/core/ops/grad/batch_norm_grad.h
new file mode 100644
index 00000000000..dc0ed2cf8cd
--- /dev/null
+++ b/mindspore/core/ops/grad/batch_norm_grad.h
@@ -0,0 +1,46 @@
+/**
+ * Copyright 2020 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef MINDSPORE_CORE_OPS_BATCH_NORM_GRAD_H_
+#define MINDSPORE_CORE_OPS_BATCH_NORM_GRAD_H_
+#include <memory>
+#include <vector>
+#include "ops/primitive_c.h"
+#include "abstract/abstract_value.h"
+#include "utils/check_convert_utils.h"
+
+namespace mindspore {
+namespace ops {
+constexpr auto kNameBatchNormGrad = "BatchNormGrad";
+class BatchNormGrad : public PrimitiveC {
+ public:
+  BatchNormGrad() : PrimitiveC(kNameBatchNormGrad) {}
+  ~BatchNormGrad() = default;
+  MS_DECLARE_PARENT(BatchNormGrad, PrimitiveC);
+  void Init(const bool is_training = false, const float epsilon = 1e-05);
+  void set_is_training(const bool is_training);
+  void set_epsilon(const float epsilon);
+  bool get_is_training() const;
+  float get_epsilon() const;
+};
+
+AbstractBasePtr BatchNormGradInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive,
+                                   const std::vector<AbstractBasePtr> &input_args);
+using PrimBatchNormGradPtr = std::shared_ptr<BatchNormGrad>;
+}  // namespace ops
+}  // namespace mindspore
+
+#endif  // MINDSPORE_CORE_OPS_BATCH_NORM_GRAD_H_
diff --git a/mindspore/core/ops/grad/bias_grad.cc b/mindspore/core/ops/grad/bias_grad.cc
new file mode 100644
index 00000000000..4b0253f1b55
--- /dev/null
+++ b/mindspore/core/ops/grad/bias_grad.cc
@@ -0,0 +1,52 @@
+/**
+ * Copyright 2020 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
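A short usage sketch for BatchNormGrad as declared above (hypothetical driver, same MindSpore build assumption as earlier sketches); BatchNormGradInfer itself returns a five-element tuple of broadened inputs, so only the attribute round-trip is shown here.

#include <iostream>
#include "ops/grad/batch_norm_grad.h"

int main() {
  mindspore::ops::BatchNormGrad bn_grad;
  bn_grad.Init(/*is_training=*/true, /*epsilon=*/1e-5f);
  std::cout << std::boolalpha << "is_training: " << bn_grad.get_is_training()
            << ", epsilon: " << bn_grad.get_epsilon() << std::endl;
  return 0;
}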
+ */ + +#include "ops/grad/bias_grad.h" +#include +#include +#include +#include +#include +#include "ops/op_utils.h" +#include "utils/check_convert_utils.h" +#include "abstract/primitive_infer_map.h" + +namespace mindspore { +namespace ops { +AbstractBasePtr BiasGradInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args) { + MS_EXCEPTION_IF_NULL(primitive); + auto bias_prim = primitive->cast(); + MS_EXCEPTION_IF_NULL(bias_prim); + auto prim_name = bias_prim->name(); + CheckAndConvertUtils::CheckInteger("bias_grad_infer", input_args.size(), kEqual, 1, prim_name); + MS_EXCEPTION_IF_NULL(input_args[0]); + + // Infer shape + auto inshape = CheckAndConvertUtils::ConvertShapePtrToShape("inshape", input_args[0]->BuildShape(), prim_name); + for (size_t i = 0; i < inshape.size() - 1; i++) { + inshape[i] = 1; + } + + // Infer type + auto intype = input_args[0]->BuildType()->cast()->element(); + + return std::make_shared(intype, inshape); +} +REGISTER_PRIMITIVE_EVAL_IMPL(BiasGrad, prim::kPrimBiasGrad, BiasGradInfer); +REGISTER_PRIMITIVE_C(kNameBiasGrad, BiasGrad); +} // namespace ops +} // namespace mindspore diff --git a/mindspore/core/c_ops/bias_grad.h b/mindspore/core/ops/grad/bias_grad.h similarity index 70% rename from mindspore/core/c_ops/bias_grad.h rename to mindspore/core/ops/grad/bias_grad.h index db6c284a036..6229e72bc31 100644 --- a/mindspore/core/c_ops/bias_grad.h +++ b/mindspore/core/ops/grad/bias_grad.h @@ -14,27 +14,30 @@ * limitations under the License. */ -#ifndef MINDSPORE_CORE_C_OPS_BIASGRAD_H_ -#define MINDSPORE_CORE_C_OPS_BIASGRAD_H_ +#ifndef MINDSPORE_CORE_OPS_BIAS_GRAD_H_ +#define MINDSPORE_CORE_OPS_BIAS_GRAD_H_ #include #include #include #include -#include "c_ops/primitive_c.h" +#include "ops/primitive_c.h" #include "abstract/abstract_value.h" #include "utils/check_convert_utils.h" namespace mindspore { +namespace ops { constexpr auto kNameBiasGrad = "BiasGrad"; class BiasGrad : public PrimitiveC { public: BiasGrad() : PrimitiveC(kNameBiasGrad) {} ~BiasGrad() = default; MS_DECLARE_PARENT(BiasGrad, PrimitiveC); - void Init(const std::vector &axis); - void set_axis(const std::vector &axis); - std::vector get_axis() const; + void Init(); }; +AbstractBasePtr BiasGradInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args); +using PrimBiasGradPtr = std::shared_ptr; +} // namespace ops } // namespace mindspore -#endif // MINDSPORE_CORE_C_OPS_BIASGRAD_H_ +#endif // MINDSPORE_CORE_OPS_BIAS_GRAD_H_ diff --git a/mindspore/core/ops/grad/binary_cross_entropy_grad.cc b/mindspore/core/ops/grad/binary_cross_entropy_grad.cc new file mode 100644 index 00000000000..32c34370c61 --- /dev/null +++ b/mindspore/core/ops/grad/binary_cross_entropy_grad.cc @@ -0,0 +1,80 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include +#include +#include + +#include "ops/grad/binary_cross_entropy_grad.h" + +namespace mindspore { +namespace ops { +namespace { +abstract::ShapePtr BinaryCrossEntroyGradInferShape(const PrimitivePtr &primitive, + const std::vector &input_args) { + MS_EXCEPTION_IF_NULL(primitive); + auto binary_cross_entropy_grad_prim = primitive->cast(); + MS_EXCEPTION_IF_NULL(binary_cross_entropy_grad_prim); + auto prim_name = binary_cross_entropy_grad_prim->name(); + auto x_shape = CheckAndConvertUtils::ConvertShapePtrToShape("x_shape", input_args[0]->BuildShape(), prim_name); + auto y_shape = CheckAndConvertUtils::ConvertShapePtrToShape("y_shape", input_args[1]->BuildShape(), prim_name); + auto weight_shape = + CheckAndConvertUtils::ConvertShapePtrToShape("weight_shape", input_args[2]->BuildShape(), prim_name); + CheckAndConvertUtils::Check("x shape", x_shape, kEqual, "y shape", y_shape, prim_name); + if (weight_shape.size() < 1) { + CheckAndConvertUtils::Check("y shape", y_shape, kEqual, "weight shape", weight_shape, prim_name); + } + return std::make_shared(x_shape); +} + +TypePtr BinaryCrossEntroyGradInferType(const PrimitivePtr &prim, const std::vector &input_args) { + for (const auto &item : input_args) { + MS_EXCEPTION_IF_NULL(item); + } + const std::set valid_types = {kNumberTypeFloat16, kNumberTypeFloat32}; + std::map types; + types.emplace("x_shape", input_args[0]->BuildType()); + types.emplace("y_shape", input_args[1]->BuildType()); + auto infer_type = CheckAndConvertUtils::CheckTensorTypeSame(types, valid_types, prim->name()); + if (input_args[3]->BuildType() != nullptr) { + types.emplace("x_shape", input_args[0]->BuildType()); + types.emplace("weight_shape", input_args[2]->BuildType()); + infer_type = CheckAndConvertUtils::CheckTensorTypeSame(types, valid_types, prim->name()); + } + return TypeIdToType(infer_type); +} +} // namespace +void BinaryCrossEntropyGrad::Init(const Reduction &reduction) { set_reduction(reduction); } + +void BinaryCrossEntropyGrad::set_reduction(const Reduction &reduction) { + int64_t swi = reduction; + this->AddAttr(kReduction, MakeValue(swi)); +} +Reduction BinaryCrossEntropyGrad::get_reduction() const { + auto value_ptr = GetAttr(kReduction); + return Reduction(GetValue(value_ptr)); +} + +AbstractBasePtr BinaryCrossEntropyGradInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args) { + return std::make_shared(BinaryCrossEntroyGradInferType(primitive, input_args), + BinaryCrossEntroyGradInferShape(primitive, input_args)->shape()); +} + +REGISTER_PRIMITIVE_EVAL_IMPL(BinaryCrossEntropyGrad, prim::kPrimBinaryCrossEntropyGrad, BinaryCrossEntropyGradInfer); +REGISTER_PRIMITIVE_C(kNameBinaryCrossEntropyGrad, BinaryCrossEntropyGrad); +} // namespace ops +} // namespace mindspore diff --git a/mindspore/core/c_ops/binary_cross_entropy_grad.h b/mindspore/core/ops/grad/binary_cross_entropy_grad.h similarity index 72% rename from mindspore/core/c_ops/binary_cross_entropy_grad.h rename to mindspore/core/ops/grad/binary_cross_entropy_grad.h index af35b0d3466..9e89b033ebf 100644 --- a/mindspore/core/c_ops/binary_cross_entropy_grad.h +++ b/mindspore/core/ops/grad/binary_cross_entropy_grad.h @@ -14,30 +14,31 @@ * limitations under the License. 
*/ -#ifndef MINDSPORE_CORE_C_OPS_BINARY_CROSS_ENTROPY_GRAD_H_ -#define MINDSPORE_CORE_C_OPS_BINARY_CROSS_ENTROPY_GRAD_H_ +#ifndef MINDSPORE_CORE_OPS_BINARY_CROSS_ENTROPY_GRAD_H_ +#define MINDSPORE_CORE_OPS_BINARY_CROSS_ENTROPY_GRAD_H_ #include -#include #include -#include "c_ops/primitive_c.h" -#include "c_ops/op_utils.h" +#include "ops/primitive_c.h" +#include "ops/op_utils.h" #include "abstract/abstract_value.h" #include "utils/check_convert_utils.h" namespace mindspore { +namespace ops { constexpr auto kNameBinaryCrossEntropyGrad = "BinaryCrossEntropyGrad"; class BinaryCrossEntropyGrad : public PrimitiveC { public: BinaryCrossEntropyGrad() : PrimitiveC(kNameBinaryCrossEntropyGrad) {} ~BinaryCrossEntropyGrad() = default; MS_DECLARE_PARENT(BinaryCrossEntropyGrad, PrimitiveC); - void Init(const std::string &reduction = "mean"); - void set_reduction(const std::string &reduction); - std::string get_reduction() const; + void Init(const Reduction &reduction = MEAN); + void set_reduction(const Reduction &reduction); + Reduction get_reduction() const; }; AbstractBasePtr BinaryCrossEntropyGradInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, const std::vector &input_args); -using PrimBinaryCrossEntropyGrad = std::shared_ptr; +using PrimBinaryCrossEntropyGradPtr = std::shared_ptr; +} // namespace ops } // namespace mindspore -#endif // MINDSPORE_CORE_C_OPS_BINARY_CROSS_ENTROPY_GRAD_H_ +#endif // MINDSPORE_CORE_OPS_BINARY_CROSS_ENTROPY_GRAD_H_ diff --git a/mindspore/core/ops/grad/bn_grad.cc b/mindspore/core/ops/grad/bn_grad.cc new file mode 100644 index 00000000000..b31562bdd42 --- /dev/null +++ b/mindspore/core/ops/grad/bn_grad.cc @@ -0,0 +1,45 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "ops/grad/bn_grad.h" +#include "utils/check_convert_utils.h" +#include "abstract/primitive_infer_map.h" +#include "ops/op_utils.h" + +namespace mindspore { +namespace ops { +void BNGrad::Init(const float eps, const float momentum) { + this->set_eps(eps); + this->set_momentum(momentum); +} + +void BNGrad::set_eps(const float eps) { this->AddAttr(kEps, MakeValue(eps)); } + +float BNGrad::get_eps() const { + auto value_ptr = this->GetAttr(kEps); + return GetValue(value_ptr); +} + +void BNGrad::set_momentum(const float momentum) { this->AddAttr(kMomentum, MakeValue(momentum)); } + +float BNGrad::get_momentum() const { + auto value_ptr = this->GetAttr(kMomentum); + return GetValue(value_ptr); +} + +REGISTER_PRIMITIVE_C(kNameBNGrad, BNGrad); +} // namespace ops +} // namespace mindspore diff --git a/mindspore/core/ops/grad/bn_grad.h b/mindspore/core/ops/grad/bn_grad.h new file mode 100644 index 00000000000..c8473590bee --- /dev/null +++ b/mindspore/core/ops/grad/bn_grad.h @@ -0,0 +1,41 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
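The rename above also changes the reduction attribute from a std::string to the Reduction enum, which the setter stores as an int64_t. A minimal sketch of the new interface, under the same build assumptions:

#include <iostream>
#include "ops/grad/binary_cross_entropy_grad.h"

int main() {
  mindspore::ops::BinaryCrossEntropyGrad bce_grad;
  bce_grad.Init();  // default reduction is MEAN
  std::cout << "reduction: " << static_cast<int64_t>(bce_grad.get_reduction()) << std::endl;
  return 0;
}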
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_CORE_OPS_BN_GRAD_H_ +#define MINDSPORE_CORE_OPS_BN_GRAD_H_ +#include +#include "ops/primitive_c.h" +#include "abstract/abstract_value.h" +#include "utils/check_convert_utils.h" + +namespace mindspore { +namespace ops { +constexpr auto kNameBNGrad = "BNGrad"; +class BNGrad : public PrimitiveC { + public: + BNGrad() : PrimitiveC(kNameBNGrad) {} + ~BNGrad() = default; + MS_DECLARE_PARENT(BNGrad, PrimitiveC); + void Init(const float eps, const float momentum); + void set_eps(const float eps); + void set_momentum(const float momentum); + float get_eps() const; + float get_momentum() const; +}; +} // namespace ops +} // namespace mindspore + +#endif // MINDSPORE_CORE_OPS_BN_GRAD_H_ diff --git a/mindspore/core/ops/grad/conv2d_backprop_filter.cc b/mindspore/core/ops/grad/conv2d_backprop_filter.cc new file mode 100644 index 00000000000..7c51ade8b40 --- /dev/null +++ b/mindspore/core/ops/grad/conv2d_backprop_filter.cc @@ -0,0 +1,157 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
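BNGrad above exposes only the eps and momentum attributes; a minimal round-trip sketch, again assuming a MindSpore build environment:

#include <iostream>
#include "ops/grad/bn_grad.h"

int main() {
  mindspore::ops::BNGrad bn_grad;
  bn_grad.Init(/*eps=*/1e-5f, /*momentum=*/0.9f);
  std::cout << "eps: " << bn_grad.get_eps()
            << ", momentum: " << bn_grad.get_momentum() << std::endl;
  return 0;
}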
+ */ + +#include +#include +#include + +#include "ops/grad/conv2d_backprop_filter.h" +#include "ops/op_utils.h" + +namespace mindspore { +namespace ops { +namespace { +abstract::ShapePtr Conv2DBackpropFilterInferShape(const PrimitivePtr &primitive, + const std::vector &input_args) { + MS_EXCEPTION_IF_NULL(primitive); + auto conv2d_backprop_filter_prim = primitive->cast(); + MS_EXCEPTION_IF_NULL(conv2d_backprop_filter_prim); + // auto prim_name = conv2d_backprop_filter_prim->name(); + + auto out_put = input_args[2]->BuildValue(); + auto infer_shape = GetValue>(out_put); + return std::make_shared(infer_shape); +} + +TypePtr Conv2DBackpropFilterInferType(const PrimitivePtr &prim, const std::vector &input_args) { + for (const auto &item : input_args) { + MS_EXCEPTION_IF_NULL(item); + } + const std::set valid_types = {kNumberTypeInt8, kNumberTypeInt32, kNumberTypeFloat16, kNumberTypeFloat32}; + std::map types; + types.emplace("drotput", input_args[0]->BuildType()); + types.emplace("input_x", input_args[1]->BuildType()); + auto infer_type = CheckAndConvertUtils::CheckTensorTypeSame(types, valid_types, prim->name()); + return TypeIdToType(infer_type); +} +} // namespace + +void Conv2DBackpropFilter::Init(const int64_t out_channel, const std::vector &kernel_size, + const PadMode &pad_mode, const std::vector &pad_list, const int64_t mode, + const std::vector &stride, const std::vector &dilation, + const int64_t group, const Format &format) { + set_out_channel(out_channel); + set_kernel_size(kernel_size); + set_pad_mode(pad_mode); + set_pad_list(pad_list); + set_mode(mode); + if (stride.size() == 4) { + set_stride({stride[2], stride[3]}); + } else { + set_stride(stride); + } + set_dilation(dilation); + set_group(group); + set_format(format); +} + +void Conv2DBackpropFilter::set_out_channel(const int64_t out_channel) { + this->AddAttr(kOutChannel, MakeValue(out_channel)); +} + +int64_t Conv2DBackpropFilter::get_out_channel() const { + auto value_ptr = GetAttr(kOutChannel); + return GetValue(value_ptr); +} + +void Conv2DBackpropFilter::set_kernel_size(const std::vector &kernel_size) { + this->AddAttr(kKernelSize, MakeValue(kernel_size)); +} + +std::vector Conv2DBackpropFilter::get_kernel_size() const { + auto value_ptr = GetAttr(kKernelSize); + return GetValue>(value_ptr); +} + +void Conv2DBackpropFilter::set_pad_mode(const PadMode &pad_mode) { + int64_t swi = pad_mode; + this->AddAttr(kPadMode, MakeValue(swi)); +} + +PadMode Conv2DBackpropFilter::get_pad_mode() const { + auto value_ptr = GetAttr(kPadMode); + return PadMode(GetValue(value_ptr)); +} + +void Conv2DBackpropFilter::set_pad_list(const std::vector &pad_list) { + this->AddAttr(kPadList, MakeValue(pad_list)); +} + +std::vector Conv2DBackpropFilter::get_pad_list() const { + auto value_ptr = GetAttr(kPadList); + return GetValue>(value_ptr); +} + +void Conv2DBackpropFilter::set_mode(const int64_t mode) { this->AddAttr(kMode, MakeValue(mode)); } + +int64_t Conv2DBackpropFilter::get_mode() const { + auto value_ptr = GetAttr(kMode); + return GetValue(value_ptr); +} + +void Conv2DBackpropFilter::set_stride(const std::vector &stride) { this->AddAttr(kStride, MakeValue(stride)); } + +std::vector Conv2DBackpropFilter::get_stride() const { + auto value_ptr = GetAttr(kStride); + return GetValue>(value_ptr); +} + +void Conv2DBackpropFilter::set_dilation(const std::vector &dilation) { + this->AddAttr(kDilation, MakeValue(dilation)); +} + +std::vector Conv2DBackpropFilter::get_dilation() const { + auto value_ptr = GetAttr(kDilation); + return 
GetValue>(value_ptr); +} + +void Conv2DBackpropFilter::set_group(const int64_t group) { this->AddAttr(kGroup, MakeValue(group)); } + +int64_t Conv2DBackpropFilter::get_group() const { + auto value_ptr = GetAttr(kGroup); + return GetValue(value_ptr); +} + +void Conv2DBackpropFilter::set_format(const Format &format) { + int64_t swi = format; + this->AddAttr(kFormat, MakeValue(swi)); +} + +Format Conv2DBackpropFilter::get_format() const { + auto value_ptr = GetAttr(kFormat); + return Format(GetValue(value_ptr)); +} + +AbstractBasePtr Conv2DBackpropFilterInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args) { + return std::make_shared(Conv2DBackpropFilterInferType(primitive, input_args), + Conv2DBackpropFilterInferShape(primitive, input_args)->shape()); +} + +REGISTER_PRIMITIVE_EVAL_IMPL(Conv2DBackpropFilter, prim::kPrimConv2DBackpropFilter, Conv2DBackpropFilterInfer); +REGISTER_PRIMITIVE_C(kNameConv2DBackpropFilter, Conv2DBackpropFilter); +} // namespace ops +} // namespace mindspore diff --git a/mindspore/core/ops/grad/conv2d_backprop_filter.h b/mindspore/core/ops/grad/conv2d_backprop_filter.h new file mode 100644 index 00000000000..d67ef99e597 --- /dev/null +++ b/mindspore/core/ops/grad/conv2d_backprop_filter.h @@ -0,0 +1,73 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef MINDSPORE_CORE_OPS_CONV2D_BACKPROP_FILTER_H_ +#define MINDSPORE_CORE_OPS_CONV2D_BACKPROP_FILTER_H_ +#include +#include +#include + +#include "ops/primitive_c.h" +#include "abstract/abstract_value.h" +#include "utils/check_convert_utils.h" + +namespace mindspore { +namespace ops { +constexpr auto kNameConv2DBackpropFilter = "Conv2DBackpropFilter"; +class Conv2DBackpropFilter : public PrimitiveC { + public: + Conv2DBackpropFilter() : PrimitiveC(kNameConv2DBackpropFilter) { + InitIOName({"out_backprop", "input", "filter_sizes"}, {"output"}); + } + explicit Conv2DBackpropFilter(const std::string k_name) : PrimitiveC(k_name) { + InitIOName({"out_backprop", "input", "filter_sizes"}, {"output"}); + } + ~Conv2DBackpropFilter() = default; + MS_DECLARE_PARENT(Conv2DBackpropFilter, PrimitiveC); + void Init(const int64_t out_channel, const std::vector &kernel_size, const PadMode &pad_mode = VALID, + const std::vector &pad_list = {0, 0, 0, 0}, const int64_t mode = 1, + const std::vector &stride = {1, 1}, const std::vector &dilation = {1, 1, 1, 1}, + const int64_t group = 1, const Format &format = NCHW); + void set_out_channel(const int64_t out_channel); + void set_kernel_size(const std::vector &kernel_size); + void set_pad_mode(const PadMode &pad_mode); + void set_pad_list(const std::vector &pad_list); + void set_mode(const int64_t mode); + void set_stride(const std::vector &stride); + void set_dilation(const std::vector &dilation); + void set_group(const int64_t group); + void set_format(const Format &format); + // kernel_size(h, w) + // stride(h, w) + // pad_list(up, down, left, right) + + int64_t get_out_channel() const; + std::vector get_kernel_size() const; + PadMode get_pad_mode() const; + std::vector get_pad_list() const; + int64_t get_mode() const; + std::vector get_stride() const; + std::vector get_dilation() const; + int64_t get_group() const; + Format get_format() const; +}; +AbstractBasePtr Conv2DBackpropFilterInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args); +using PrimConv2DBackpropFilterPtr = std::shared_ptr; +} // namespace ops +} // namespace mindspore + +#endif // MINDSPORE_CORE_OPS_Conv2DBackpropFilter_H_ diff --git a/mindspore/core/ops/grad/conv2d_backprop_input.cc b/mindspore/core/ops/grad/conv2d_backprop_input.cc new file mode 100644 index 00000000000..6c2eeee8f47 --- /dev/null +++ b/mindspore/core/ops/grad/conv2d_backprop_input.cc @@ -0,0 +1,145 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include +#include +#include +#include + +#include "ops/grad/conv2d_backprop_input.h" + +namespace mindspore { +namespace ops { +void Conv2DBackpropInput::Init(int64_t out_channel, const std::vector &kernel_size, int64_t mode, + const PadMode &pad_mode, const std::vector &pad, + const std::vector &stride, const std::vector &dilation, int64_t group, + const Format &format, const std::vector &pad_list) { + set_out_channel(out_channel); + set_kernel_size(kernel_size); + set_mode(mode); + set_pad_mode(pad_mode); + set_pad(pad); + set_stride(stride); + set_dilation(dilation); + set_group(group); + set_format(format); + set_pad_list(pad_list); +} + +void Conv2DBackpropInput::set_out_channel(int64_t out_channel) { + AddAttr(kOutChannel, + MakeValue(CheckAndConvertUtils::CheckInteger(kOutChannel, out_channel, kGreaterThan, 0, name()))); +} + +void Conv2DBackpropInput::set_kernel_size(const std::vector &kernel_size) { + AddAttr(kKernelSize, MakeValue(CheckAndConvertUtils::CheckPositiveVector(kKernelSize, kernel_size, name()))); +} + +void Conv2DBackpropInput::set_stride(const std::vector &stride) { + AddAttr(kStride, MakeValue(CheckAndConvertUtils::CheckPositiveVector(kStride, stride, name(), true, true))); +} + +void Conv2DBackpropInput::set_dilation(const std::vector &dilation) { + AddAttr(kDilation, MakeValue(CheckAndConvertUtils::CheckPositiveVector(kDilation, dilation, name(), true, true))); +} + +void Conv2DBackpropInput::set_pad_mode(const PadMode &pad_mode) { + std::vector pad = get_pad(); + if (pad_mode == PAD) { + for (auto item : pad) { + CheckAndConvertUtils::Check(kPadItem, item, kGreaterEqual, "zeros_list", 0, name()); + } + } else { + CheckAndConvertUtils::Check(kPad, pad, kEqual, "zeros_list", {0, 0, 0, 0}, name()); + } + int64_t swi = pad_mode; + AddAttr(kPadMode, MakeValue(swi)); +} + +void Conv2DBackpropInput::set_pad(const std::vector &pad) { + CheckAndConvertUtils::CheckInteger("pad_size", pad.size(), kEqual, 4, name()); + AddAttr(kPad, MakeValue(CheckAndConvertUtils::CheckPositiveVector(kPad, pad, name(), true, true))); +} + +void Conv2DBackpropInput::set_mode(int64_t mode) { + AddAttr(kMode, MakeValue(CheckAndConvertUtils::CheckInteger(kMode, mode, kEqual, 1, name()))); +} + +void Conv2DBackpropInput::set_group(int64_t group) { + AddAttr(kGroup, MakeValue(CheckAndConvertUtils::CheckInteger(kGroup, group, kGreaterThan, 0, name()))); +} + +void Conv2DBackpropInput::set_format(const Format &format) { + int64_t f = format; + AddAttr(kFormat, MakeValue(f)); +} + +void Conv2DBackpropInput::set_pad_list(const std::vector &pad_list) { + this->AddAttr(kPadList, MakeValue(pad_list)); +} + +int64_t Conv2DBackpropInput::get_out_channel() const { + auto value_ptr = GetAttr(kOutChannel); + return GetValue(value_ptr); +} + +std::vector Conv2DBackpropInput::get_kernel_size() const { + auto value_ptr = GetAttr(kKernelSize); + return GetValue>(value_ptr); +} + +std::vector Conv2DBackpropInput::get_stride() const { + auto value_ptr = GetAttr(kStride); + return GetValue>(value_ptr); +} + +std::vector Conv2DBackpropInput::get_dilation() const { + auto value_ptr = GetAttr(kDilation); + return GetValue>(value_ptr); +} + +PadMode Conv2DBackpropInput::get_pad_mode() const { + auto value_ptr = GetAttr(kPadMode); + return PadMode(GetValue(value_ptr)); +} + +std::vector Conv2DBackpropInput::get_pad() const { + auto value_ptr = GetAttr(kPad); + return GetValue>(value_ptr); +} + +int64_t Conv2DBackpropInput::get_mode() const { + auto value_ptr = GetAttr(kMode); + return GetValue(value_ptr); +} + 
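// Editorial sketch, not part of the original patch: a minimal illustration of the
// setters defined in this file, assuming the surrounding MindSpore build
// environment. The helper name and values are chosen for illustration only.
namespace {
void Conv2DBackpropInputUsageSketch() {
  Conv2DBackpropInput op;
  // set_pad_mode(PAD) checks the current kPad attribute, so the pad list is
  // registered first; with any other mode the pad list must stay {0, 0, 0, 0}.
  op.set_pad({1, 1, 1, 1});
  op.set_pad_mode(PAD);
  op.set_kernel_size({3, 3});
  op.set_stride({1, 1, 1, 1});
  op.set_out_channel(64);
}
}  // namespace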
+int64_t Conv2DBackpropInput::get_group() const { + auto value_ptr = GetAttr(kGroup); + return GetValue(value_ptr); +} + +Format Conv2DBackpropInput::get_format() const { + auto value_ptr = GetAttr(kFormat); + return Format(GetValue(value_ptr)); +} + +std::vector Conv2DBackpropInput::get_pad_list() const { + auto value_ptr = GetAttr(kPadList); + return GetValue>(value_ptr); +} +REGISTER_PRIMITIVE_C(kNameConv2DBackpropInput, Conv2DBackpropInput); +} // namespace ops +} // namespace mindspore diff --git a/mindspore/core/ops/grad/conv2d_backprop_input.h b/mindspore/core/ops/grad/conv2d_backprop_input.h new file mode 100644 index 00000000000..aeb9e67a7ee --- /dev/null +++ b/mindspore/core/ops/grad/conv2d_backprop_input.h @@ -0,0 +1,66 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_CORE_OPS_CONV2D_BACKPROP_INPUT_H_ +#define MINDSPORE_CORE_OPS_CONV2D_BACKPROP_INPUT_H_ +#include +#include +#include +#include + +#include "ops/op_utils.h" +#include "ops/primitive_c.h" +#include "abstract/abstract_value.h" +#include "utils/check_convert_utils.h" +namespace mindspore { +namespace ops { +constexpr auto kNameConv2DBackpropInput = "Conv2DBackpropInput"; +class Conv2DBackpropInput : public PrimitiveC { + public: + explicit Conv2DBackpropInput(const std::string &k_name = kNameConv2DBackpropInput) : PrimitiveC(k_name) { + InitIOName({"out_backprop", "filter", "input_sizes"}, {"output"}); + } + ~Conv2DBackpropInput() = default; + MS_DECLARE_PARENT(Conv2DBackpropInput, PrimitiveC); + void Init(int64_t out_channel, const std::vector &kernel_size, int64_t mode = 1, + const PadMode &pad_mode = VALID, const std::vector &pad = {0, 0, 0, 0}, + const std::vector &stride = {1, 1, 1, 1}, const std::vector &dilation = {1, 1, 1, 1}, + int64_t group = 1, const Format &format = NCHW, const std::vector &pad_list = {0, 0, 0, 0}); + void set_kernel_size(const std::vector &kernel_size); + void set_stride(const std::vector &stride); + void set_dilation(const std::vector &dilation); + void set_pad_mode(const PadMode &pad_mode); + void set_pad(const std::vector &pad); + void set_mode(int64_t mode); + void set_group(int64_t group); + void set_out_channel(int64_t out_channel); + void set_format(const Format &format); + void set_pad_list(const std::vector &pad_list); + std::vector get_kernel_size() const; + std::vector get_stride() const; + std::vector get_dilation() const; + PadMode get_pad_mode() const; + std::vector get_pad() const; + int64_t get_mode() const; + int64_t get_group() const; + int64_t get_out_channel() const; + Format get_format() const; + std::vector get_pad_list() const; +}; +using PrimConv2DBackpropInputPtr = std::shared_ptr; +} // namespace ops +} // namespace mindspore +#endif // MINDSPORE_CORE_OPS_CONV2D_BACKPROP_INPUT_H_ diff --git a/mindspore/core/ops/grad/de_conv2d_grad_filter.cc b/mindspore/core/ops/grad/de_conv2d_grad_filter.cc new file mode 100644 index 00000000000..0e336c98abc --- /dev/null +++ 
b/mindspore/core/ops/grad/de_conv2d_grad_filter.cc @@ -0,0 +1,138 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include + +#include "ops/grad/de_conv2d_grad_filter.h" +#include "ops/op_utils.h" + +namespace mindspore { +namespace ops { +void DeConv2DGradFilter::Init(const int64_t in_channel, const int64_t out_channel, + const std::vector &kernel_size, const PadMode &pad_mode, + const std::vector &pad_list, const std::vector &stride, + const std::vector &dilation, const int64_t group, const Format &format, + const ActivationType &activation_type, const bool has_bias) { + set_in_channel(in_channel); + set_out_channel(out_channel); + set_kernel_size(kernel_size); + set_pad_mode(pad_mode); + set_pad_list(pad_list); + set_stride(stride); + set_dilation(dilation); + set_group(group); + set_format(format); + set_activation_type(activation_type); + set_has_bias(has_bias); +} + +void DeConv2DGradFilter::set_in_channel(const int64_t in_channel) { this->AddAttr(kInChannel, MakeValue(in_channel)); } + +int64_t DeConv2DGradFilter::get_in_channel() const { + auto value_ptr = GetAttr(kInChannel); + return GetValue(value_ptr); +} + +void DeConv2DGradFilter::set_out_channel(const int64_t out_channel) { + this->AddAttr(kOutChannel, MakeValue(out_channel)); +} + +int64_t DeConv2DGradFilter::get_out_channel() const { + auto value_ptr = GetAttr(kOutChannel); + return GetValue(value_ptr); +} + +void DeConv2DGradFilter::set_kernel_size(const std::vector &kernel_size) { + this->AddAttr(kKernelSize, MakeValue(kernel_size)); +} + +std::vector DeConv2DGradFilter::get_kernel_size() const { + auto value_ptr = GetAttr(kKernelSize); + return GetValue>(value_ptr); +} + +void DeConv2DGradFilter::set_pad_mode(const PadMode &pad_mode) { + int64_t swi = pad_mode; + this->AddAttr(kPadMode, MakeValue(swi)); +} + +PadMode DeConv2DGradFilter::get_pad_mode() const { + auto value_ptr = GetAttr(kPadMode); + return PadMode(GetValue(value_ptr)); +} + +void DeConv2DGradFilter::set_pad_list(const std::vector &pad_list) { + this->AddAttr(kPadList, MakeValue(pad_list)); +} + +std::vector DeConv2DGradFilter::get_pad_list() const { + auto value_ptr = GetAttr(kPadList); + return GetValue>(value_ptr); +} + +void DeConv2DGradFilter::set_stride(const std::vector &stride) { this->AddAttr(kStride, MakeValue(stride)); } + +std::vector DeConv2DGradFilter::get_stride() const { + auto value_ptr = GetAttr(kStride); + return GetValue>(value_ptr); +} + +void DeConv2DGradFilter::set_dilation(const std::vector &dilation) { + this->AddAttr(kDilation, MakeValue(dilation)); +} + +std::vector DeConv2DGradFilter::get_dilation() const { + auto value_ptr = GetAttr(kDilation); + return GetValue>(value_ptr); +} + +void DeConv2DGradFilter::set_group(const int64_t group) { this->AddAttr(kGroup, MakeValue(group)); } + +int64_t DeConv2DGradFilter::get_group() const { + auto value_ptr = GetAttr(kGroup); + return GetValue(value_ptr); +} + +void DeConv2DGradFilter::set_format(const Format 
&format) { + int64_t swi = format; + this->AddAttr(kFormat, MakeValue(swi)); +} + +Format DeConv2DGradFilter::get_format() const { + auto value_ptr = GetAttr(kFormat); + return Format(GetValue(value_ptr)); +} + +void DeConv2DGradFilter::set_activation_type(const ActivationType &activation_type) { + int64_t swi = activation_type; + this->AddAttr(kActivationType, MakeValue(swi)); +} + +ActivationType DeConv2DGradFilter::get_activation_type() const { + auto value_ptr = GetAttr(kActivationType); + return ActivationType(GetValue(value_ptr)); +} + +void DeConv2DGradFilter::set_has_bias(const bool has_bias) { this->AddAttr(kHasBias, MakeValue(has_bias)); } + +bool DeConv2DGradFilter::get_has_bias() const { + auto value_ptr = GetAttr(kHasBias); + return GetValue(value_ptr); +} + +REGISTER_PRIMITIVE_C(kNameDeConv2DGradFilter, DeConv2DGradFilter); +} // namespace ops +} // namespace mindspore diff --git a/mindspore/core/ops/grad/de_conv2d_grad_filter.h b/mindspore/core/ops/grad/de_conv2d_grad_filter.h new file mode 100644 index 00000000000..c9561638276 --- /dev/null +++ b/mindspore/core/ops/grad/de_conv2d_grad_filter.h @@ -0,0 +1,67 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_CORE_OPS_DE_CONV2D_GRAD_FILTER_H_ +#define MINDSPORE_CORE_OPS_DE_CONV2D_GRAD_FILTER_H_ +#include + +#include "ops/primitive_c.h" +#include "abstract/abstract_value.h" +#include "utils/check_convert_utils.h" + +namespace mindspore { +namespace ops { +constexpr auto kNameDeConv2DGradFilter = "DeConv2DGradFilter"; +class DeConv2DGradFilter : public PrimitiveC { + public: + DeConv2DGradFilter() : PrimitiveC(kNameDeConv2DGradFilter) {} + ~DeConv2DGradFilter() = default; + MS_DECLARE_PARENT(DeConv2DGradFilter, PrimitiveC); + void Init(const int64_t in_channel, const int64_t out_channel, const std::vector &kernel_size, + const PadMode &pad_mode, const std::vector &pad_list, const std::vector &stride, + const std::vector &dilation, const int64_t group, const Format &format = NCHW, + const ActivationType &activation_type = NO_ACTIVATION, const bool has_bias = false); + void set_in_channel(const int64_t in_channel); + void set_out_channel(const int64_t out_channel); + void set_kernel_size(const std::vector &kernel_size); + void set_pad_mode(const PadMode &pad_mode); + void set_pad_list(const std::vector &pad_list); + void set_stride(const std::vector &stride); + void set_dilation(const std::vector &dilation); + void set_group(const int64_t group); + void set_format(const Format &format); + void set_activation_type(const ActivationType &activation_type); + void set_has_bias(const bool has_bias); + // kernel_size(h, w) + // stride(h, w) + // pad_list(up, down, left, right) + + int64_t get_in_channel() const; + int64_t get_out_channel() const; + std::vector get_kernel_size() const; + PadMode get_pad_mode() const; + std::vector get_pad_list() const; + std::vector get_stride() const; + std::vector get_dilation() const; + int64_t get_group() const; + 
Format get_format() const; + ActivationType get_activation_type() const; + bool get_has_bias() const; +}; +} // namespace ops +} // namespace mindspore + +#endif // MINDSPORE_CORE_OPS_DE_CONV2D_GRAD_FILTER_H_ diff --git a/mindspore/core/c_ops/gather_nd.cc b/mindspore/core/ops/grad/div_grad.cc similarity index 83% rename from mindspore/core/c_ops/gather_nd.cc rename to mindspore/core/ops/grad/div_grad.cc index a017eed5768..ea719392bb4 100644 --- a/mindspore/core/c_ops/gather_nd.cc +++ b/mindspore/core/ops/grad/div_grad.cc @@ -14,9 +14,12 @@ * limitations under the License. */ -#include "c_ops/gather_nd.h" +#include "ops/grad/div_grad.h" +#include "ops/op_utils.h" #include "utils/check_convert_utils.h" namespace mindspore { -REGISTER_PRIMITIVE_C(kNameGatherNd, GatherNd); +namespace ops { +REGISTER_PRIMITIVE_C(kNameDivGrad, DivGrad); +} // namespace ops } // namespace mindspore diff --git a/mindspore/core/ops/grad/div_grad.h b/mindspore/core/ops/grad/div_grad.h new file mode 100644 index 00000000000..1daba389fa8 --- /dev/null +++ b/mindspore/core/ops/grad/div_grad.h @@ -0,0 +1,36 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_CORE_OPS_DIV_GRAD_H_ +#define MINDSPORE_CORE_OPS_DIV_GRAD_H_ +#include "ops/primitive_c.h" +#include "abstract/abstract_value.h" +#include "utils/check_convert_utils.h" + +namespace mindspore { +namespace ops { +constexpr auto kNameDivGrad = "DivGrad"; +class DivGrad : public PrimitiveC { + public: + DivGrad() : PrimitiveC(kNameDivGrad) {} + ~DivGrad() = default; + MS_DECLARE_PARENT(DivGrad, PrimitiveC); + void Init() {} +}; +} // namespace ops +} // namespace mindspore + +#endif // MINDSPORE_CORE_OPS_DIV_GRAD_H_ diff --git a/mindspore/core/ops/grad/dropout_grad.cc b/mindspore/core/ops/grad/dropout_grad.cc new file mode 100644 index 00000000000..41e626769da --- /dev/null +++ b/mindspore/core/ops/grad/dropout_grad.cc @@ -0,0 +1,70 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
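The conv-style grads in this patch store enum-typed attributes (Format, ActivationType, PadMode) by widening them to int64_t in AddAttr and narrowing them back in the getters. A minimal sketch of that round-trip for DeConv2DGradFilter above (the example function is hypothetical, not part of the patch):

#include "ops/grad/de_conv2d_grad_filter.h"

namespace mindspore {
namespace ops {
void DeConv2DGradFilterAttrExample() {
  DeConv2DGradFilter op;
  op.set_format(NCHW);                    // stored as an int64_t attribute under kFormat
  op.set_activation_type(NO_ACTIVATION);  // same int64_t round-trip under kActivationType
  Format format = op.get_format();                 // NCHW
  ActivationType act = op.get_activation_type();   // NO_ACTIVATION
  (void)format;
  (void)act;
}
}  // namespace ops
}  // namespace mindspore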
+ */ + +#include "ops/grad/dropout_grad.h" +#include "ops/op_utils.h" + +namespace mindspore { +namespace ops { +void DropoutGrad::Init(const float keep_prob) { this->set_keep_prob(keep_prob); } + +void DropoutGrad::set_keep_prob(const float keep_prob) { + CheckAndConvertUtils::CheckInRange(kKeepProb, keep_prob, kIncludeRight, {0.0, 1.0}, this->name()); + this->AddAttr(kKeepProb, MakeValue(keep_prob)); +} + +float DropoutGrad::get_keep_prob() const { + auto value_ptr = GetAttr(kKeepProb); + return GetValue(value_ptr); +} + +namespace { +abstract::ShapePtr DropoutGradInferShape(const PrimitivePtr &primitive, + const std::vector &input_args) { + MS_EXCEPTION_IF_NULL(primitive); + auto DropoutGrad_prim = primitive->cast(); + MS_EXCEPTION_IF_NULL(DropoutGrad_prim); + auto op_name = DropoutGrad_prim->name(); + auto in_shape = CheckAndConvertUtils::ConvertShapePtrToShape("input_shape", input_args[0]->BuildShape(), op_name); + return std::make_shared(in_shape); +} + +TypePtr DropoutGradInferType(const PrimitivePtr &prim, const std::vector &input_args) { + MS_EXCEPTION_IF_NULL(prim); + auto DropoutGrad_prim = prim->cast(); + MS_EXCEPTION_IF_NULL(DropoutGrad_prim); + auto op_name = DropoutGrad_prim->name(); + auto mask_dtype = input_args[1]->BuildType(); + auto dy_dtype = input_args[0]->BuildType(); + CheckAndConvertUtils::CheckSubClass("mask", mask_dtype, {TypeIdToType(kObjectTypeTensorType)}, op_name); + CheckAndConvertUtils::CheckTensorTypeValid("dy", dy_dtype, {kNumberTypeFloat16, kNumberTypeFloat32}, op_name); + auto tensor_type = dy_dtype->cast(); + MS_EXCEPTION_IF_NULL(tensor_type); + auto data_type = tensor_type->element(); + return data_type; +} +} // namespace + +AbstractBasePtr DropoutGradInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args) { + return std::make_shared(DropoutGradInferType(primitive, input_args), + DropoutGradInferShape(primitive, input_args)->shape()); +} + +REGISTER_PRIMITIVE_EVAL_IMPL(DropoutGrad, prim::kPrimDropoutGrad, DropoutGradInfer); +REGISTER_PRIMITIVE_C(kNameDropoutGrad, DropoutGrad); +} // namespace ops +} // namespace mindspore diff --git a/mindspore/core/ops/grad/dropout_grad.h b/mindspore/core/ops/grad/dropout_grad.h new file mode 100644 index 00000000000..89f0b0d16e8 --- /dev/null +++ b/mindspore/core/ops/grad/dropout_grad.h @@ -0,0 +1,45 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef MINDSPORE_CORE_OPS_DROPOUT_GRAD_H_ +#define MINDSPORE_CORE_OPS_DROPOUT_GRAD_H_ +#include +#include + +#include "ops/primitive_c.h" +#include "abstract/abstract_value.h" +#include "utils/check_convert_utils.h" + +namespace mindspore { +namespace ops { +constexpr auto kNameDropoutGrad = "DropoutGrad"; +class DropoutGrad : public PrimitiveC { + public: + DropoutGrad() : PrimitiveC(kNameDropoutGrad) {} + ~DropoutGrad() = default; + MS_DECLARE_PARENT(DropoutGrad, PrimitiveC); + void Init(const float keep_prob = 0.5); + void set_keep_prob(const float keep_prob); + float get_keep_prob() const; +}; + +AbstractBasePtr DropoutGradInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args); +using PrimDropoutGradPtr = std::shared_ptr; +} // namespace ops +} // namespace mindspore + +#endif // MINDSPORE_CORE_OPS_DROPOUT_GRAD_H_ diff --git a/mindspore/core/ops/grad/flatten_grad.cc b/mindspore/core/ops/grad/flatten_grad.cc new file mode 100644 index 00000000000..05e3d427613 --- /dev/null +++ b/mindspore/core/ops/grad/flatten_grad.cc @@ -0,0 +1,41 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "ops/grad/flatten_grad.h" + +namespace mindspore { +namespace ops { +AbstractBasePtr FlattenGradInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args) { + MS_EXCEPTION_IF_NULL(primitive); + auto prim_name = primitive->name(); + CheckAndConvertUtils::CheckInteger("input number", input_args.size(), kEqual, 2, prim_name); + for (const auto &item : input_args) { + MS_EXCEPTION_IF_NULL(item); + } + auto input_x = input_args[0]->cast(); + MS_EXCEPTION_IF_NULL(input_x); + auto input_shape = input_args[1]->cast(); + MS_EXCEPTION_IF_NULL(input_shape); + auto out_shape = GetValue>(input_shape->BuildValue()); + auto ret = input_x->Broaden(); + ret->set_shape(std::make_shared(out_shape)); + return ret; +} +REGISTER_PRIMITIVE_EVAL_IMPL(FlattenGrad, prim::kPrimFlattenGrad, FlattenGradInfer); +REGISTER_PRIMITIVE_C(kNameFlattenGrad, FlattenGrad); +} // namespace ops +} // namespace mindspore diff --git a/mindspore/core/c_ops/flatten_grad.h b/mindspore/core/ops/grad/flatten_grad.h similarity index 85% rename from mindspore/core/c_ops/flatten_grad.h rename to mindspore/core/ops/grad/flatten_grad.h index eb45bde44a9..fd791711b39 100644 --- a/mindspore/core/c_ops/flatten_grad.h +++ b/mindspore/core/ops/grad/flatten_grad.h @@ -14,17 +14,18 @@ * limitations under the License. 
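DropoutGrad above carries a single keep_prob attribute, validated to lie in (0, 1]. A minimal usage sketch (the function below is a hypothetical caller, assuming the headers from this patch are on the include path):

#include "ops/grad/dropout_grad.h"

namespace mindspore {
namespace ops {
void DropoutGradAttrExample() {
  DropoutGrad dropout_grad;
  dropout_grad.Init(0.8f);                         // set_keep_prob checks the (0, 1] range, then AddAttr(kKeepProb, ...)
  float keep_prob = dropout_grad.get_keep_prob();  // reads the stored attribute back: 0.8
  (void)keep_prob;
}
}  // namespace ops
}  // namespace mindspore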
*/ -#ifndef MINDSPORE_CORE_C_OPS_FLATTEN_GRAD_H_ -#define MINDSPORE_CORE_C_OPS_FLATTEN_GRAD_H_ +#ifndef MINDSPORE_CORE_OPS_FLATTEN_GRAD_H_ +#define MINDSPORE_CORE_OPS_FLATTEN_GRAD_H_ #include #include -#include "c_ops/primitive_c.h" -#include "c_ops/op_utils.h" +#include "ops/primitive_c.h" +#include "ops/op_utils.h" #include "abstract/abstract_value.h" #include "utils/check_convert_utils.h" namespace mindspore { +namespace ops { constexpr auto kNameFlattenGrad = "FlattenGrad"; class FlattenGrad : public PrimitiveC { public: @@ -36,5 +37,6 @@ class FlattenGrad : public PrimitiveC { AbstractBasePtr FlattenGradInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, const std::vector &input_args); using PrimFlattenGrad = std::shared_ptr; +} // namespace ops } // namespace mindspore -#endif // MINDSPORE_CORE_C_OPS_FlattenGrad_H_ +#endif // MINDSPORE_CORE_OPS_FlattenGrad_H_ diff --git a/mindspore/core/ops/grad/group_conv2d_grad_input.cc b/mindspore/core/ops/grad/group_conv2d_grad_input.cc new file mode 100644 index 00000000000..ad5d2848d97 --- /dev/null +++ b/mindspore/core/ops/grad/group_conv2d_grad_input.cc @@ -0,0 +1,167 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include +#include "ops/grad/group_conv2d_grad_input.h" +#include "ops/op_utils.h" + +namespace mindspore { +namespace ops { +void GroupConv2DGradInput::Init(const int64_t &in_channel, const int64_t &out_channel, + const std::vector &kernel_size, const PadMode &pad_mode, + const std::vector &pad_list, const std::vector &stride, + const std::vector &dilation, const int64_t &group, + const std::vector &input_shape, const Format &format, + const ActivationType &activation_type, const bool has_bias) { + set_in_channel(in_channel); + set_out_channel(out_channel); + set_kernel_size(kernel_size); + set_pad_mode(pad_mode); + set_pad_list(pad_list); + set_stride(stride); + set_dilation(dilation); + set_group(group); + set_input_shape(input_shape); + set_format(format); + set_activation_type(activation_type); + set_has_bias(has_bias); +} + +void GroupConv2DGradInput::set_in_channel(const int64_t &in_channel) { + this->AddAttr(kInChannel, MakeValue(in_channel)); +} + +int64_t GroupConv2DGradInput::get_in_channel() const { + auto value_ptr = GetAttr(kInChannel); + return GetValue(value_ptr); +} + +void GroupConv2DGradInput::set_out_channel(const int64_t &out_channel) { + this->AddAttr(kOutChannel, MakeValue(out_channel)); +} + +int64_t GroupConv2DGradInput::get_out_channel() const { + auto value_ptr = GetAttr(kOutChannel); + return GetValue(value_ptr); +} + +void GroupConv2DGradInput::set_kernel_size(const std::vector &kernel_size) { + this->AddAttr(kKernelSize, MakeValue(kernel_size)); +} + +std::vector GroupConv2DGradInput::get_kernel_size() const { + auto value_ptr = GetAttr(kKernelSize); + return GetValue>(value_ptr); +} + +void GroupConv2DGradInput::set_pad_mode(const PadMode &pad_mode) { + int64_t swi = pad_mode; + this->AddAttr(kPadMode, 
MakeValue(swi)); +} + +PadMode GroupConv2DGradInput::get_pad_mode() const { + auto value_ptr = GetAttr(kPadMode); + return PadMode(GetValue(value_ptr)); +} + +void GroupConv2DGradInput::set_pad_list(const std::vector &pad_list) { + this->AddAttr(kPadList, MakeValue(pad_list)); +} + +std::vector GroupConv2DGradInput::get_pad_list() const { + auto value_ptr = GetAttr(kPadList); + return GetValue>(value_ptr); +} + +void GroupConv2DGradInput::set_stride(const std::vector &stride) { this->AddAttr(kStride, MakeValue(stride)); } + +std::vector GroupConv2DGradInput::get_stride() const { + auto value_ptr = GetAttr(kStride); + return GetValue>(value_ptr); +} + +void GroupConv2DGradInput::set_dilation(const std::vector &dilation) { + this->AddAttr(kDilation, MakeValue(dilation)); +} + +std::vector GroupConv2DGradInput::get_dilation() const { + auto value_ptr = GetAttr(kDilation); + return GetValue>(value_ptr); +} + +void GroupConv2DGradInput::set_group(const int64_t &group) { this->AddAttr(kGroup, MakeValue(group)); } + +int64_t GroupConv2DGradInput::get_group() const { + auto value_ptr = GetAttr(kGroup); + return GetValue(value_ptr); +} + +void GroupConv2DGradInput::set_input_shape(const std::vector &input_shape) { + this->AddAttr(kInputShape, MakeValue(input_shape)); +} + +std::vector GroupConv2DGradInput::get_input_shape() const { + auto value_ptr = GetAttr(kInputShape); + return GetValue>(value_ptr); +} + +void GroupConv2DGradInput::set_format(const Format &format) { + int64_t swi = format; + this->AddAttr(kFormat, MakeValue(swi)); +} + +Format GroupConv2DGradInput::get_format() const { + auto value_ptr = GetAttr(kFormat); + return Format(GetValue(value_ptr)); +} + +void GroupConv2DGradInput::set_activation_type(const ActivationType &activation_type) { + int64_t swi = activation_type; + this->AddAttr(kActivationType, MakeValue(swi)); +} + +ActivationType GroupConv2DGradInput::get_activation_type() const { + auto value_ptr = GetAttr(kActivationType); + return ActivationType(GetValue(value_ptr)); +} + +void GroupConv2DGradInput::set_has_bias(const bool has_bias) { this->AddAttr(kHasBias, MakeValue(has_bias)); } + +bool GroupConv2DGradInput::get_has_bias() const { + auto value_ptr = GetAttr(kHasBias); + return GetValue(value_ptr); +} +AbstractBasePtr GroupConv2DGradInputInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args) { + MS_EXCEPTION_IF_NULL(primitive); + auto group_prim = primitive->cast(); + MS_EXCEPTION_IF_NULL(group_prim); + auto prim_name = group_prim->name(); + CheckAndConvertUtils::CheckInteger("group_conv_2D_infer", input_args.size(), kGreaterEqual, 2, prim_name); + MS_EXCEPTION_IF_NULL(input_args[0]); + + // Infer shape + auto shape = group_prim->get_input_shape(); + + // Infer type + auto type = input_args[0]->BuildType()->cast()->element(); + + return std::make_shared(type, shape); +} +REGISTER_PRIMITIVE_EVAL_IMPL(GroupConv2DGradInput, prim::kPrimGroupConv2DGradInput, GroupConv2DGradInputInfer); +REGISTER_PRIMITIVE_C(kNameGroupConv2DGradInput, GroupConv2DGradInput); +} // namespace ops +} // namespace mindspore diff --git a/mindspore/core/ops/grad/group_conv2d_grad_input.h b/mindspore/core/ops/grad/group_conv2d_grad_input.h new file mode 100644 index 00000000000..e5fd35f1285 --- /dev/null +++ b/mindspore/core/ops/grad/group_conv2d_grad_input.h @@ -0,0 +1,74 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in 
compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_CORE_OPS_GROUP_CONV2D_GRAD_INPUT_H_ +#define MINDSPORE_CORE_OPS_GROUP_CONV2D_GRAD_INPUT_H_ +#include +#include + +#include "ops/primitive_c.h" +#include "abstract/abstract_value.h" +#include "utils/check_convert_utils.h" + +namespace mindspore { +namespace ops { +constexpr auto kNameGroupConv2DGradInput = "GroupConv2DGradInput"; +class GroupConv2DGradInput : public PrimitiveC { + public: + GroupConv2DGradInput() : PrimitiveC(kNameGroupConv2DGradInput) {} + ~GroupConv2DGradInput() = default; + MS_DECLARE_PARENT(GroupConv2DGradInput, PrimitiveC); + void Init(const int64_t &in_channel, const int64_t &out_channel, const std::vector &kernel_size, + const PadMode &pad_mode, const std::vector &pad_list, const std::vector &stride, + const std::vector &dilation, const int64_t &group, const std::vector &input_shape, + const Format &format = NCHW, const ActivationType &activation_type = NO_ACTIVATION, + const bool has_bias = false); + void set_in_channel(const int64_t &in_channel); + void set_out_channel(const int64_t &out_channel); + void set_kernel_size(const std::vector &kernel_size); + void set_pad_mode(const PadMode &pad_mode); + void set_pad_list(const std::vector &pad_list); + void set_stride(const std::vector &stride); + void set_dilation(const std::vector &dilation); + void set_group(const int64_t &group); + void set_input_shape(const std::vector &input_shape); + void set_format(const Format &format); + void set_activation_type(const ActivationType &activation_type); + void set_has_bias(const bool has_bias); + // kernel_size(h, w) + // stride(h, w) + // pad_list(up, down, left, right) + + int64_t get_in_channel() const; + int64_t get_out_channel() const; + std::vector get_kernel_size() const; + PadMode get_pad_mode() const; + std::vector get_pad_list() const; + std::vector get_stride() const; + std::vector get_dilation() const; + int64_t get_group() const; + std::vector get_input_shape() const; + Format get_format() const; + ActivationType get_activation_type() const; + bool get_has_bias() const; +}; +AbstractBasePtr GroupConv2DGradInputInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args); +using PrimGroupConv2DGradInputPtr = std::shared_ptr; +} // namespace ops +} // namespace mindspore + +#endif // MINDSPORE_CORE_OPS_GROUP_CONV2D_GRAD_INPUT_H_ diff --git a/mindspore/core/c_ops/log_grad.cc b/mindspore/core/ops/grad/log_grad.cc similarity index 87% rename from mindspore/core/c_ops/log_grad.cc rename to mindspore/core/ops/grad/log_grad.cc index 26b6327ce3b..c86b075c9dc 100644 --- a/mindspore/core/c_ops/log_grad.cc +++ b/mindspore/core/ops/grad/log_grad.cc @@ -14,16 +14,18 @@ * limitations under the License. 
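GroupConv2DGradInput stores every hyper-parameter as a primitive attribute, and GroupConv2DGradInputInfer simply echoes the stored input_shape as the output shape. A rough sketch (hypothetical caller; the concrete values are illustrative only):

#include <vector>
#include "ops/grad/group_conv2d_grad_input.h"

namespace mindspore {
namespace ops {
void GroupConv2DGradInputAttrExample() {
  GroupConv2DGradInput op;
  // kernel_size(h, w), stride(h, w), pad_list(up, down, left, right) -- see the comment block in the header above.
  op.Init(/*in_channel=*/16, /*out_channel=*/16, /*kernel_size=*/{3, 3}, VALID,
          /*pad_list=*/{0, 0, 0, 0}, /*stride=*/{1, 1}, /*dilation=*/{1, 1}, /*group=*/16,
          /*input_shape=*/{1, 16, 32, 32});
  std::vector<int64_t> shape = op.get_input_shape();  // {1, 16, 32, 32}, reused by the infer as the output shape
  (void)shape;
}
}  // namespace ops
}  // namespace mindspore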
*/ -#include "c_ops/log_grad.h" +#include "ops/grad/log_grad.h" #include #include #include #include #include -#include "c_ops/op_utils.h" +#include "ops/op_utils.h" #include "utils/check_convert_utils.h" #include "abstract/primitive_infer_map.h" namespace mindspore { +namespace ops { REGISTER_PRIMITIVE_C(kNameLogGrad, LogGrad); -} +} // namespace ops +} // namespace mindspore diff --git a/mindspore/core/c_ops/log_grad.h b/mindspore/core/ops/grad/log_grad.h similarity index 84% rename from mindspore/core/c_ops/log_grad.h rename to mindspore/core/ops/grad/log_grad.h index bb43cc5908a..f92cde7db52 100644 --- a/mindspore/core/c_ops/log_grad.h +++ b/mindspore/core/ops/grad/log_grad.h @@ -14,17 +14,18 @@ * limitations under the License. */ -#ifndef MINDSPORE_CORE_C_OPS_LOGGRAD_H_ -#define MINDSPORE_CORE_C_OPS_LOGGRAD_H_ +#ifndef MINDSPORE_CORE_OPS_LOG_GRAD_H_ +#define MINDSPORE_CORE_OPS_LOG_GRAD_H_ #include #include #include #include -#include "c_ops/primitive_c.h" +#include "ops/primitive_c.h" #include "abstract/abstract_value.h" #include "utils/check_convert_utils.h" namespace mindspore { +namespace ops { constexpr auto kNameLogGrad = "LogGrad"; class LogGrad : public PrimitiveC { public: @@ -33,6 +34,7 @@ class LogGrad : public PrimitiveC { MS_DECLARE_PARENT(LogGrad, PrimitiveC); void Init() {} }; +} // namespace ops } // namespace mindspore -#endif // MINDSPORE_CORE_C_OPS_LOGGRAD_H_ +#endif // MINDSPORE_CORE_OPS_LOG_GRAD_H_ diff --git a/mindspore/core/ops/grad/max_pool_grad.cc b/mindspore/core/ops/grad/max_pool_grad.cc new file mode 100644 index 00000000000..14b2667f12e --- /dev/null +++ b/mindspore/core/ops/grad/max_pool_grad.cc @@ -0,0 +1,69 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "ops/grad/max_pool_grad.h" +#include "ops/op_utils.h" + +namespace mindspore { +namespace ops { +void MaxPoolGrad::Init(const std::vector &kernel_size, const std::vector &strides, + const PadMode &pad_mode, const Format &data_format) { + this->set_data_format(data_format); + this->set_kernel_size(kernel_size); + this->set_strides(strides); + this->set_pad_mode(pad_mode); +} + +void MaxPoolGrad::set_data_format(const Format &data_format) { + int64_t swi = data_format; + this->AddAttr(kFormat, MakeValue(swi)); +} + +Format MaxPoolGrad::get_data_format() const { + auto value_ptr = GetAttr(kFormat); + return Format(GetValue(value_ptr)); +} + +void MaxPoolGrad::set_kernel_size(const std::vector &kernel_size) { + std::vector k_size = _grad_check_vector(kSize, kernel_size, this->name()); + k_size = this->get_data_format() == NCHW ? k_size : std::vector{k_size[0], k_size[2], k_size[3], k_size[1]}; + this->AddAttr(kSize, MakeValue(k_size)); +} + +void MaxPoolGrad::set_strides(const std::vector &strides) { + std::vector stride_ = _grad_check_vector(kStrides, strides, this->name()); + stride_ = + this->get_data_format() == NCHW ? 
stride_ : std::vector{stride_[0], stride_[2], stride_[3], stride_[1]}; + this->AddAttr(kStrides, MakeValue(stride_)); +} + +AbstractBasePtr MaxPoolGradInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args) { + auto MaxPoolGrad_prim = primitive->cast(); + MS_EXCEPTION_IF_NULL(MaxPoolGrad_prim); + auto op_name = MaxPoolGrad_prim->name(); + MS_EXCEPTION_IF_NULL(input_args[0]->BuildValue()); + auto x1_shape = CheckAndConvertUtils::ConvertShapePtrToShape("x1_shape", input_args[0]->BuildShape(), op_name); + auto tensor_type = input_args[0]->BuildType()->cast(); + MS_EXCEPTION_IF_NULL(tensor_type); + auto element = tensor_type->element(); + return std::make_shared(element, x1_shape); +} + +REGISTER_PRIMITIVE_EVAL_IMPL(MaxPoolGrad, prim::kPrimMaxPoolGrad, MaxPoolGradInfer); +REGISTER_PRIMITIVE_C(kNameMaxPoolGrad, MaxPoolGrad); +} // namespace ops +} // namespace mindspore diff --git a/mindspore/core/ops/grad/max_pool_grad.h b/mindspore/core/ops/grad/max_pool_grad.h new file mode 100644 index 00000000000..0d45beee228 --- /dev/null +++ b/mindspore/core/ops/grad/max_pool_grad.h @@ -0,0 +1,50 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_CORE_OPS_MAX_POOL_GRAD_H_ +#define MINDSPORE_CORE_OPS_MAX_POOL_GRAD_H_ +#include +#include +#include +#include +#include "ops/grad/pool_grad.h" +#include "ops/primitive_c.h" +#include "abstract/abstract_value.h" +#include "utils/check_convert_utils.h" + +namespace mindspore { +namespace ops { +constexpr auto kNameMaxPoolGrad = "MaxPoolGrad"; +class MaxPoolGrad : public PoolGrad { + public: + MaxPoolGrad() : PoolGrad(kNameMaxPoolGrad) { InitIOName({"x_origin", "out_origin", "grad"}, {"output"}); } + ~MaxPoolGrad() = default; + MS_DECLARE_PARENT(MaxPoolGrad, PoolGrad); + void Init(const std::vector &kernel_size = {1}, const std::vector &strides = {1}, + const PadMode &pad_mode = VALID, const Format &data_format = NCHW); + void set_kernel_size(const std::vector &kernel_size); + void set_strides(const std::vector &strides); + void set_data_format(const Format &data_format); + Format get_data_format() const; +}; + +AbstractBasePtr MaxPoolGradInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args); +using PrimMaxPoolGradPtr = std::shared_ptr; +} // namespace ops +} // namespace mindspore + +#endif // MINDSPORE_CORE_OPS_MAX_POOL_GRAD_H_ diff --git a/mindspore/core/c_ops/adam.cc b/mindspore/core/ops/grad/maximum_grad.cc similarity index 53% rename from mindspore/core/c_ops/adam.cc rename to mindspore/core/ops/grad/maximum_grad.cc index 315505285c9..eafbfb79bf0 100644 --- a/mindspore/core/c_ops/adam.cc +++ b/mindspore/core/ops/grad/maximum_grad.cc @@ -14,27 +14,30 @@ * limitations under the License. 
*/ -#include "c_ops/adam.h" -#include "c_ops/op_utils.h" -#include "utils/check_convert_utils.h" +#include "ops/grad/maximum_grad.h" +#include "ops/op_utils.h" namespace mindspore { -void Adam::set_use_locking(const bool &use_locking) { this->AddAttr(kUseLocking, MakeValue(use_locking)); } +namespace ops { +void MaximumGrad::Init(const bool grad_x, const bool grad_y) { + set_grad_x(grad_x); + set_grad_y(grad_y); +} -bool Adam::get_use_locking() const { - auto value_ptr = GetAttr(kUseLocking); +void MaximumGrad::set_grad_x(const bool grad_x) { this->AddAttr(kGradX, MakeValue(grad_x)); } + +void MaximumGrad::set_grad_y(const bool grad_y) { this->AddAttr(kGradY, MakeValue(grad_y)); } + +bool MaximumGrad::get_grad_x() const { + auto value_ptr = GetAttr(kGradX); return GetValue(value_ptr); } -void Adam::set_use_nesteroy(const bool &use_nesteroy) { this->AddAttr(kUseNesteroy, MakeValue(use_nesteroy)); } - -bool Adam::get_use_nesteroy() const { - auto value_ptr = GetAttr(kUseNesteroy); +bool MaximumGrad::get_grad_y() const { + auto value_ptr = GetAttr(kGradY); return GetValue(value_ptr); } -void Adam::Init(const bool &use_locking, const bool &use_nesteroy) { - this->set_use_locking(use_locking); - this->set_use_nesteroy(use_nesteroy); -} -REGISTER_PRIMITIVE_C(kNameAdam, Adam); + +REGISTER_PRIMITIVE_C(kNameMaximumGrad, MaximumGrad); +} // namespace ops } // namespace mindspore diff --git a/mindspore/core/ops/grad/maximum_grad.h b/mindspore/core/ops/grad/maximum_grad.h new file mode 100644 index 00000000000..2cfc7afe654 --- /dev/null +++ b/mindspore/core/ops/grad/maximum_grad.h @@ -0,0 +1,40 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_CORE_OPS_MAXIMUM_GRAD_H_ +#define MINDSPORE_CORE_OPS_MAXIMUM_GRAD_H_ +#include "ops/primitive_c.h" +#include "abstract/abstract_value.h" +#include "utils/check_convert_utils.h" + +namespace mindspore { +namespace ops { +constexpr auto kNameMaximumGrad = "MaximumGrad"; +class MaximumGrad : public PrimitiveC { + public: + MaximumGrad() : PrimitiveC(kNameMaximumGrad) {} + ~MaximumGrad() = default; + MS_DECLARE_PARENT(MaximumGrad, PrimitiveC); + void Init(const bool grad_x = true, const bool grad_y = true); + void set_grad_x(const bool grad_x); + void set_grad_y(const bool grad_y); + bool get_grad_x() const; + bool get_grad_y() const; +}; +} // namespace ops +} // namespace mindspore + +#endif // MINDSPORE_CORE_OPS_MAXIMUM_GRAD_H_ diff --git a/mindspore/core/ops/grad/minimum_grad.cc b/mindspore/core/ops/grad/minimum_grad.cc new file mode 100644 index 00000000000..13488409313 --- /dev/null +++ b/mindspore/core/ops/grad/minimum_grad.cc @@ -0,0 +1,43 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "ops/grad/minimum_grad.h" +#include "ops/op_utils.h" + +namespace mindspore { +namespace ops { +void MinimumGrad::Init(const bool grad_x, const bool grad_y) { + set_grad_x(grad_x); + set_grad_y(grad_y); +} + +void MinimumGrad::set_grad_x(const bool grad_x) { this->AddAttr(kGradX, MakeValue(grad_x)); } + +void MinimumGrad::set_grad_y(const bool grad_y) { this->AddAttr(kGradY, MakeValue(grad_y)); } + +bool MinimumGrad::get_grad_x() const { + auto value_ptr = GetAttr(kGradX); + return GetValue(value_ptr); +} + +bool MinimumGrad::get_grad_y() const { + auto value_ptr = GetAttr(kGradY); + return GetValue(value_ptr); +} + +REGISTER_PRIMITIVE_C(kNameMinimumGrad, MinimumGrad); +} // namespace ops +} // namespace mindspore diff --git a/mindspore/core/ops/grad/minimum_grad.h b/mindspore/core/ops/grad/minimum_grad.h new file mode 100644 index 00000000000..9159cd79012 --- /dev/null +++ b/mindspore/core/ops/grad/minimum_grad.h @@ -0,0 +1,40 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_CORE_OPS_MINIMUM_GRAD_H_ +#define MINDSPORE_CORE_OPS_MINIMUM_GRAD_H_ +#include "ops/primitive_c.h" +#include "abstract/abstract_value.h" +#include "utils/check_convert_utils.h" + +namespace mindspore { +namespace ops { +constexpr auto kNameMinimumGrad = "MinimumGrad"; +class MinimumGrad : public PrimitiveC { + public: + MinimumGrad() : PrimitiveC(kNameMinimumGrad) {} + ~MinimumGrad() = default; + MS_DECLARE_PARENT(MinimumGrad, PrimitiveC); + void Init(const bool grad_x = true, const bool grad_y = true); + void set_grad_x(const bool grad_x); + void set_grad_y(const bool grad_y); + bool get_grad_x() const; + bool get_grad_y() const; +}; +} // namespace ops +} // namespace mindspore + +#endif // MINDSPORE_CORE_OPS_MINIMUM_GRAD_H_ diff --git a/mindspore/core/ops/grad/mul_grad.cc b/mindspore/core/ops/grad/mul_grad.cc new file mode 100644 index 00000000000..861387f4906 --- /dev/null +++ b/mindspore/core/ops/grad/mul_grad.cc @@ -0,0 +1,25 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
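MaximumGrad and MinimumGrad expose the same pair of boolean attributes deciding which inputs receive a gradient. A short sketch of the attribute round-trip (hypothetical caller):

#include "ops/grad/minimum_grad.h"

namespace mindspore {
namespace ops {
void MinimumGradAttrExample() {
  MinimumGrad op;
  op.Init(/*grad_x=*/true, /*grad_y=*/false);  // stored via AddAttr(kGradX, ...) / AddAttr(kGradY, ...)
  bool grad_x = op.get_grad_x();               // true
  bool grad_y = op.get_grad_y();               // false
  (void)grad_x;
  (void)grad_y;
}
}  // namespace ops
}  // namespace mindspore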
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "ops/grad/mul_grad.h" +#include "utils/check_convert_utils.h" +#include "abstract/primitive_infer_map.h" + +namespace mindspore { +namespace ops { +REGISTER_PRIMITIVE_C(kNameMulGrad, MulGrad); +} // namespace ops +} // namespace mindspore diff --git a/mindspore/core/ops/grad/mul_grad.h b/mindspore/core/ops/grad/mul_grad.h new file mode 100644 index 00000000000..fbd3b02d04b --- /dev/null +++ b/mindspore/core/ops/grad/mul_grad.h @@ -0,0 +1,39 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef MINDSPORE_CORE_OPS_MUL_GRAD_H_ +#define MINDSPORE_CORE_OPS_MUL_GRAD_H_ +#include + +#include "ops/primitive_c.h" +#include "abstract/abstract_value.h" +#include "utils/check_convert_utils.h" + +namespace mindspore { +namespace ops { +constexpr auto kNameMulGrad = "MulGrad"; +class MulGrad : public PrimitiveC { + public: + MulGrad() : PrimitiveC(kNameMulGrad) {} + ~MulGrad() = default; + MS_DECLARE_PARENT(MulGrad, PrimitiveC); + void Init() {} +}; + +using PrimMulGradPtr = std::shared_ptr; +} // namespace ops +} // namespace mindspore + +#endif // MINDSPORE_CORE_OPS_MUL_GRAD_H_ diff --git a/mindspore/core/ops/grad/neg_grad.cc b/mindspore/core/ops/grad/neg_grad.cc new file mode 100644 index 00000000000..86d597229ef --- /dev/null +++ b/mindspore/core/ops/grad/neg_grad.cc @@ -0,0 +1,23 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "ops/grad/neg_grad.h" + +namespace mindspore { +namespace ops { +REGISTER_PRIMITIVE_C(kNameNegGrad, NegGrad); +} // namespace ops +} // namespace mindspore diff --git a/mindspore/core/ops/grad/neg_grad.h b/mindspore/core/ops/grad/neg_grad.h new file mode 100644 index 00000000000..e063a495444 --- /dev/null +++ b/mindspore/core/ops/grad/neg_grad.h @@ -0,0 +1,44 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_CORE_OPS_NEG_GRAD_H_ +#define MINDSPORE_CORE_OPS_NEG_GRAD_H_ + +#include +#include +#include +#include +#include +#include "ops/op_utils.h" +#include "ops/primitive_c.h" +#include "abstract/primitive_infer_map.h" +#include "abstract/abstract_value.h" +#include "utils/check_convert_utils.h" + +namespace mindspore { +namespace ops { +constexpr auto kNameNegGrad = "NegGrad"; +class NegGrad : public PrimitiveC { + public: + NegGrad() : PrimitiveC(kNameNegGrad) {} + ~NegGrad() = default; + MS_DECLARE_PARENT(NegGrad, PrimitiveC); + void Init() {} +}; +} // namespace ops +} // namespace mindspore + +#endif // MINDSPORE_CORE_OPS_NEG_GRAD_H_ diff --git a/mindspore/core/ops/grad/pool_grad.cc b/mindspore/core/ops/grad/pool_grad.cc new file mode 100644 index 00000000000..3b5c5c073bc --- /dev/null +++ b/mindspore/core/ops/grad/pool_grad.cc @@ -0,0 +1,89 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "ops/grad/pool_grad.h" +#include "ops/op_utils.h" + +namespace mindspore { +namespace ops { + +std::vector PoolGrad::_grad_check_vector(std::string arg_name, std::vector arg_val, + std::string op_name) { + std::vector ret; + std::string error_msg = "For '" + op_name + "'" + " the '" + arg_name + + "' should be a vector of one or two or four " + "positive int number, but got error arg_val"; + switch ((int64_t)arg_val.size()) { + case 1: + ret = {1, 1, arg_val[0], arg_val[0]}; + break; + case 2: + ret = {1, 1, arg_val[0], arg_val[1]}; + break; + case 4: + ret = arg_val; + break; + default: + MS_LOG(EXCEPTION) << error_msg; + } + for (auto it : arg_val) { + if (it <= 0) { + MS_LOG(EXCEPTION) << error_msg; + } + } + return ret; +} + +void PoolGrad::Init(const std::vector &kernel_size, const std::vector &strides, + const PadMode &pad_mode) { + this->set_kernel_size(kernel_size); + this->set_strides(strides); + this->set_pad_mode(pad_mode); +} + +void PoolGrad::set_kernel_size(const std::vector &kernel_size) { + std::vector k_size = _grad_check_vector(kSize, kernel_size, this->name()); + this->AddAttr(kSize, MakeValue(k_size)); +} + +void PoolGrad::set_strides(const std::vector &strides) { + std::vector strides_ = _grad_check_vector(kStrides, strides, this->name()); + this->AddAttr(kStrides, MakeValue(strides_)); +} + +void PoolGrad::set_pad_mode(const PadMode &pad_mode) { + int64_t swi = pad_mode; + this->AddAttr(kPadMode, MakeValue(swi)); +} + +std::vector PoolGrad::get_kernel_size() const { + auto value_ptr = GetAttr(kSize); + return GetValue>(value_ptr); +} + +std::vector PoolGrad::get_strides() const { + auto value_ptr = GetAttr(kStrides); + return GetValue>(value_ptr); +} + +PadMode PoolGrad::get_pad_mode() const { + auto value_ptr = GetAttr(kPadMode); + return PadMode(GetValue(value_ptr)); +} + +REGISTER_PRIMITIVE_C(kNamePoolGrad, PoolGrad); +} // namespace ops +} // namespace mindspore diff 
--git a/mindspore/core/ops/grad/pool_grad.h b/mindspore/core/ops/grad/pool_grad.h new file mode 100644 index 00000000000..2127d7069a5 --- /dev/null +++ b/mindspore/core/ops/grad/pool_grad.h @@ -0,0 +1,53 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_CORE_OPS_POOL_GRAD_H_ +#define MINDSPORE_CORE_OPS_POOL_GRAD_H_ +#include +#include +#include +#include +#include "ops/primitive_c.h" +#include "abstract/abstract_value.h" +#include "utils/check_convert_utils.h" + +namespace mindspore { +namespace ops { +constexpr auto kNamePoolGrad = "PoolGrad"; +class PoolGrad : public PrimitiveC { + public: + PoolGrad() : PrimitiveC(kNamePoolGrad) { InitIOName({"x_origin", "out_origin", "grad"}, {"output"}); } + explicit PoolGrad(const std::string k_name) : PrimitiveC(k_name) { + InitIOName({"x_origin", "out_origin", "grad"}, {"output"}); + } + ~PoolGrad() = default; + MS_DECLARE_PARENT(PoolGrad, PrimitiveC); + virtual void Init(const std::vector &kernel_size = {1}, const std::vector &strides = {1}, + const PadMode &pad_mode = VALID); + virtual void set_kernel_size(const std::vector &kernel_size); + virtual void set_strides(const std::vector &strides); + void set_pad_mode(const PadMode &pad_mode); + + std::vector get_kernel_size() const; + std::vector get_strides() const; + PadMode get_pad_mode() const; + std::vector _grad_check_vector(const std::string arg_name, const std::vector arg_val, + const std::string op_name); +}; +} // namespace ops +} // namespace mindspore + +#endif // MINDSPORE_CORE_OPS_POOL_GRAD_H_ diff --git a/mindspore/core/ops/grad/pooling_grad.cc b/mindspore/core/ops/grad/pooling_grad.cc new file mode 100644 index 00000000000..2e9c12e2cec --- /dev/null +++ b/mindspore/core/ops/grad/pooling_grad.cc @@ -0,0 +1,105 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
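PoolGrad::_grad_check_vector accepts kernel_size/strides of length 1, 2, or 4, rejects non-positive entries, and normalizes the short forms to 4-element NCHW vectors; MaxPoolGrad additionally reorders the result when the data format is not NCHW. A small sketch of what ends up stored (hypothetical caller):

#include <vector>
#include "ops/grad/max_pool_grad.h"

namespace mindspore {
namespace ops {
void MaxPoolGradAttrExample() {
  MaxPoolGrad op;
  op.set_data_format(NCHW);  // set_kernel_size/set_strides consult the format, so set it first (Init does the same)
  op.set_kernel_size({2});   // _grad_check_vector expands {2} to {1, 1, 2, 2}
  op.set_strides({2, 3});    // {2, 3} becomes {1, 1, 2, 3}
  std::vector<int64_t> kernel_size = op.get_kernel_size();  // getter inherited from PoolGrad
  (void)kernel_size;
}
}  // namespace ops
}  // namespace mindspore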
+ */ + +#include "ops/grad/pooling_grad.h" +#include "ops/op_utils.h" + +namespace mindspore { +namespace ops { +void PoolingGrad::Init(const PoolMode &pool_mode, const std::vector &window, + const std::vector &stride, const PadMode &pad_mode, + const std::vector &pad_list, const RoundMode &round_mode, const Format &format, + const bool global) { + set_pool_mode(pool_mode); + set_window(window); + set_stride(stride); + set_pad_mode(pad_mode); + set_pad_list(pad_list); + set_round_mode(round_mode); + set_format(format); + set_global(global); +} + +void PoolingGrad::set_pool_mode(const PoolMode &pool_mode) { + int64_t swi = pool_mode; + this->AddAttr(kPoolMode, MakeValue(swi)); +} + +PoolMode PoolingGrad::get_pool_mode() const { + auto value_ptr = GetAttr(kPoolMode); + return PoolMode(GetValue(value_ptr)); +} + +void PoolingGrad::set_window(const std::vector &window) { this->AddAttr(kWindow, MakeValue(window)); } + +std::vector PoolingGrad::get_window() const { + auto value_ptr = GetAttr(kWindow); + return GetValue>(value_ptr); +} + +void PoolingGrad::set_stride(const std::vector &stride) { this->AddAttr(kStride, MakeValue(stride)); } + +std::vector PoolingGrad::get_stride() const { + auto value_ptr = GetAttr(kStride); + return GetValue>(value_ptr); +} + +void PoolingGrad::set_pad_mode(const PadMode &pad_mode) { + int64_t swi = pad_mode; + this->AddAttr(kPadMode, MakeValue(swi)); +} + +PadMode PoolingGrad::get_pad_mode() const { + auto value_ptr = GetAttr(kPadMode); + return PadMode(GetValue(value_ptr)); +} + +void PoolingGrad::set_pad_list(const std::vector &pad_list) { this->AddAttr(kPadList, MakeValue(pad_list)); } + +std::vector PoolingGrad::get_pad_list() const { + auto value_ptr = GetAttr(kPadList); + return GetValue>(value_ptr); +} + +void PoolingGrad::set_round_mode(const RoundMode &round_mode) { + int64_t swi = round_mode; + this->AddAttr(kRoundMode, MakeValue(swi)); +} + +RoundMode PoolingGrad::get_round_mode() const { + auto value_ptr = GetAttr(kRoundMode); + return RoundMode(GetValue(value_ptr)); +} + +void PoolingGrad::set_format(const Format &format) { + int64_t swi = format; + this->AddAttr(kFormat, MakeValue(swi)); +} + +Format PoolingGrad::get_format() const { + auto value_ptr = GetAttr(kFormat); + return Format(GetValue(value_ptr)); +} + +void PoolingGrad::set_global(const bool global) { this->AddAttr(kGlobal, MakeValue(global)); } + +bool PoolingGrad::get_global() const { + auto value_ptr = GetAttr(kGlobal); + return GetValue(value_ptr); +} +REGISTER_PRIMITIVE_C(kNamePoolingGrad, PoolingGrad); +} // namespace ops +} // namespace mindspore diff --git a/mindspore/core/ops/grad/pooling_grad.h b/mindspore/core/ops/grad/pooling_grad.h new file mode 100644 index 00000000000..561dc270fd1 --- /dev/null +++ b/mindspore/core/ops/grad/pooling_grad.h @@ -0,0 +1,62 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef MINDSPORE_CORE_OPS_POOLING_GRAD_H_ +#define MINDSPORE_CORE_OPS_POOLING_GRAD_H_ +#include +#include +#include +#include +#include "ops/primitive_c.h" +#include "abstract/abstract_value.h" +#include "utils/check_convert_utils.h" + +namespace mindspore { +namespace ops { +constexpr auto kNamePoolingGrad = "PoolingGrad"; +class PoolingGrad : public PrimitiveC { + public: + PoolingGrad() : PrimitiveC(kNamePoolingGrad) {} + ~PoolingGrad() = default; + MS_DECLARE_PARENT(PoolingGrad, PrimitiveC); + void Init(const PoolMode &pool_mode, const std::vector &window, const std::vector &stride, + const PadMode &pad_mode, const std::vector &pad_list, const RoundMode &round_mode, + const Format &format = NCHW, const bool global = false); + void set_pool_mode(const PoolMode &pool_mode); + void set_window(const std::vector &window); + void set_stride(const std::vector &stride); + void set_pad_mode(const PadMode &pad_mode); + void set_pad_list(const std::vector &pad_list); + void set_round_mode(const RoundMode &round_mode); + void set_format(const Format &format); + void set_global(const bool global); + // window(h, w) + // stride(h, w) + // pad_list(up, down, left, right) + + PoolMode get_pool_mode() const; + std::vector get_window() const; + std::vector get_stride() const; + PadMode get_pad_mode() const; + std::vector get_pad_list() const; + RoundMode get_round_mode() const; + Format get_format() const; + bool get_global() const; +}; +} // namespace ops +} // namespace mindspore + +#endif // MINDSPORE_CORE_OPS_POOLING_GRAD_H_ diff --git a/mindspore/core/ops/grad/power_grad.cc b/mindspore/core/ops/grad/power_grad.cc new file mode 100644 index 00000000000..62c635d626e --- /dev/null +++ b/mindspore/core/ops/grad/power_grad.cc @@ -0,0 +1,55 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
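PoolingGrad is another attribute-only primitive; besides the enum-typed pool_mode/pad_mode/round_mode/format it stores plain integer vectors and a bool. A minimal sketch restricted to those plain attributes (hypothetical caller):

#include <vector>
#include "ops/grad/pooling_grad.h"

namespace mindspore {
namespace ops {
void PoolingGradAttrExample() {
  PoolingGrad op;
  // window(h, w), stride(h, w), pad_list(up, down, left, right) -- see the comment block in the header above.
  op.set_window({2, 2});
  op.set_stride({2, 2});
  op.set_pad_list({0, 0, 0, 0});
  op.set_global(false);
  std::vector<int64_t> window = op.get_window();  // {2, 2}
  (void)window;
}
}  // namespace ops
}  // namespace mindspore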
+ */ + +#include "ops/grad/power_grad.h" +#include +#include +#include +#include +#include +#include "ops/op_utils.h" +#include "utils/check_convert_utils.h" +#include "abstract/primitive_infer_map.h" + +namespace mindspore { +namespace ops { + +void PowerGrad::set_power(const float power) { this->AddAttr(kPower, MakeValue(power)); } +float PowerGrad::get_power() const { + auto value_ptr = GetAttr(kPower); + return GetValue(value_ptr); +} + +void PowerGrad::set_scale(const float scale) { this->AddAttr(kScale, MakeValue(scale)); } +float PowerGrad::get_scale() const { + auto value_ptr = GetAttr(kScale); + return GetValue(value_ptr); +} + +void PowerGrad::set_shift(const float shift) { this->AddAttr(kShift, MakeValue(shift)); } +float PowerGrad::get_shift() const { + auto value_ptr = GetAttr(kShift); + return GetValue(value_ptr); +} + +void PowerGrad::Init(const float power, const float scale, const float shift) { + this->set_power(power); + this->set_scale(scale); + this->set_shift(shift); +} +REGISTER_PRIMITIVE_C(kNamePowerGrad, PowerGrad); +} // namespace ops +} // namespace mindspore diff --git a/mindspore/core/ops/grad/power_grad.h b/mindspore/core/ops/grad/power_grad.h new file mode 100644 index 00000000000..45f8c710574 --- /dev/null +++ b/mindspore/core/ops/grad/power_grad.h @@ -0,0 +1,46 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_CORE_OPS_POWER_GRAD_H_ +#define MINDSPORE_CORE_OPS_POWER_GRAD_H_ +#include +#include +#include +#include +#include "ops/primitive_c.h" +#include "abstract/abstract_value.h" +#include "utils/check_convert_utils.h" + +namespace mindspore { +namespace ops { +constexpr auto kNamePowerGrad = "PowerGrad"; +class PowerGrad : public PrimitiveC { + public: + PowerGrad() : PrimitiveC(kNamePowerGrad) {} + ~PowerGrad() = default; + MS_DECLARE_PARENT(PowerGrad, PrimitiveC); + void Init(const float power, const float scale, const float shift); + void set_power(const float power); + void set_scale(const float scale); + void set_shift(const float shift); + float get_power() const; + float get_scale() const; + float get_shift() const; +}; +} // namespace ops +} // namespace mindspore + +#endif // MINDSPORE_CORE_OPS_POWER_GRAD_H_ diff --git a/mindspore/core/ops/grad/sigmoid_cross_entropy_with_logits_grad.cc b/mindspore/core/ops/grad/sigmoid_cross_entropy_with_logits_grad.cc new file mode 100644 index 00000000000..b313af0f194 --- /dev/null +++ b/mindspore/core/ops/grad/sigmoid_cross_entropy_with_logits_grad.cc @@ -0,0 +1,66 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
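PowerGrad stores three float attributes (power, scale, shift). A minimal sketch of the round-trip (hypothetical caller, values illustrative):

#include "ops/grad/power_grad.h"

namespace mindspore {
namespace ops {
void PowerGradAttrExample() {
  PowerGrad op;
  op.Init(/*power=*/2.0f, /*scale=*/1.0f, /*shift=*/0.0f);
  float power = op.get_power();  // 2.0
  float scale = op.get_scale();  // 1.0
  float shift = op.get_shift();  // 0.0
  (void)power;
  (void)scale;
  (void)shift;
}
}  // namespace ops
}  // namespace mindspore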
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "ops/grad/sigmoid_cross_entropy_with_logits_grad.h" +#include +#include +#include +#include +#include +#include "ops/op_utils.h" +#include "utils/check_convert_utils.h" +#include "abstract/primitive_infer_map.h" + +namespace mindspore { +namespace ops { +AbstractBasePtr SigmoidCrossEntropyWithLogitsGradInfer(const abstract::AnalysisEnginePtr &, + const PrimitivePtr &primitive, + const std::vector &input_args) { + MS_EXCEPTION_IF_NULL(primitive); + auto sigmoid_prim = primitive->cast(); + MS_EXCEPTION_IF_NULL(sigmoid_prim); + auto prim_name = sigmoid_prim->name(); + CheckAndConvertUtils::CheckInteger("sigmoid_cross_entropy_with_logits_grad_infer", input_args.size(), kEqual, 3, + prim_name); + + // Infer Shape + auto x_shape = CheckAndConvertUtils::ConvertShapePtrToShape("x_shape", input_args[0]->BuildShape(), prim_name); + auto y_shape = CheckAndConvertUtils::ConvertShapePtrToShape("y_shape", input_args[1]->BuildShape(), prim_name); + auto dout_shape = CheckAndConvertUtils::ConvertShapePtrToShape("dout_shape", input_args[2]->BuildShape(), prim_name); + CheckAndConvertUtils::Check("x_shape", x_shape, kEqual, "y_shape", y_shape, prim_name, TypeError); + CheckAndConvertUtils::Check("x_shape", x_shape, kEqual, "dout_shape", dout_shape, prim_name, TypeError); + + // Infer type + const std::set valid_types = { + kNumberTypeBool, kNumberTypeInt, kNumberTypeInt8, kNumberTypeInt16, + kNumberTypeInt32, kNumberTypeInt64, kNumberTypeUInt, kNumberTypeUInt8, + kNumberTypeUInt16, kNumberTypeUInt32, kNumberTypeUInt64, kNumberTypeFloat, + kNumberTypeFloat16, kNumberTypeFloat32, kNumberTypeFloat64, kNumberTypeComplex64}; + std::map args; + args.emplace("x_type", input_args[0]->BuildType()); + args.emplace("y_type", input_args[1]->BuildType()); + args.emplace("dout_type", input_args[2]->BuildType()); + CheckAndConvertUtils::CheckTensorTypeSame(args, valid_types, prim_name); + auto dout_type = input_args[2]->BuildType()->cast()->element(); + + return std::make_shared(dout_type, x_shape); +} + +REGISTER_PRIMITIVE_EVAL_IMPL(SigmoidCrossEntropyWithLogitsGrad, prim::kPrimSigmoidCrossEntropyWithLogitsGrad, + SigmoidCrossEntropyWithLogitsGradInfer); +REGISTER_PRIMITIVE_C(kNameSigmoidCrossEntropyWithLogitsGrad, SigmoidCrossEntropyWithLogitsGrad); +} // namespace ops +} // namespace mindspore diff --git a/mindspore/core/ops/grad/sigmoid_cross_entropy_with_logits_grad.h b/mindspore/core/ops/grad/sigmoid_cross_entropy_with_logits_grad.h new file mode 100644 index 00000000000..a6ca4b4e48f --- /dev/null +++ b/mindspore/core/ops/grad/sigmoid_cross_entropy_with_logits_grad.h @@ -0,0 +1,46 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_CORE_OPS_SIGMOID_CROSS_ENTROPY_WITH_LOGITS_GRAD_H_ +#define MINDSPORE_CORE_OPS_SIGMOID_CROSS_ENTROPY_WITH_LOGITS_GRAD_H_ +#include +#include +#include +#include +#include "ops/primitive_c.h" +#include "abstract/abstract_value.h" +#include "utils/check_convert_utils.h" + +namespace mindspore { +namespace ops { +constexpr auto kNameSigmoidCrossEntropyWithLogitsGrad = "SigmoidCrossEntropyWithLogitsGrad"; +class SigmoidCrossEntropyWithLogitsGrad : public PrimitiveC { + public: + SigmoidCrossEntropyWithLogitsGrad() : PrimitiveC(kNameSigmoidCrossEntropyWithLogitsGrad) { + InitIOName({"x", "y", "dout"}, {"x_grad"}); + } + ~SigmoidCrossEntropyWithLogitsGrad() = default; + MS_DECLARE_PARENT(SigmoidCrossEntropyWithLogitsGrad, PrimitiveC); + void Init() {} +}; +AbstractBasePtr SigmoidCrossEntropyWithLogitsGradInfer(const abstract::AnalysisEnginePtr &, + const PrimitivePtr &primitive, + const std::vector &input_args); +using PrimSigmoidCrossEntropyWithLogitsGradPtr = std::shared_ptr; +} // namespace ops +} // namespace mindspore + +#endif // MINDSPORE_CORE_OPS_SIGMOID_CROSS_ENTROPY_WITH_LOGITS_GRAD_H_ diff --git a/mindspore/core/ops/grad/smooth_l1_loss_grad.cc b/mindspore/core/ops/grad/smooth_l1_loss_grad.cc new file mode 100644 index 00000000000..f5db7576b7e --- /dev/null +++ b/mindspore/core/ops/grad/smooth_l1_loss_grad.cc @@ -0,0 +1,69 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include +#include +#include +#include +#include "ops/grad/smooth_l1_loss_grad.h" +#include "ops/op_utils.h" +#include "utils/check_convert_utils.h" + +namespace mindspore { +namespace ops { +void SmoothL1LossGrad::Init(const float beta) { this->set_beta(beta); } + +void SmoothL1LossGrad::set_beta(const float beta) { this->AddAttr(kBeta, MakeValue(beta)); } + +float SmoothL1LossGrad::get_beta() const { + auto value_ptr = this->GetAttr(kBeta); + return GetValue(value_ptr); +} + +AbstractBasePtr SmoothL1LossGradInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args) { + MS_EXCEPTION_IF_NULL(primitive); + auto smooth_prim = primitive->cast(); + MS_EXCEPTION_IF_NULL(smooth_prim); + auto prim_name = smooth_prim->name(); + CheckAndConvertUtils::CheckInteger("smooth_l1_loss_grad_infer", input_args.size(), kEqual, 3, prim_name); + + // Infer shape + auto prediction = CheckAndConvertUtils::ConvertShapePtrToShape("prediction", input_args[0]->BuildShape(), prim_name); + auto target = CheckAndConvertUtils::ConvertShapePtrToShape("target", input_args[1]->BuildShape(), prim_name); + auto dloss = CheckAndConvertUtils::ConvertShapePtrToShape("dloss", input_args[2]->BuildShape(), prim_name); + CheckAndConvertUtils::Check("prediction shape", prediction, kEqual, "target shape", target, prim_name, TypeError); + CheckAndConvertUtils::Check("prediction shape", prediction, kEqual, "dloss", dloss, prim_name, TypeError); + + // Infer type + const std::set valid_types = { + kNumberTypeBool, kNumberTypeInt, kNumberTypeInt8, kNumberTypeInt16, + kNumberTypeInt32, kNumberTypeInt64, kNumberTypeUInt, kNumberTypeUInt8, + kNumberTypeUInt16, kNumberTypeUInt32, kNumberTypeUInt64, kNumberTypeFloat, + kNumberTypeFloat16, kNumberTypeFloat32, kNumberTypeFloat64, kNumberTypeComplex64}; + std::map args; + args.emplace("prediction", input_args[0]->BuildType()); + args.emplace("target", input_args[1]->BuildType()); + args.emplace("dloss", input_args[2]->BuildType()); + CheckAndConvertUtils::CheckTensorTypeSame(args, valid_types, prim_name); + auto dloss_type = input_args[2]->BuildType()->cast()->element(); + + return std::make_shared(dloss_type, prediction); +} +REGISTER_PRIMITIVE_EVAL_IMPL(SmoothL1LossGrad, prim::kPrimSmoothL1LossGrad, SmoothL1LossGradInfer); +REGISTER_PRIMITIVE_C(kNameSmoothL1LossGrad, SmoothL1LossGrad); +} // namespace ops +} // namespace mindspore diff --git a/mindspore/core/ops/grad/smooth_l1_loss_grad.h b/mindspore/core/ops/grad/smooth_l1_loss_grad.h new file mode 100644 index 00000000000..11fc4379c81 --- /dev/null +++ b/mindspore/core/ops/grad/smooth_l1_loss_grad.h @@ -0,0 +1,44 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef MINDSPORE_CORE_OPS_SMOOTH_L1_LOSS_GRAD_H_ +#define MINDSPORE_CORE_OPS_SMOOTH_L1_LOSS_GRAD_H_ +#include +#include + +#include "ops/primitive_c.h" +#include "abstract/abstract_value.h" +#include "utils/check_convert_utils.h" + +namespace mindspore { +namespace ops { +constexpr auto kNameSmoothL1LossGrad = "SmoothL1LossGrad"; +class SmoothL1LossGrad : public PrimitiveC { + public: + SmoothL1LossGrad() : PrimitiveC(kNameSmoothL1LossGrad) {} + ~SmoothL1LossGrad() = default; + MS_DECLARE_PARENT(SmoothL1LossGrad, PrimitiveC); + void Init(); + void Init(const float beta); + void set_beta(const float beta); + float get_beta() const; +}; +AbstractBasePtr SmoothL1LossGradInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args); +using PrimSmoothL1LossGradPtr = std::shared_ptr; +} // namespace ops +} // namespace mindspore +#endif // MINDSPORE_CORE_OPS_SMOOTH_L1_LOSS_GRAD_H_ diff --git a/mindspore/core/ops/grad/sub_grad.cc b/mindspore/core/ops/grad/sub_grad.cc new file mode 100644 index 00000000000..89d66c7ee65 --- /dev/null +++ b/mindspore/core/ops/grad/sub_grad.cc @@ -0,0 +1,25 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "ops/grad/sub_grad.h" +#include "utils/check_convert_utils.h" +#include "abstract/primitive_infer_map.h" + +namespace mindspore { +namespace ops { +REGISTER_PRIMITIVE_C(kNameSubGrad, SubGrad); +} // namespace ops +} // namespace mindspore diff --git a/mindspore/core/ops/grad/sub_grad.h b/mindspore/core/ops/grad/sub_grad.h new file mode 100644 index 00000000000..e6069014245 --- /dev/null +++ b/mindspore/core/ops/grad/sub_grad.h @@ -0,0 +1,39 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
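A short sketch of the beta attribute round trip defined above (illustrative only, not part of the patch): Init stores beta through AddAttr(kBeta, ...) and get_beta reads it back with GetValue<float>.

#include <iostream>
#include "ops/grad/smooth_l1_loss_grad.h"

// Sketch: beta is only stored as a primitive attribute here and read back;
// the kernel consumes it later.
int main() {
  mindspore::ops::SmoothL1LossGrad op;
  op.Init(1.0f);                            // same effect as op.set_beta(1.0f)
  std::cout << op.get_beta() << std::endl;  // prints 1
  return 0;
}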
+ */ +#ifndef MINDSPORE_CORE_OPS_SUB_GRAD_H_ +#define MINDSPORE_CORE_OPS_SUB_GRAD_H_ +#include + +#include "ops/primitive_c.h" +#include "abstract/abstract_value.h" +#include "utils/check_convert_utils.h" + +namespace mindspore { +namespace ops { +constexpr auto kNameSubGrad = "SubGrad"; +class SubGrad : public PrimitiveC { + public: + SubGrad() : PrimitiveC(kNameSubGrad) {} + ~SubGrad() = default; + MS_DECLARE_PARENT(SubGrad, PrimitiveC); + void Init() {} +}; + +using PrimSubGradPtr = std::shared_ptr; +} // namespace ops +} // namespace mindspore + +#endif // MINDSPORE_CORE_OPS_SUB_GRAD_H_ diff --git a/mindspore/core/c_ops/greater.cc b/mindspore/core/ops/greater.cc similarity index 92% rename from mindspore/core/c_ops/greater.cc rename to mindspore/core/ops/greater.cc index 22e902d7717..2d4e86c5387 100644 --- a/mindspore/core/c_ops/greater.cc +++ b/mindspore/core/ops/greater.cc @@ -14,9 +14,11 @@ * limitations under the License. */ -#include "c_ops/greater.h" +#include "ops/greater.h" #include namespace mindspore { +namespace ops { REGISTER_PRIMITIVE_C(kNameGreater, Greater); +} // namespace ops } // namespace mindspore diff --git a/mindspore/core/c_ops/greater.h b/mindspore/core/ops/greater.h similarity index 85% rename from mindspore/core/c_ops/greater.h rename to mindspore/core/ops/greater.h index 9dfae66b59f..06751f94012 100644 --- a/mindspore/core/c_ops/greater.h +++ b/mindspore/core/ops/greater.h @@ -14,17 +14,18 @@ * limitations under the License. */ -#ifndef MINDSPORE_CORE_C_OPS_GREATER_H_ -#define MINDSPORE_CORE_C_OPS_GREATER_H_ +#ifndef MINDSPORE_CORE_OPS_GREATER_H_ +#define MINDSPORE_CORE_OPS_GREATER_H_ #include #include #include #include -#include "c_ops/primitive_c.h" +#include "ops/primitive_c.h" #include "abstract/abstract_value.h" #include "utils/check_convert_utils.h" namespace mindspore { +namespace ops { constexpr auto kNameGreater = "Greater"; class Greater : public PrimitiveC { public: @@ -33,6 +34,7 @@ class Greater : public PrimitiveC { MS_DECLARE_PARENT(Greater, PrimitiveC); void Init() {} }; +} // namespace ops } // namespace mindspore -#endif // MINDSPORE_CORE_C_OPS_GREATER_H_ +#endif // MINDSPORE_CORE_OPS_GREATER_H_ diff --git a/mindspore/core/c_ops/greater_equal.cc b/mindspore/core/ops/greater_equal.cc similarity index 84% rename from mindspore/core/c_ops/greater_equal.cc rename to mindspore/core/ops/greater_equal.cc index d9d32e15b92..5cb75e6bdbe 100644 --- a/mindspore/core/c_ops/greater_equal.cc +++ b/mindspore/core/ops/greater_equal.cc @@ -14,8 +14,13 @@ * limitations under the License. */ -#include "c_ops/greater_equal.h" +#include +#include + +#include "ops/greater_equal.h" namespace mindspore { +namespace ops { REGISTER_PRIMITIVE_C(kNameGreaterEqual, GreaterEqual); -} +} // namespace ops +} // namespace mindspore diff --git a/mindspore/core/c_ops/greater_equal.h b/mindspore/core/ops/greater_equal.h similarity index 84% rename from mindspore/core/c_ops/greater_equal.h rename to mindspore/core/ops/greater_equal.h index 0dc946a1fcd..d8151d3983a 100644 --- a/mindspore/core/c_ops/greater_equal.h +++ b/mindspore/core/ops/greater_equal.h @@ -14,16 +14,17 @@ * limitations under the License. 
*/ -#ifndef MINDSPORE_CORE_C_OPS_GREATEREQUAL_H_ -#define MINDSPORE_CORE_C_OPS_GREATEREQUAL_H_ +#ifndef MINDSPORE_CORE_OPS_GREATER_EQUAL_H_ +#define MINDSPORE_CORE_OPS_GREATER_EQUAL_H_ #include #include -#include "c_ops/primitive_c.h" -#include "c_ops/op_utils.h" +#include "ops/primitive_c.h" +#include "ops/op_utils.h" #include "abstract/abstract_value.h" namespace mindspore { +namespace ops { constexpr auto kNameGreaterEqual = "GreaterEqual"; class GreaterEqual : public PrimitiveC { public: @@ -34,5 +35,6 @@ class GreaterEqual : public PrimitiveC { AbstractBasePtr GreaterEqualInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, const std::vector &input_args); using PrimGreaterEqual = std::shared_ptr; +} // namespace ops } // namespace mindspore -#endif // MINDSPORE_CORE_C_OPS_GREATEREQUAL_H_ +#endif // MINDSPORE_CORE_OPS_GREATER_EQUAL_H_ diff --git a/mindspore/core/ops/hashtable_lookup.cc b/mindspore/core/ops/hashtable_lookup.cc new file mode 100644 index 00000000000..cc592a671de --- /dev/null +++ b/mindspore/core/ops/hashtable_lookup.cc @@ -0,0 +1,56 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include + +#include "ops/hashtable_lookup.h" +#include "utils/check_convert_utils.h" + +namespace mindspore { +namespace ops { + +AbstractBasePtr HashtableLookupInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args) { + MS_EXCEPTION_IF_NULL(primitive); + auto HashtableLookup_prim = primitive->cast(); + MS_EXCEPTION_IF_NULL(HashtableLookup_prim); + for (auto input : input_args) { + MS_EXCEPTION_IF_NULL(input); + } + auto op_name = HashtableLookup_prim->name(); + std::vector hits_shape; + auto input = CheckAndConvertUtils::ConvertShapePtrToShape("input_shape", input_args[0]->BuildShape(), op_name); + hits_shape.push_back(input[0]); + + auto value_type = input_args[2]->BuildType(); + auto tensor_type = value_type->cast(); + MS_EXCEPTION_IF_NULL(tensor_type); + auto data_type = tensor_type->element(); + std::vector value_shape; + auto output = std::make_shared(data_type, value_shape); + auto hits = std::make_shared(TypeIdToType(kNumberTypeInt8), hits_shape); + AbstractBasePtrList output1 = {output, hits}; + + if (input_args[0]->BuildValue()->cast()->data_c() == nullptr) { + MS_LOG(INFO) << "Do infer shape in runtime."; + } + return std::make_shared(output1); +} + +REGISTER_PRIMITIVE_EVAL_IMPL(HashtableLookup, prim::kPrimHashtableLookup, HashtableLookupInfer); +REGISTER_PRIMITIVE_C(kNameHashtableLookup, HashtableLookup); +} // namespace ops +} // namespace mindspore diff --git a/mindspore/core/c_ops/hashtable_lookup.h b/mindspore/core/ops/hashtable_lookup.h similarity index 72% rename from mindspore/core/c_ops/hashtable_lookup.h rename to mindspore/core/ops/hashtable_lookup.h index bd4f8edc498..6ab20abb442 100644 --- a/mindspore/core/c_ops/hashtable_lookup.h +++ b/mindspore/core/ops/hashtable_lookup.h @@ -13,15 +13,17 @@ * See the License for the specific 
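A sketch of how the new HashtableLookup infer behaves (not part of the patch; it assumes the op keeps the attribute-free default constructor used elsewhere in this file family): HashtableLookupInfer returns a tuple of two tensors, the looked-up values with the dtype of the third input, and an int8 hits vector whose length equals the first dimension of the keys.

#include <iostream>
#include "ops/hashtable_lookup.h"

// Sketch only. Assumption: HashtableLookup is default-constructible like the
// other primitives in this patch; the interesting part is the tuple output
// (values, hits) produced by HashtableLookupInfer.
int main() {
  mindspore::ops::HashtableLookup op;
  op.Init();  // attribute-free
  std::cout << op.name() << std::endl;
  return 0;
}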
language governing permissions and * limitations under the License. */ -#ifndef MINDSPORE_CORE_C_OPS_HASHTABLELOOKUP_H_ -#define MINDSPORE_CORE_C_OPS_HASHTABLELOOKUP_H_ +#ifndef MINDSPORE_CORE_OPS_HASHTABLE_LOOKUP_H_ +#define MINDSPORE_CORE_OPS_HASHTABLE_LOOKUP_H_ +#include #include -#include "c_ops/primitive_c.h" +#include "ops/primitive_c.h" #include "abstract/abstract_value.h" #include "utils/check_convert_utils.h" namespace mindspore { +namespace ops { constexpr auto kNameHashtableLookup = "HashtableLookup"; class HashtableLookup : public PrimitiveC { public: @@ -31,7 +33,10 @@ class HashtableLookup : public PrimitiveC { void Init() {} }; +AbstractBasePtr HashtableLookupInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args); using PrimHashtableLookupPtr = std::shared_ptr; +} // namespace ops } // namespace mindspore -#endif // MINDSPORE_CORE_C_OPS_HASHTABLELOOKUP_H_ +#endif // MINDSPORE_CORE_OPS_HASHTABLE_LOOKUP_H_ diff --git a/mindspore/core/c_ops/identity.cc b/mindspore/core/ops/identity.cc similarity index 84% rename from mindspore/core/c_ops/identity.cc rename to mindspore/core/ops/identity.cc index 2f42f95cf02..a8f3ae7f107 100644 --- a/mindspore/core/c_ops/identity.cc +++ b/mindspore/core/ops/identity.cc @@ -14,9 +14,15 @@ * limitations under the License. */ -#include "c_ops/identity.h" +#include +#include +#include +#include "ops/identity.h" #include "utils/check_convert_utils.h" +#include "ops/op_utils.h" namespace mindspore { +namespace ops { REGISTER_PRIMITIVE_C(kNameIdentity, Identity); +} // namespace ops } // namespace mindspore diff --git a/mindspore/core/c_ops/identity.h b/mindspore/core/ops/identity.h similarity index 77% rename from mindspore/core/c_ops/identity.h rename to mindspore/core/ops/identity.h index 24697369334..164b9d805b5 100644 --- a/mindspore/core/c_ops/identity.h +++ b/mindspore/core/ops/identity.h @@ -14,13 +14,16 @@ * limitations under the License. */ -#ifndef MINDSPORE_CORE_C_OPS_IDENTITY_H_ -#define MINDSPORE_CORE_C_OPS_IDENTITY_H_ -#include "c_ops/primitive_c.h" +#ifndef MINDSPORE_CORE_OPS_IDENTITY_H_ +#define MINDSPORE_CORE_OPS_IDENTITY_H_ +#include +#include +#include "ops/primitive_c.h" #include "abstract/abstract_value.h" #include "utils/check_convert_utils.h" namespace mindspore { +namespace ops { constexpr auto kNameIdentity = "Identity"; class Identity : public PrimitiveC { public: @@ -29,6 +32,8 @@ class Identity : public PrimitiveC { MS_DECLARE_PARENT(Identity, PrimitiveC); void Init() {} }; +using PrimIdentityPtr = std::shared_ptr; +} // namespace ops } // namespace mindspore -#endif // MINDSPORE_CORE_C_OPS_IDENTITY_H_ +#endif // MINDSPORE_CORE_OPS_IDENTITY_H_ diff --git a/mindspore/core/c_ops/instance_norm.cc b/mindspore/core/ops/instance_norm.cc similarity index 78% rename from mindspore/core/c_ops/instance_norm.cc rename to mindspore/core/ops/instance_norm.cc index df5e31c17a1..83d4fc96ca1 100644 --- a/mindspore/core/c_ops/instance_norm.cc +++ b/mindspore/core/ops/instance_norm.cc @@ -14,24 +14,26 @@ * limitations under the License. 
*/ -#include "c_ops/instance_norm.h" +#include "ops/instance_norm.h" #include #include #include #include #include -#include "c_ops/op_utils.h" +#include "ops/op_utils.h" #include "utils/check_convert_utils.h" #include "abstract/primitive_infer_map.h" namespace mindspore { +namespace ops { +void InstanceNorm::Init(const float epsilon) { this->set_epsilon(epsilon); } -void InstanceNorm::set_epsilon(const float &epsilon) { this->AddAttr(kEpsilon, MakeValue(epsilon)); } +void InstanceNorm::set_epsilon(const float epsilon) { this->AddAttr(kEpsilon, MakeValue(epsilon)); } float InstanceNorm::get_epsilon() const { auto value_ptr = GetAttr(kEpsilon); return GetValue(value_ptr); } -void InstanceNorm::Init(const float &epsilon) { this->set_epsilon(epsilon); } REGISTER_PRIMITIVE_C(kNameInstanceNorm, InstanceNorm); +} // namespace ops } // namespace mindspore diff --git a/mindspore/core/c_ops/instance_norm.h b/mindspore/core/ops/instance_norm.h similarity index 78% rename from mindspore/core/c_ops/instance_norm.h rename to mindspore/core/ops/instance_norm.h index 4012c70846f..d1cd8cc63b8 100644 --- a/mindspore/core/c_ops/instance_norm.h +++ b/mindspore/core/ops/instance_norm.h @@ -14,27 +14,29 @@ * limitations under the License. */ -#ifndef MINDSPORE_CORE_C_OPS_INSTANCENORM_H_ -#define MINDSPORE_CORE_C_OPS_INSTANCENORM_H_ +#ifndef MINDSPORE_CORE_OPS_INSTANCE_NORM_H_ +#define MINDSPORE_CORE_OPS_INSTANCE_NORM_H_ #include #include #include #include -#include "c_ops/primitive_c.h" +#include "ops/primitive_c.h" #include "abstract/abstract_value.h" #include "utils/check_convert_utils.h" namespace mindspore { +namespace ops { constexpr auto kNameInstanceNorm = "InstanceNorm"; class InstanceNorm : public PrimitiveC { public: InstanceNorm() : PrimitiveC(kNameInstanceNorm) {} ~InstanceNorm() = default; MS_DECLARE_PARENT(InstanceNorm, PrimitiveC); - void Init(const float &epsilon = 0.00001); - void set_epsilon(const float &epsilon); + void Init(const float epsilon = 0.00001); + void set_epsilon(const float epsilon); float get_epsilon() const; }; +} // namespace ops } // namespace mindspore -#endif // MINDSPORE_CORE_C_OPS_INSTANCENORM_H_ +#endif // MINDSPORE_CORE_OPS_INSTANCE_NORM_H_ diff --git a/mindspore/core/ops/l2_normalize.cc b/mindspore/core/ops/l2_normalize.cc new file mode 100644 index 00000000000..86c8b5dec54 --- /dev/null +++ b/mindspore/core/ops/l2_normalize.cc @@ -0,0 +1,65 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include + +#include "ops/l2_normalize.h" + +namespace mindspore { +namespace ops { +void L2Normalize::Init(const std::vector &axis, const float epsilon) { + this->set_axis(axis); + this->set_epsilon(epsilon); +} + +void L2Normalize::set_axis(const std::vector &axis) { AddAttr(kAxis, MakeValue(axis)); } + +void L2Normalize::set_epsilon(const float epsilon) { AddAttr(kEpsilon, MakeValue(epsilon)); } + +std::vector L2Normalize::get_axis() const { + auto value_ptr = GetAttr(kAxis); + return GetValue>(value_ptr); +} + +float L2Normalize::get_epsilon() const { + auto value_ptr = GetAttr(kEpsilon); + return GetValue(value_ptr); +} + +AbstractBasePtr L2NormalizeInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args) { + MS_EXCEPTION_IF_NULL(primitive); + auto prim = primitive->cast(); + MS_EXCEPTION_IF_NULL(prim); + auto prim_name = prim->name(); + CheckAndConvertUtils::CheckInteger("input number", input_args.size(), kEqual, 1, prim_name); + for (const auto &item : input_args) { + MS_EXCEPTION_IF_NULL(item); + } + const std::set valid_types = {kNumberTypeFloat16, kNumberTypeFloat32}; + CheckAndConvertUtils::CheckTensorTypeValid("input_x", input_args[0]->BuildType(), valid_types, prim_name); + auto x_shape = CheckAndConvertUtils::ConvertShapePtrToShape("x_shape", input_args[0]->BuildShape(), prim_name); + auto x_rank = SizeToLong(x_shape.size()); + auto axiss = prim->get_axis(); + for (auto &axis : axiss) { + CheckAndConvertUtils::CheckInRange("axis", axis, kIncludeLeft, {-x_rank, x_rank}, prim_name); + } + return input_args[0]->Broaden(); +} +REGISTER_PRIMITIVE_EVAL_IMPL(L2Normalize, prim::kPrimL2Normalize, L2NormalizeInfer); +REGISTER_PRIMITIVE_C(kNameL2Normalize, L2Normalize); +} // namespace ops +} // namespace mindspore diff --git a/mindspore/core/c_ops/l2_normalize.h b/mindspore/core/ops/l2_normalize.h similarity index 64% rename from mindspore/core/c_ops/l2_normalize.h rename to mindspore/core/ops/l2_normalize.h index cfc9851b48f..0f9cd207d99 100644 --- a/mindspore/core/c_ops/l2_normalize.h +++ b/mindspore/core/ops/l2_normalize.h @@ -14,31 +14,33 @@ * limitations under the License. 
*/ -#ifndef MINDSPORE_CORE_C_OPS_L2NORMALIZE_H_ -#define MINDSPORE_CORE_C_OPS_L2NORMALIZE_H_ +#ifndef MINDSPORE_CORE_OPS_L2_NORMALIZE_H_ +#define MINDSPORE_CORE_OPS_L2_NORMALIZE_H_ #include #include - -#include "c_ops/primitive_c.h" -#include "c_ops/op_utils.h" +#include +#include "ops/primitive_c.h" +#include "ops/op_utils.h" #include "abstract/abstract_value.h" #include "utils/check_convert_utils.h" namespace mindspore { +namespace ops { constexpr auto kNameL2Normalize = "L2Normalize"; class L2Normalize : public PrimitiveC { public: - L2Normalize() : PrimitiveC(kNameL2Normalize) {} + explicit L2Normalize(const std::string &name = kNameL2Normalize) : PrimitiveC(name) {} ~L2Normalize() = default; MS_DECLARE_PARENT(L2Normalize, PrimitiveC); - void Init(int64_t axis = 0, float epsilon = 1e-4); - void set_axis(int64_t axis); - void set_epsilon(float epsilon); - int64_t get_axis(); - float get_epsilon(); + void Init(const std::vector &axis, const float epsilon = 1e-4); + void set_axis(const std::vector &axis); + void set_epsilon(const float epsilon); + std::vector get_axis() const; + float get_epsilon() const; }; AbstractBasePtr L2NormalizeInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, const std::vector &input_args); -using PrimL2Normalize = std::shared_ptr; +using PrimL2NormalizePtr = std::shared_ptr; +} // namespace ops } // namespace mindspore -#endif // MINDSPORE_CORE_C_OPS_L2NORMALIZE_H_ +#endif // MINDSPORE_CORE_OPS_L2_NORMALIZE_H_ diff --git a/mindspore/core/c_ops/layer_norm.cc b/mindspore/core/ops/layer_norm.cc similarity index 68% rename from mindspore/core/c_ops/layer_norm.cc rename to mindspore/core/ops/layer_norm.cc index 364aac1ecbf..58d756ddff7 100644 --- a/mindspore/core/c_ops/layer_norm.cc +++ b/mindspore/core/ops/layer_norm.cc @@ -14,35 +14,37 @@ * limitations under the License. 
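A sketch of the reworked L2Normalize interface above (illustrative only): the scalar axis becomes std::vector<int64_t>, and L2NormalizeInfer validates every entry against [-rank, rank) of the single tensor input before broadening it.

#include <iostream>
#include <vector>
#include "ops/l2_normalize.h"

// Sketch: the axis attribute is now a vector, so several normalization axes can
// be recorded on one primitive; epsilon keeps its 1e-4 default.
int main() {
  mindspore::ops::L2Normalize op;  // default name kNameL2Normalize
  op.Init({0}, 1e-4f);
  for (auto axis : op.get_axis()) {
    std::cout << axis << std::endl;  // prints 0
  }
  std::cout << op.get_epsilon() << std::endl;
  return 0;
}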
*/ -#include "c_ops/layer_norm.h" -#include "c_ops/op_utils.h" +#include "ops/layer_norm.h" +#include "ops/op_utils.h" #include "utils/check_convert_utils.h" namespace mindspore { -void LayerNorm::Init(int64_t begin_norm_axis, int64_t begin_params_axis, float epsilon) { +namespace ops { +void LayerNorm::Init(const int64_t begin_norm_axis, const int64_t begin_params_axis, const float epsilon) { this->set_begin_norm_axis(begin_norm_axis); this->set_begin_params_axis(begin_params_axis); this->set_epsilon(epsilon); } -void LayerNorm::set_begin_norm_axis(int64_t begin_norm_axis) { +void LayerNorm::set_begin_norm_axis(const int64_t begin_norm_axis) { this->AddAttr(kBeginNormAxis, MakeValue(begin_norm_axis)); } -void LayerNorm::set_begin_params_axis(int64_t begin_params_axis) { +void LayerNorm::set_begin_params_axis(const int64_t begin_params_axis) { this->AddAttr(kBeginParamsAxis, MakeValue(begin_params_axis)); } -void LayerNorm::set_epsilon(float epsilon) { this->AddAttr(kEpsilon, MakeValue(epsilon)); } +void LayerNorm::set_epsilon(const float epsilon) { this->AddAttr(kEpsilon, MakeValue(epsilon)); } -int64_t LayerNorm::get_begin_norm_axis() { +int64_t LayerNorm::get_begin_norm_axis() const { auto value_ptr = this->GetAttr(kBeginNormAxis); return GetValue(value_ptr); } -int64_t LayerNorm::get_begin_params_axis() { +int64_t LayerNorm::get_begin_params_axis() const { auto value_ptr = this->GetAttr(kBeginParamsAxis); return GetValue(value_ptr); } -float LayerNorm::get_epsilon() { +float LayerNorm::get_epsilon() const { auto value_ptr = this->GetAttr(kEpsilon); return GetValue(value_ptr); } REGISTER_PRIMITIVE_C(kNameLayerNorm, LayerNorm); +} // namespace ops } // namespace mindspore diff --git a/mindspore/core/c_ops/layer_norm.h b/mindspore/core/ops/layer_norm.h similarity index 58% rename from mindspore/core/c_ops/layer_norm.h rename to mindspore/core/ops/layer_norm.h index 1781d22747d..c1c9d89f0d1 100644 --- a/mindspore/core/c_ops/layer_norm.h +++ b/mindspore/core/ops/layer_norm.h @@ -14,27 +14,32 @@ * limitations under the License. 
*/ -#ifndef MINDSPORE_CORE_C_OPS_LAYERNORM_H_ -#define MINDSPORE_CORE_C_OPS_LAYERNORM_H_ -#include "c_ops/primitive_c.h" +#ifndef MINDSPORE_CORE_OPS_LAYER_NORM_H_ +#define MINDSPORE_CORE_OPS_LAYER_NORM_H_ +#include + +#include "ops/primitive_c.h" #include "abstract/abstract_value.h" #include "utils/check_convert_utils.h" namespace mindspore { +namespace ops { constexpr auto kNameLayerNorm = "LayerNorm"; class LayerNorm : public PrimitiveC { public: LayerNorm() : PrimitiveC(kNameLayerNorm) {} + explicit LayerNorm(const std::string k_name) : PrimitiveC(k_name) {} ~LayerNorm() = default; MS_DECLARE_PARENT(LayerNorm, PrimitiveC); - void Init(int64_t begin_norm_axis = 1, int64_t begin_params_axis = 1, float epsilon = 1e-7); - void set_begin_norm_axis(int64_t begin_norm_axis); - void set_begin_params_axis(int64_t begin_params_axis); - void set_epsilon(float epsilon); - int64_t get_begin_norm_axis(); - int64_t get_begin_params_axis(); - float get_epsilon(); + void Init(const int64_t begin_norm_axis = 1, const int64_t begin_params_axis = 1, const float epsilon = 1e-7); + void set_begin_norm_axis(const int64_t begin_norm_axis); + void set_begin_params_axis(const int64_t begin_params_axis); + void set_epsilon(const float epsilon); + int64_t get_begin_norm_axis() const; + int64_t get_begin_params_axis() const; + float get_epsilon() const; }; +} // namespace ops } // namespace mindspore -#endif // MINDSPORE_CORE_C_OPS_LAYERNORM_H_ +#endif // MINDSPORE_CORE_OPS_LAYER_NORM_H_ diff --git a/mindspore/core/ops/leaky_relu.cc b/mindspore/core/ops/leaky_relu.cc new file mode 100644 index 00000000000..a0a37e10d06 --- /dev/null +++ b/mindspore/core/ops/leaky_relu.cc @@ -0,0 +1,57 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
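A sketch of the three LayerNorm attributes wired up above (illustrative only, not part of the patch); the setters now take their scalars by value and every getter is const.

#include <iostream>
#include "ops/layer_norm.h"

// Sketch: begin_norm_axis / begin_params_axis / epsilon round trip through the
// primitive's attribute map.
int main() {
  mindspore::ops::LayerNorm op;
  op.Init(1, 1, 1e-7f);
  std::cout << op.get_begin_norm_axis() << " " << op.get_begin_params_axis() << " " << op.get_epsilon()
            << std::endl;  // prints 1 1 1e-07
  return 0;
}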
+ */ + +#include "ops/leaky_relu.h" + +namespace mindspore { +namespace ops { +namespace { +abstract::ShapePtr InferShape(const PrimitivePtr &primitive, const std::vector &input_args) { + MS_EXCEPTION_IF_NULL(primitive); + auto x = input_args[0]->BuildShape(); + auto shape_element = x->cast(); + MS_EXCEPTION_IF_NULL(shape_element); + return shape_element; +} + +TypePtr InferType(const PrimitivePtr &prim, const std::vector &input_args) { + MS_EXCEPTION_IF_NULL(prim); + CheckAndConvertUtils::CheckInteger("input number", input_args.size(), kEqual, 1, prim->name()); + for (const auto &item : input_args) { + MS_EXCEPTION_IF_NULL(item); + } + std::map types; + types.emplace("x", input_args[0]->BuildType()); + auto infer_type = CheckAndConvertUtils::CheckTensorTypeSame(types, common_valid_types, prim->name()); + return TypeIdToType(infer_type); +} +} // namespace +void LeakyRelu::Init(const float negative_slope) { this->set_negative_slope(negative_slope); } + +void LeakyRelu::set_negative_slope(const float negative_slope) { + this->AddAttr(kNegativeSlope, MakeValue(negative_slope)); +} +float LeakyRelu::get_negative_slope() const { return GetValue(GetAttr(kNegativeSlope)); } + +AbstractBasePtr LeakyReluInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args) { + return std::make_shared(InferType(primitive, input_args), + InferShape(primitive, input_args)->shape()); +} +REGISTER_PRIMITIVE_EVAL_IMPL(LeakyRelu, prim::kPrimLeakyRelu, LeakyReluInfer); +REGISTER_PRIMITIVE_C(kNameLeakyRelu, LeakyRelu); +} // namespace ops +} // namespace mindspore diff --git a/mindspore/core/ops/leaky_relu.h b/mindspore/core/ops/leaky_relu.h new file mode 100644 index 00000000000..09dc63915be --- /dev/null +++ b/mindspore/core/ops/leaky_relu.h @@ -0,0 +1,47 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef MINDSPORE_CORE_OPS_LEAKY_RELU_H_ +#define MINDSPORE_CORE_OPS_LEAKY_RELU_H_ +#include +#include +#include +#include +#include "ops/primitive_c.h" +#include "ops/op_utils.h" +#include "abstract/abstract_value.h" +#include "utils/check_convert_utils.h" + +namespace mindspore { +namespace ops { +constexpr auto kNameLeakyRelu = "LeakyRelu"; +class LeakyRelu : public PrimitiveC { + public: + LeakyRelu() : PrimitiveC(kNameLeakyRelu) {} + ~LeakyRelu() = default; + MS_DECLARE_PARENT(LeakyRelu, PrimitiveC); + void Init(const float negative_slope); + void set_negative_slope(const float negative_slope); + float get_negative_slope() const; +}; + +AbstractBasePtr LeakyReluInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args); +using PrimLeakyReluPtr = std::shared_ptr; +} // namespace ops +} // namespace mindspore + +#endif // MINDSPORE_CORE_OPS_LEAKY_RELU_H_ diff --git a/mindspore/core/ops/less.cc b/mindspore/core/ops/less.cc new file mode 100644 index 00000000000..4a502d7b48c --- /dev/null +++ b/mindspore/core/ops/less.cc @@ -0,0 +1,56 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include +#include + +#include "ops/less.h" +#include "ops/op_utils.h" +#include "utils/check_convert_utils.h" +#include "abstract/primitive_infer_map.h" + +namespace mindspore { +namespace ops { +namespace { +abstract::ShapePtr InferShape(const PrimitivePtr &primitive, const std::vector &input_args) { + MS_EXCEPTION_IF_NULL(primitive); + auto less_prim = primitive->cast(); + MS_EXCEPTION_IF_NULL(less_prim); + auto op_name = less_prim->name(); + return BroadCastInferShape(op_name, input_args); +} + +TypePtr InferType(const PrimitivePtr &prim, const std::vector &input_args) { + for (const auto &item : input_args) { + MS_EXCEPTION_IF_NULL(item); + } + std::map types; + types.emplace("x", input_args[0]->BuildType()); + types.emplace("y", input_args[1]->BuildType()); + CheckAndConvertUtils::CheckTensorTypeSame(types, common_valid_types, prim->name()); + return TypeIdToType(kNumberTypeBool); +} +} // namespace + +AbstractBasePtr LessInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args) { + return std::make_shared(InferType(primitive, input_args), + InferShape(primitive, input_args)->shape()); +} +REGISTER_PRIMITIVE_EVAL_IMPL(Less, prim::kPrimLess, LessInfer); +REGISTER_PRIMITIVE_C(kNameLess, Less); +} // namespace ops +} // namespace mindspore diff --git a/mindspore/core/c_ops/less.h b/mindspore/core/ops/less.h similarity index 82% rename from mindspore/core/c_ops/less.h rename to mindspore/core/ops/less.h index f78441e0106..c5dd51835df 100644 --- a/mindspore/core/c_ops/less.h +++ b/mindspore/core/ops/less.h @@ -14,17 +14,18 @@ * limitations under the License. 
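A sketch of the negative_slope attribute above (illustrative only): LeakyReluInfer validates the single tensor input and passes its shape and dtype through, while the slope itself is only stored for the kernel.

#include <iostream>
#include "ops/leaky_relu.h"

// Sketch: negative_slope round trip via the kNegativeSlope attribute.
int main() {
  mindspore::ops::LeakyRelu op;
  op.Init(0.2f);
  std::cout << op.get_negative_slope() << std::endl;  // prints 0.2
  return 0;
}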
*/ -#ifndef MINDSPORE_CORE_C_OPS_LESS_H_ -#define MINDSPORE_CORE_C_OPS_LESS_H_ +#ifndef MINDSPORE_CORE_OPS_LESS_H_ +#define MINDSPORE_CORE_OPS_LESS_H_ #include #include -#include "c_ops/primitive_c.h" -#include "c_ops/op_utils.h" +#include "ops/primitive_c.h" +#include "ops/op_utils.h" #include "abstract/abstract_value.h" #include "utils/check_convert_utils.h" namespace mindspore { +namespace ops { constexpr auto kNameLess = "Less"; class Less : public PrimitiveC { public: @@ -34,6 +35,7 @@ class Less : public PrimitiveC { }; AbstractBasePtr LessInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, const std::vector &input_args); -using PrimLess = std::shared_ptr; +using PrimLessPtr = std::shared_ptr; +} // namespace ops } // namespace mindspore -#endif // MINDSPORE_CORE_C_OPS_LESS_H_ +#endif // MINDSPORE_CORE_OPS_LESS_H_ diff --git a/mindspore/core/ops/less_equal.cc b/mindspore/core/ops/less_equal.cc new file mode 100644 index 00000000000..75da5355361 --- /dev/null +++ b/mindspore/core/ops/less_equal.cc @@ -0,0 +1,57 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#include +#include +#include +#include +#include +#include "ops/less_equal.h" +#include "ops/op_utils.h" +#include "utils/check_convert_utils.h" +#include "abstract/primitive_infer_map.h" + +namespace mindspore { +namespace ops { +namespace { +abstract::ShapePtr InferShape(const PrimitivePtr &primitive, const std::vector &input_args) { + MS_EXCEPTION_IF_NULL(primitive); + auto equal_prim = primitive->cast(); + MS_EXCEPTION_IF_NULL(equal_prim); + auto op_name = equal_prim->name(); + return BroadCastInferShape(op_name, input_args); +} + +TypePtr InferType(const PrimitivePtr &prim, const std::vector &input_args) { + if (std::any_of(input_args.begin(), input_args.end(), [](const AbstractBasePtr &a) { return a == nullptr; })) { + MS_LOG(EXCEPTION) << "nullptr"; + } + std::map types; + types.emplace("x", input_args[0]->BuildType()); + types.emplace("y", input_args[1]->BuildType()); + auto infer_type = CheckAndConvertUtils::CheckTensorTypeSame(types, common_valid_types, prim->name()); + return TypeIdToType(infer_type); +} +} // namespace + +AbstractBasePtr LessEqualInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args) { + return std::make_shared(InferType(primitive, input_args), + InferShape(primitive, input_args)->shape()); +} +REGISTER_PRIMITIVE_EVAL_IMPL(LessEqual, prim::kPrimLessEqual, LessEqualInfer); +REGISTER_PRIMITIVE_C(kNameLessEqual, LessEqual); +} // namespace ops +} // namespace mindspore diff --git a/mindspore/core/c_ops/less_equal.h b/mindspore/core/ops/less_equal.h similarity index 68% rename from mindspore/core/c_ops/less_equal.h rename to mindspore/core/ops/less_equal.h index 1f95a048026..70f228b6c52 100644 --- a/mindspore/core/c_ops/less_equal.h +++ b/mindspore/core/ops/less_equal.h @@ -14,13 +14,17 @@ * limitations under the License. 
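A sketch of the shared comparison pattern above (illustrative only; it assumes Less and LessEqual keep the plain default constructors used throughout this file family): both infer functions broadcast the two input shapes with BroadCastInferShape and always report a bool element type, which is why LessInfer returns kNumberTypeBool directly.

#include <iostream>
#include "ops/less.h"
#include "ops/less_equal.h"

// Sketch only. Assumption: both ops are default-constructible; they carry no
// attributes, and their output dtype is bool regardless of the input dtype.
int main() {
  mindspore::ops::Less less;
  mindspore::ops::LessEqual less_equal;
  std::cout << less.name() << " " << less_equal.name() << std::endl;
  return 0;
}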
*/ -#ifndef MINDSPORE_CORE_C_OPS_LESSEQUAL_H_ -#define MINDSPORE_CORE_C_OPS_LESSEQUAL_H_ -#include "c_ops/primitive_c.h" +#ifndef MINDSPORE_CORE_OPS_LESS_EQUAL_H_ +#define MINDSPORE_CORE_OPS_LESS_EQUAL_H_ +#include +#include + +#include "ops/primitive_c.h" #include "abstract/abstract_value.h" #include "utils/check_convert_utils.h" namespace mindspore { +namespace ops { constexpr auto kNameLessEqual = "LessEqual"; class LessEqual : public PrimitiveC { public: @@ -29,6 +33,11 @@ class LessEqual : public PrimitiveC { MS_DECLARE_PARENT(LessEqual, PrimitiveC); void Init() {} }; + +AbstractBasePtr LessEqualInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args); +using PrimLessEqualPtr = std::shared_ptr; +} // namespace ops } // namespace mindspore -#endif // MINDSPORE_CORE_C_OPS_LESSEQUAL_H_ +#endif // MINDSPORE_CORE_OPS_LESS_EQUAL_H_ diff --git a/mindspore/core/ops/local_response_normalization.cc b/mindspore/core/ops/local_response_normalization.cc new file mode 100644 index 00000000000..27ea7d216ba --- /dev/null +++ b/mindspore/core/ops/local_response_normalization.cc @@ -0,0 +1,98 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "ops/local_response_normalization.h" +#include +#include +#include +#include +#include +#include "ops/op_utils.h" +#include "utils/check_convert_utils.h" +#include "abstract/primitive_infer_map.h" + +namespace mindspore { +namespace ops { +namespace { +abstract::ShapePtr InferShape(const PrimitivePtr &primitive, const std::vector &input_args) { + MS_EXCEPTION_IF_NULL(primitive); + auto x = input_args[0]->BuildShape(); + auto shape_element = x->cast(); + MS_EXCEPTION_IF_NULL(shape_element); + return shape_element; +} + +TypePtr InferType(const PrimitivePtr &prim, const std::vector &input_args) { + MS_EXCEPTION_IF_NULL(prim); + CheckAndConvertUtils::CheckInteger("input number", input_args.size(), kEqual, 1, prim->name()); + for (const auto &item : input_args) { + MS_EXCEPTION_IF_NULL(item); + } + std::map types; + types.emplace("x", input_args[0]->BuildType()); + auto infer_type = CheckAndConvertUtils::CheckTensorTypeSame(types, common_valid_types, prim->name()); + return TypeIdToType(infer_type); +} +} // namespace + +void LocalResponseNormalization::Init(const int64_t depth_radius, const float bias, const float alpha, + const float beta) { + this->set_depth_radius(depth_radius); + this->set_bias(bias); + this->set_alpha(alpha); + this->set_beta(beta); +} + +void LocalResponseNormalization::set_depth_radius(const int64_t depth_radius) { + this->AddAttr(kDepthRadius, MakeValue(depth_radius)); +} + +int64_t LocalResponseNormalization::get_depth_radius() const { + auto value_ptr = GetAttr(kDepthRadius); + return GetValue(value_ptr); +} + +void LocalResponseNormalization::set_bias(const float bias) { this->AddAttr(kBias, MakeValue(bias)); } + +float LocalResponseNormalization::get_bias() const { + auto value_ptr = GetAttr(kBias); + return 
GetValue(value_ptr); +} + +void LocalResponseNormalization::set_alpha(const float alpha) { this->AddAttr(kAlpha, MakeValue(alpha)); } + +float LocalResponseNormalization::get_alpha() const { + auto value_ptr = GetAttr(kAlpha); + return GetValue(value_ptr); +} + +void LocalResponseNormalization::set_beta(const float beta) { this->AddAttr(kBeta, MakeValue(beta)); } + +float LocalResponseNormalization::get_beta() const { + auto value_ptr = GetAttr(kBeta); + return GetValue(value_ptr); +} + +AbstractBasePtr LocalResponseNormalizationInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args) { + return std::make_shared(InferType(primitive, input_args), + InferShape(primitive, input_args)->shape()); +} +REGISTER_PRIMITIVE_EVAL_IMPL(LocalResponseNormalization, prim::kPrimLocalResponseNormalization, + LocalResponseNormalizationInfer); +REGISTER_PRIMITIVE_C(kNameLocalResponseNormalization, LocalResponseNormalization); +} // namespace ops +} // namespace mindspore diff --git a/mindspore/core/c_ops/local_response_normalization.h b/mindspore/core/ops/local_response_normalization.h similarity index 60% rename from mindspore/core/c_ops/local_response_normalization.h rename to mindspore/core/ops/local_response_normalization.h index 81e52d91059..a87dbc89595 100644 --- a/mindspore/core/c_ops/local_response_normalization.h +++ b/mindspore/core/ops/local_response_normalization.h @@ -14,34 +14,40 @@ * limitations under the License. */ -#ifndef MINDSPORE_CORE_C_OPS_LOCALRESPONSENORMALIZATION_H_ -#define MINDSPORE_CORE_C_OPS_LOCALRESPONSENORMALIZATION_H_ +#ifndef MINDSPORE_CORE_OPS_LOCAL_RESPONSE_NORMALIZATION_H_ +#define MINDSPORE_CORE_OPS_LOCAL_RESPONSE_NORMALIZATION_H_ #include #include #include #include -#include "c_ops/primitive_c.h" +#include "ops/primitive_c.h" #include "abstract/abstract_value.h" #include "utils/check_convert_utils.h" namespace mindspore { +namespace ops { constexpr auto kNameLocalResponseNormalization = "LocalResponseNormalization"; class LocalResponseNormalization : public PrimitiveC { public: LocalResponseNormalization() : PrimitiveC(kNameLocalResponseNormalization) {} ~LocalResponseNormalization() = default; MS_DECLARE_PARENT(LocalResponseNormalization, PrimitiveC); - void Init(const int64_t &depth_radius, const float &bias, const float &alpha, const float &beta); - void set_depth_radius(const int64_t &depth_radius); - void set_bias(const float &bias); - void set_alpha(const float &alpha); - void set_beta(const float &beta); + void Init(const int64_t depth_radius, const float bias, const float alpha, const float beta); + void set_depth_radius(const int64_t depth_radius); + void set_bias(const float bias); + void set_alpha(const float alpha); + void set_beta(const float beta); int64_t get_depth_radius() const; float get_bias() const; float get_alpha() const; float get_beta() const; }; + +AbstractBasePtr LocalResponseNormalizationInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args); +using PrimLocalResponseNormalizationPtr = std::shared_ptr; +} // namespace ops } // namespace mindspore -#endif // MINDSPORE_CORE_C_OPS_LOCALRESPONSENORMALIZATION_H_ +#endif // MINDSPORE_CORE_OPS_LOCAL_RESPONSE_NORMALIZATION_H_ diff --git a/mindspore/core/ops/log.cc b/mindspore/core/ops/log.cc new file mode 100644 index 00000000000..088a8c441b9 --- /dev/null +++ b/mindspore/core/ops/log.cc @@ -0,0 +1,47 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, 
Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include +#include "ops/log.h" +#include "ops/op_utils.h" +#include "utils/check_convert_utils.h" +#include "abstract/primitive_infer_map.h" + +namespace mindspore { +namespace ops { +namespace { +abstract::ShapePtr InferShape(const PrimitivePtr &primitive, const std::vector &input_args) { + auto x_shape = CheckAndConvertUtils::ConvertShapePtrToShape("x_shape", input_args[0]->BuildShape(), "Log"); + return std::make_shared(x_shape); +} + +TypePtr InferType(const PrimitivePtr &prim, const std::vector &input_args) { + auto infer_type = input_args[0]->BuildType()->cast()->element(); + const std::set valid_types = {TypeIdToType(kObjectTypeTensorType)}; + CheckAndConvertUtils::CheckSubClass("infer type", input_args[0]->BuildType(), valid_types, prim->name()); + return infer_type; +} +} // namespace + +AbstractBasePtr LogInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args) { + return std::make_shared(InferType(primitive, input_args), + InferShape(primitive, input_args)->shape()); +} +REGISTER_PRIMITIVE_EVAL_IMPL(Log, prim::kPrimLog, LogInfer); +REGISTER_PRIMITIVE_C(kNameLog, Log); +} // namespace ops +} // namespace mindspore diff --git a/mindspore/core/c_ops/log.h b/mindspore/core/ops/log.h similarity index 82% rename from mindspore/core/c_ops/log.h rename to mindspore/core/ops/log.h index 62e61428056..c847dec3b5c 100644 --- a/mindspore/core/c_ops/log.h +++ b/mindspore/core/ops/log.h @@ -14,16 +14,17 @@ * limitations under the License. */ -#ifndef MINDSPORE_CORE_C_OPS_LOG_H_ -#define MINDSPORE_CORE_C_OPS_LOG_H_ +#ifndef MINDSPORE_CORE_OPS_LOG_H_ +#define MINDSPORE_CORE_OPS_LOG_H_ #include #include -#include "c_ops/primitive_c.h" -#include "c_ops/op_utils.h" +#include "ops/primitive_c.h" +#include "ops/op_utils.h" #include "abstract/abstract_value.h" namespace mindspore { +namespace ops { constexpr auto kNameLog = "Log"; class Log : public PrimitiveC { public: @@ -33,6 +34,7 @@ class Log : public PrimitiveC { }; AbstractBasePtr LogInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, const std::vector &input_args); -using PrimLog = std::shared_ptr; +using PrimLogPtr = std::shared_ptr; +} // namespace ops } // namespace mindspore -#endif // MINDSPORE_CORE_C_OPS_LOG_H_ +#endif // MINDSPORE_CORE_OPS_LOG_H_ diff --git a/mindspore/core/ops/logical_and.cc b/mindspore/core/ops/logical_and.cc new file mode 100644 index 00000000000..9cabd5af997 --- /dev/null +++ b/mindspore/core/ops/logical_and.cc @@ -0,0 +1,61 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
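A sketch of the Log infer above (illustrative only; a default constructor is assumed, since the class body is untouched by this hunk): LogInfer keeps the input shape and returns the tensor's element type after CheckSubClass has confirmed the input really is a tensor.

#include <iostream>
#include "ops/log.h"

// Sketch only. Assumption: Log is default-constructible; it has no attributes,
// and LogInfer simply propagates the element dtype and shape of its one input.
int main() {
  mindspore::ops::Log op;
  std::cout << op.name() << std::endl;  // "Log"
  return 0;
}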
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include +#include +#include +#include "ops/logical_and.h" + +#include "ops/op_utils.h" +#include "utils/check_convert_utils.h" +#include "abstract/primitive_infer_map.h" + +namespace mindspore { +namespace ops { +namespace { +abstract::ShapePtr InferShape(const PrimitivePtr &primitive, const std::vector &input_args) { + MS_EXCEPTION_IF_NULL(primitive); + auto logicaland_prim = primitive->cast(); + MS_EXCEPTION_IF_NULL(logicaland_prim); + auto op_name = logicaland_prim->name(); + return BroadCastInferShape(op_name, input_args); +} + +TypePtr InferType(const PrimitivePtr &prim, const std::vector &input_args) { + for (const auto &item : input_args) { + MS_EXCEPTION_IF_NULL(item); + } + std::map types; + const std::set valid_types = {kNumberTypeBool}; + types.emplace("x", input_args[0]->BuildType()); + types.emplace("y", input_args[1]->BuildType()); + auto infer_type = CheckAndConvertUtils::CheckTensorTypeSame(types, valid_types, prim->name()); + if (infer_type == kNumberTypeBool) { + return TypeIdToType(infer_type); + } + return std::make_shared(TypeIdToType(kNumberTypeBool)); +} +} // namespace + +AbstractBasePtr LogicalAndInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args) { + return std::make_shared(InferType(primitive, input_args), + InferShape(primitive, input_args)->shape()); +} +REGISTER_PRIMITIVE_EVAL_IMPL(LogicalAnd, prim::kPrimLogicalAnd, LogicalAndInfer); +REGISTER_PRIMITIVE_C(kNameLogicalAnd, LogicalAnd); +} // namespace ops +} // namespace mindspore diff --git a/mindspore/core/c_ops/logical_and.h b/mindspore/core/ops/logical_and.h similarity index 71% rename from mindspore/core/c_ops/logical_and.h rename to mindspore/core/ops/logical_and.h index 3e579e4e4b7..e05099afdb1 100644 --- a/mindspore/core/c_ops/logical_and.h +++ b/mindspore/core/ops/logical_and.h @@ -14,17 +14,18 @@ * limitations under the License. 
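A sketch of the logical-op pattern above (illustrative only; a default constructor is assumed): LogicalAndInfer restricts both inputs to bool tensors and broadcasts their shapes, and LogicalOr further below follows exactly the same recipe.

#include <iostream>
#include "ops/logical_and.h"

// Sketch only. Assumption: LogicalAnd is default-constructible; inputs must be
// bool tensors and the broadcast result is bool as well.
int main() {
  mindspore::ops::LogicalAnd op;
  op.Init();  // attribute-free
  std::cout << op.name() << std::endl;
  return 0;
}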
*/ -#ifndef MINDSPORE_CORE_C_OPS_LOGICAL_AND_H_ -#define MINDSPORE_CORE_C_OPS_LOGICAL_AND_H_ +#ifndef MINDSPORE_CORE_OPS_LOGICAL_AND_H_ +#define MINDSPORE_CORE_OPS_LOGICAL_AND_H_ #include #include #include #include -#include "c_ops/primitive_c.h" +#include "ops/primitive_c.h" #include "abstract/abstract_value.h" #include "utils/check_convert_utils.h" namespace mindspore { +namespace ops { constexpr auto kNameLogicalAnd = "LogicalAnd"; class LogicalAnd : public PrimitiveC { public: @@ -33,6 +34,10 @@ class LogicalAnd : public PrimitiveC { MS_DECLARE_PARENT(LogicalAnd, PrimitiveC); void Init() {} }; +AbstractBasePtr LogicalAndInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args); +using PrimLogicalAndPtr = std::shared_ptr; +} // namespace ops } // namespace mindspore -#endif // MINDSPORE_CORE_C_OPS_LOGICAL_AND_H_ +#endif // MINDSPORE_CORE_OPS_LOGICAL_AND_H_ diff --git a/mindspore/core/ops/logical_not.cc b/mindspore/core/ops/logical_not.cc new file mode 100644 index 00000000000..c15956d73f3 --- /dev/null +++ b/mindspore/core/ops/logical_not.cc @@ -0,0 +1,57 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include + +#include "ops/logical_not.h" +#include "ops/op_utils.h" + +namespace mindspore { +namespace ops { +namespace { +abstract::ShapePtr LogicalNotInferShape(const PrimitivePtr &primitive, const std::vector &input_args) { + MS_EXCEPTION_IF_NULL(primitive); + auto LogicalNot_prim = primitive->cast(); + MS_EXCEPTION_IF_NULL(LogicalNot_prim); + auto op_name = LogicalNot_prim->name(); + auto in_shape = CheckAndConvertUtils::ConvertShapePtrToShape("input_shape", input_args[0]->BuildShape(), op_name); + return std::make_shared(in_shape); +} + +TypePtr LogicalNotInferType(const PrimitivePtr &prim, const std::vector &input_args) { + MS_EXCEPTION_IF_NULL(prim); + auto LogicalNot_prim = prim->cast(); + MS_EXCEPTION_IF_NULL(LogicalNot_prim); + auto op_name = LogicalNot_prim->name(); + auto infer_dtype = input_args[0]->BuildType(); + std::set local_bool = {kNumberTypeBool}; + CheckAndConvertUtils::CheckTensorTypeValid("x", infer_dtype, local_bool, op_name); + auto tensor_type = infer_dtype->cast(); + MS_EXCEPTION_IF_NULL(tensor_type); + auto element = tensor_type->element(); + return element; +} +} // namespace +AbstractBasePtr LogicalNotInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args) { + return std::make_shared(LogicalNotInferType(primitive, input_args), + LogicalNotInferShape(primitive, input_args)->shape()); +} + +REGISTER_PRIMITIVE_EVAL_IMPL(LogicalNot, prim::kPrimLogicalNot, LogicalNotInfer); +REGISTER_PRIMITIVE_C(kNameLogicalNot, LogicalNot); +} // namespace ops +} // namespace mindspore diff --git a/mindspore/core/c_ops/logical_not.h b/mindspore/core/ops/logical_not.h similarity index 68% rename from mindspore/core/c_ops/logical_not.h rename to mindspore/core/ops/logical_not.h index 0ba1eae6fea..8fea01be8b6 
100644 --- a/mindspore/core/c_ops/logical_not.h +++ b/mindspore/core/ops/logical_not.h @@ -14,13 +14,16 @@ * limitations under the License. */ -#ifndef MINDSPORE_CORE_C_OPS_LOGICALNOT_H_ -#define MINDSPORE_CORE_C_OPS_LOGICALNOT_H_ -#include "c_ops/primitive_c.h" +#ifndef MINDSPORE_CORE_OPS_LOGICAL_NOT_H_ +#define MINDSPORE_CORE_OPS_LOGICAL_NOT_H_ +#include +#include +#include "ops/primitive_c.h" #include "abstract/abstract_value.h" #include "utils/check_convert_utils.h" namespace mindspore { +namespace ops { constexpr auto kNameLogicalNot = "LogicalNot"; class LogicalNot : public PrimitiveC { public: @@ -29,6 +32,11 @@ class LogicalNot : public PrimitiveC { MS_DECLARE_PARENT(LogicalNot, PrimitiveC); void Init() {} }; + +AbstractBasePtr LogicalNotInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args); +using PrimLogicalNotPtr = std::shared_ptr; +} // namespace ops } // namespace mindspore -#endif // MINDSPORE_CORE_C_OPS_LOGICALNOT_H_ +#endif // MINDSPORE_CORE_OPS_LOGICAL_NOT_H_ diff --git a/mindspore/core/ops/logical_or.cc b/mindspore/core/ops/logical_or.cc new file mode 100644 index 00000000000..46eebf4b007 --- /dev/null +++ b/mindspore/core/ops/logical_or.cc @@ -0,0 +1,62 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include +#include +#include +#include +#include + +#include "ops/logical_or.h" +#include "ops/op_utils.h" +#include "utils/check_convert_utils.h" + +namespace mindspore { +namespace ops { +namespace { +abstract::ShapePtr InferShape(const PrimitivePtr &primitive, const std::vector &input_args) { + MS_EXCEPTION_IF_NULL(primitive); + auto logicalor_prim = primitive->cast(); + MS_EXCEPTION_IF_NULL(logicalor_prim); + auto op_name = logicalor_prim->name(); + return BroadCastInferShape(op_name, input_args); +} + +TypePtr InferType(const PrimitivePtr &prim, const std::vector &input_args) { + for (const auto &item : input_args) { + MS_EXCEPTION_IF_NULL(item); + } + std::map types; + const std::set valid_types = {kNumberTypeBool}; + types.emplace("x", input_args[0]->BuildType()); + types.emplace("y", input_args[1]->BuildType()); + auto infer_type = CheckAndConvertUtils::CheckTensorTypeSame(types, valid_types, prim->name()); + if (infer_type == kNumberTypeBool) { + return TypeIdToType(infer_type); + } + return std::make_shared(TypeIdToType(kNumberTypeBool)); +} +} // namespace + +AbstractBasePtr LogicalOrInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args) { + return std::make_shared(InferType(primitive, input_args), + InferShape(primitive, input_args)->shape()); +} +REGISTER_PRIMITIVE_EVAL_IMPL(LogicalOr, prim::kPrimLogicalOr, LogicalOrInfer); +REGISTER_PRIMITIVE_C(kNameLogicalOr, LogicalOr); +} // namespace ops +} // namespace mindspore diff --git a/mindspore/core/c_ops/logical_or.h b/mindspore/core/ops/logical_or.h similarity index 68% rename from mindspore/core/c_ops/logical_or.h rename to mindspore/core/ops/logical_or.h index d9dc683d888..0e3fc4b8c14 100644 --- a/mindspore/core/c_ops/logical_or.h +++ b/mindspore/core/ops/logical_or.h @@ -14,13 +14,16 @@ * limitations under the License. */ -#ifndef MINDSPORE_CORE_C_OPS_LOGICALOR_H_ -#define MINDSPORE_CORE_C_OPS_LOGICALOR_H_ -#include "c_ops/primitive_c.h" +#ifndef MINDSPORE_CORE_OPS_LOGICAL_OR_H_ +#define MINDSPORE_CORE_OPS_LOGICAL_OR_H_ +#include +#include +#include "ops/primitive_c.h" #include "abstract/abstract_value.h" #include "utils/check_convert_utils.h" namespace mindspore { +namespace ops { constexpr auto kNameLogicalOr = "LogicalOr"; class LogicalOr : public PrimitiveC { public: @@ -29,6 +32,10 @@ class LogicalOr : public PrimitiveC { MS_DECLARE_PARENT(LogicalOr, PrimitiveC); void Init() {} }; +AbstractBasePtr LogicalOrInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args); +using PrimLogicalOrPtr = std::shared_ptr; +} // namespace ops } // namespace mindspore -#endif // MINDSPORE_CORE_C_OPS_LOGICALOR_H_ +#endif // MINDSPORE_CORE_OPS_LOGICAL_OR_H_ diff --git a/mindspore/core/ops/logical_xor.cc b/mindspore/core/ops/logical_xor.cc new file mode 100644 index 00000000000..427758715fa --- /dev/null +++ b/mindspore/core/ops/logical_xor.cc @@ -0,0 +1,23 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "ops/logical_xor.h" + +namespace mindspore { +namespace ops { +REGISTER_PRIMITIVE_C(kNameLogicalXor, LogicalXor); +} // namespace ops +} // namespace mindspore diff --git a/mindspore/core/ops/logical_xor.h b/mindspore/core/ops/logical_xor.h new file mode 100644 index 00000000000..c765f7b3c60 --- /dev/null +++ b/mindspore/core/ops/logical_xor.h @@ -0,0 +1,36 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_CORE_OPS_LOGICAL_XOR_H_ +#define MINDSPORE_CORE_OPS_LOGICAL_XOR_H_ +#include "ops/primitive_c.h" +#include "abstract/abstract_value.h" +#include "utils/check_convert_utils.h" + +namespace mindspore { +namespace ops { +constexpr auto kNameLogicalXor = "LogicalXor"; +class LogicalXor : public PrimitiveC { + public: + LogicalXor() : PrimitiveC(kNameLogicalXor) {} + ~LogicalXor() = default; + MS_DECLARE_PARENT(LogicalXor, PrimitiveC); + void Init() {} +}; +} // namespace ops +} // namespace mindspore + +#endif // MINDSPORE_CORE_OPS_LOGICAL_XOR_H_ diff --git a/mindspore/core/c_ops/loop.cc b/mindspore/core/ops/loop.cc similarity index 74% rename from mindspore/core/c_ops/loop.cc rename to mindspore/core/ops/loop.cc index 793f4e0ae15..b08edbb9549 100644 --- a/mindspore/core/c_ops/loop.cc +++ b/mindspore/core/ops/loop.cc @@ -14,18 +14,22 @@ * limitations under the License. */ -#include "c_ops/loop.h" -#include "c_ops/op_utils.h" +#include "ops/loop.h" +#include "ops/op_utils.h" #include "utils/check_convert_utils.h" namespace mindspore { -void Loop::Init(int64_t sub_graph_index) { this->set_sub_graph_index(sub_graph_index); } +namespace ops { +void Loop::Init(const int64_t sub_graph_index) { this->set_sub_graph_index(sub_graph_index); } -void Loop::set_sub_graph_index(int64_t sub_graph_index) { this->AddAttr(kSubGraphIndex, MakeValue(sub_graph_index)); } +void Loop::set_sub_graph_index(const int64_t sub_graph_index) { + this->AddAttr(kSubGraphIndex, MakeValue(sub_graph_index)); +} int64_t Loop::get_sub_graph_index() const { auto value_ptr = this->GetAttr(kSubGraphIndex); return GetValue(value_ptr); } REGISTER_PRIMITIVE_C(kNameLoop, Loop); +} // namespace ops } // namespace mindspore diff --git a/mindspore/core/c_ops/loop.h b/mindspore/core/ops/loop.h similarity index 78% rename from mindspore/core/c_ops/loop.h rename to mindspore/core/ops/loop.h index e3f88811b60..f63230126b9 100644 --- a/mindspore/core/c_ops/loop.h +++ b/mindspore/core/ops/loop.h @@ -14,27 +14,29 @@ * limitations under the License. 
*/ -#ifndef MINDSPORE_CORE_C_OPS_LOOP_H_ -#define MINDSPORE_CORE_C_OPS_LOOP_H_ +#ifndef MINDSPORE_CORE_OPS_LOOP_H_ +#define MINDSPORE_CORE_OPS_LOOP_H_ #include <memory> -#include "c_ops/primitive_c.h" +#include "ops/primitive_c.h" #include "abstract/abstract_value.h" #include "utils/check_convert_utils.h" namespace mindspore { +namespace ops { constexpr auto kNameLoop = "Loop"; class Loop : public PrimitiveC { public: Loop() : PrimitiveC(kNameLoop) {} ~Loop() = default; MS_DECLARE_PARENT(Loop, PrimitiveC); - void Init(int64_t sub_graph_index); - void set_sub_graph_index(int64_t sub_graph_index); + void Init(const int64_t sub_graph_index); + void set_sub_graph_index(const int64_t sub_graph_index); int64_t get_sub_graph_index() const; }; using PrimLoopPtr = std::shared_ptr<Loop>; +} // namespace ops } // namespace mindspore -#endif // MINDSPORE_CORE_C_OPS_LOOP_H_ +#endif // MINDSPORE_CORE_OPS_LOOP_H_ diff --git a/mindspore/core/c_ops/lp_normalization.cc b/mindspore/core/ops/lp_normalization.cc similarity index 75% rename from mindspore/core/c_ops/lp_normalization.cc rename to mindspore/core/ops/lp_normalization.cc index ef6763fad15..935b88bf411 100644 --- a/mindspore/core/c_ops/lp_normalization.cc +++ b/mindspore/core/ops/lp_normalization.cc @@ -14,28 +14,30 @@ * limitations under the License. */ -#include "c_ops/lp_normalization.h" -#include "c_ops/op_utils.h" +#include "ops/lp_normalization.h" +#include "ops/op_utils.h" #include "utils/check_convert_utils.h" namespace mindspore { -void LpNormalization::Init(int64_t axis, int64_t p) { +namespace ops { +void LpNormalization::Init(const int64_t axis, const int64_t p) { this->set_axis(axis); this->set_p(p); } -void LpNormalization::set_axis(int64_t axis) { this->AddAttr(kAxis, MakeValue(axis)); } +void LpNormalization::set_axis(const int64_t axis) { this->AddAttr(kAxis, MakeValue(axis)); } int64_t LpNormalization::get_axis() const { auto value_ptr = this->GetAttr(kAxis); return GetValue<int64_t>(value_ptr); } -void LpNormalization::set_p(int64_t p) { this->AddAttr(kP, MakeValue(p)); } +void LpNormalization::set_p(const int64_t p) { this->AddAttr(kP, MakeValue(p)); } int64_t LpNormalization::get_p() const { auto value_ptr = this->GetAttr(kP); return GetValue<int64_t>(value_ptr); } REGISTER_PRIMITIVE_C(kNameLpNormalization, LpNormalization); +} // namespace ops } // namespace mindspore diff --git a/mindspore/core/c_ops/lp_normalization.h b/mindspore/core/ops/lp_normalization.h similarity index 77% rename from mindspore/core/c_ops/lp_normalization.h rename to mindspore/core/ops/lp_normalization.h index 969219e874f..f3851233de5 100644 --- a/mindspore/core/c_ops/lp_normalization.h +++ b/mindspore/core/ops/lp_normalization.h @@ -14,29 +14,31 @@ * limitations under the License.
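// Usage sketch (illustrative, assumes the surrounding mindspore/core build environment):
// the Init/set_*/get_* pattern above keeps every operator attribute on the primitive
// itself, writing with AddAttr(MakeValue(...)) and reading back with GetValue<int64_t>.
#include <cstdint>

#include "ops/lp_normalization.h"

void LpNormalizationAttrRoundTrip() {
  mindspore::ops::LpNormalization lp_norm;
  lp_norm.Init(/*axis=*/1, /*p=*/2);  // stores kAxis and kP as int64 attrs
  int64_t axis = lp_norm.get_axis();  // 1, recovered from the attr map
  int64_t p = lp_norm.get_p();        // 2
  (void)axis;
  (void)p;
}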
*/ -#ifndef MINDSPORE_CORE_C_OPS_LPNORMALIZATION_H_ -#define MINDSPORE_CORE_C_OPS_LPNORMALIZATION_H_ +#ifndef MINDSPORE_CORE_OPS_LP_NORMALIZATION_H_ +#define MINDSPORE_CORE_OPS_LP_NORMALIZATION_H_ #include -#include "c_ops/primitive_c.h" +#include "ops/primitive_c.h" #include "abstract/abstract_value.h" #include "utils/check_convert_utils.h" namespace mindspore { +namespace ops { constexpr auto kNameLpNormalization = "LpNormalization"; class LpNormalization : public PrimitiveC { public: LpNormalization() : PrimitiveC(kNameLpNormalization) {} ~LpNormalization() = default; MS_DECLARE_PARENT(LpNormalization, PrimitiveC); - void Init(int64_t axis, int64_t p); - void set_axis(int64_t axis); - void set_p(int64_t p); + void Init(const int64_t axis, const int64_t p); + void set_axis(const int64_t axis); + void set_p(const int64_t p); int64_t get_axis() const; int64_t get_p() const; }; using PrimLpNormalizationPtr = std::shared_ptr; +} // namespace ops } // namespace mindspore -#endif // MINDSPORE_CORE_C_OPS_LPNORMALIZATION_H_ +#endif // MINDSPORE_CORE_OPS_LP_NORMALIZATION_H_ diff --git a/mindspore/core/c_ops/lrn.cc b/mindspore/core/ops/lrn.cc similarity index 50% rename from mindspore/core/c_ops/lrn.cc rename to mindspore/core/ops/lrn.cc index ca0a1d10864..9e5417f11ac 100644 --- a/mindspore/core/c_ops/lrn.cc +++ b/mindspore/core/ops/lrn.cc @@ -14,18 +14,19 @@ * limitations under the License. */ -#include "c_ops/lrn.h" +#include "ops/lrn.h" #include #include #include #include #include -#include "c_ops/op_utils.h" +#include "ops/op_utils.h" #include "utils/check_convert_utils.h" #include "abstract/primitive_infer_map.h" namespace mindspore { -void Lrn::set_depth_radius(const int64_t &depth_radius) { +namespace ops { +void Lrn::set_depth_radius(const int64_t depth_radius) { CheckAndConvertUtils::CheckInteger(kDepthRadius, depth_radius, kGreaterEqual, 0, this->name()); this->AddAttr(kDepthRadius, MakeValue(depth_radius)); } @@ -35,21 +36,21 @@ int64_t Lrn::get_depth_radius() const { return GetValue(value_ptr); } -void Lrn::set_bias(const float &bias) { this->AddAttr(kBias, MakeValue(bias)); } +void Lrn::set_bias(const float bias) { this->AddAttr(kBias, MakeValue(bias)); } float Lrn::get_bias() const { auto value_ptr = GetAttr(kBias); return GetValue(value_ptr); } -void Lrn::set_alpha(const float &alpha) { this->AddAttr(kAlpha, MakeValue(alpha)); } +void Lrn::set_alpha(const float alpha) { this->AddAttr(kAlpha, MakeValue(alpha)); } float Lrn::get_alpha() const { auto value_ptr = GetAttr(kAlpha); return GetValue(value_ptr); } -void Lrn::set_beta(const float &beta) { this->AddAttr(kBeta, MakeValue(beta)); } +void Lrn::set_beta(const float beta) { this->AddAttr(kBeta, MakeValue(beta)); } float Lrn::get_beta() const { auto value_ptr = GetAttr(kBeta); @@ -64,7 +65,7 @@ std::string Lrn::get_norm_region() const { auto value_ptr = GetAttr(kNormRegion); return GetValue(value_ptr); } -void Lrn::Init(const int64_t &depth_radius, const float &bias, const float &alpha, const float &beta, +void Lrn::Init(const int64_t depth_radius, const float bias, const float alpha, const float beta, const std::string &norm_region) { this->set_depth_radius(depth_radius); this->set_bias(bias); @@ -72,5 +73,36 @@ void Lrn::Init(const int64_t &depth_radius, const float &bias, const float &alph this->set_beta(beta); this->set_norm_region(norm_region); } + +namespace { +abstract::ShapePtr InferShape(const PrimitivePtr &primitive, const std::vector &input_args) { + MS_EXCEPTION_IF_NULL(primitive); + auto lrn_prim = primitive->cast(); + 
MS_EXCEPTION_IF_NULL(lrn_prim); + auto prim_name = lrn_prim->name(); + auto in_shape = CheckAndConvertUtils::ConvertShapePtrToShape("x_shape", input_args[0]->BuildShape(), prim_name); + CheckAndConvertUtils::CheckInteger("input shape", in_shape.size(), kEqual, 4, prim_name); + return std::make_shared(in_shape); +} + +TypePtr InferType(const PrimitivePtr &prim, const std::vector &input_args) { + const std::set valid_types = {kNumberTypeFloat16, kNumberTypeFloat32}; + if (std::any_of(input_args.begin(), input_args.end(), [](AbstractBasePtr a) { return a == nullptr; })) { + MS_LOG(EXCEPTION) << "nullptr"; + } + std::map types; + types.emplace("x", input_args[0]->BuildType()); + auto infer_type = CheckAndConvertUtils::CheckTensorTypeSame(types, valid_types, prim->name()); + return TypeIdToType(infer_type); +} +} // namespace + +AbstractBasePtr LrnInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args) { + return std::make_shared(InferType(primitive, input_args), + InferShape(primitive, input_args)->shape()); +} +REGISTER_PRIMITIVE_EVAL_IMPL(Lrn, prim::kPrimLrn, LrnInfer); REGISTER_PRIMITIVE_C(kNameLrn, Lrn); +} // namespace ops } // namespace mindspore diff --git a/mindspore/core/c_ops/lrn.h b/mindspore/core/ops/lrn.h similarity index 66% rename from mindspore/core/c_ops/lrn.h rename to mindspore/core/ops/lrn.h index 91606c5177d..ad9aa45252d 100644 --- a/mindspore/core/c_ops/lrn.h +++ b/mindspore/core/ops/lrn.h @@ -14,29 +14,30 @@ * limitations under the License. */ -#ifndef MINDSPORE_CORE_C_OPS_LRN_H_ -#define MINDSPORE_CORE_C_OPS_LRN_H_ +#ifndef MINDSPORE_CORE_OPS_LRN_H_ +#define MINDSPORE_CORE_OPS_LRN_H_ #include #include #include #include -#include "c_ops/primitive_c.h" +#include "ops/primitive_c.h" #include "abstract/abstract_value.h" #include "utils/check_convert_utils.h" namespace mindspore { +namespace ops { constexpr auto kNameLrn = "Lrn"; class Lrn : public PrimitiveC { public: Lrn() : PrimitiveC(kNameLrn) { InitIOName({"x"}, {"y"}); } ~Lrn() = default; MS_DECLARE_PARENT(Lrn, PrimitiveC); - void Init(const int64_t &depth_radius = 5, const float &bias = 1.0, const float &alpha = 1.0, const float &beta = 0.5, + void Init(const int64_t depth_radius = 5, const float bias = 1.0, const float alpha = 1.0, const float beta = 0.5, const std::string &norm_region = "ACROSS_CHANNELS"); - void set_depth_radius(const int64_t &depth_radius); - void set_bias(const float &bias); - void set_alpha(const float &alpha); - void set_beta(const float &beta); + void set_depth_radius(const int64_t depth_radius); + void set_bias(const float bias); + void set_alpha(const float alpha); + void set_beta(const float beta); void set_norm_region(const std::string &norm_region); int64_t get_depth_radius() const; float get_bias() const; @@ -44,6 +45,9 @@ class Lrn : public PrimitiveC { float get_beta() const; std::string get_norm_region() const; }; +AbstractBasePtr LrnInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args); +using PrimLrn = std::shared_ptr; +} // namespace ops } // namespace mindspore - -#endif // MINDSPORE_CORE_C_OPS_LRN_H_ +#endif // MINDSPORE_CORE_OPS_LRN_H_ diff --git a/mindspore/core/ops/lsh_projection.cc b/mindspore/core/ops/lsh_projection.cc new file mode 100644 index 00000000000..768f1fdd9bf --- /dev/null +++ b/mindspore/core/ops/lsh_projection.cc @@ -0,0 +1,71 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you 
may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "ops/lsh_projection.h" +#include "ops/op_utils.h" + +namespace mindspore { +namespace ops { +void LshProjection::Init(const LshProjectionType &type) { set_type(type); } + +void LshProjection::set_type(const LshProjectionType &type) { + int64_t swi = (int64_t)type; + AddAttr(kType, MakeValue(swi)); +} + +LshProjectionType LshProjection::get_type() const { + auto value_ptr = GetAttr(kType); + return LshProjectionType(GetValue(value_ptr)); +} + +AbstractBasePtr LshProjectionInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args) { + MS_EXCEPTION_IF_NULL(primitive); + auto LshProjection_prim = primitive->cast(); + MS_EXCEPTION_IF_NULL(LshProjection_prim); + // if (input_args.size() != 2 && input_args.size() != 3) { + // MS_LOG(ERROR) << "inputs to LshProjection operator should be 2 or 3, but " << input_args.size() << " is given."; + // } + auto op_name = LshProjection_prim->name(); + auto input0 = CheckAndConvertUtils::ConvertShapePtrToShape("input0_shape", input_args[0]->BuildShape(), op_name); + auto input1 = CheckAndConvertUtils::ConvertShapePtrToShape("input1_shape", input_args[1]->BuildShape(), op_name); + CheckAndConvertUtils::CheckInteger("input0_shape", input0.size(), kEqual, 2, op_name); + CheckAndConvertUtils::CheckInteger("input0_shape_dimen_1", input0[1], kLessEqual, 32, op_name); + CheckAndConvertUtils::CheckInteger("input1_shape", input1.size(), kGreaterEqual, 1, op_name); + + if (input_args.size() == 3) { + auto input2 = CheckAndConvertUtils::ConvertShapePtrToShape("input2_shape", input_args[2]->BuildShape(), op_name); + CheckAndConvertUtils::CheckInteger("input2_shape", input2.size(), kEqual, 1, op_name); + CheckAndConvertUtils::CheckInteger("input2_shape_dimen_0", input2[0], kEqual, input1[0], op_name); + } + + std::vector out_shape; + switch ((int64_t)LshProjection_prim->get_type()) { + case (int64_t)LshProjectionType::SPARSE: + out_shape.push_back(input0[0]); + break; + case (int64_t)LshProjectionType::DENSE: + out_shape.push_back(input0[0] * input0[1]); + break; + } + TypePtr infer_type = TypeIdToType(kNumberTypeInt32); + return std::make_shared(infer_type, out_shape); +} + +REGISTER_PRIMITIVE_EVAL_IMPL(LshProjection, prim::kPrimLshProjection, LshProjectionInfer); +REGISTER_PRIMITIVE_C(kNameLshProjection, LshProjection); +} // namespace ops +} // namespace mindspore diff --git a/mindspore/core/ops/lsh_projection.h b/mindspore/core/ops/lsh_projection.h new file mode 100644 index 00000000000..a122f7001d1 --- /dev/null +++ b/mindspore/core/ops/lsh_projection.h @@ -0,0 +1,46 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
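// Illustrative sketch: LshProjectionInfer above derives the output shape from the projection
// type. SPARSE keeps one bucket index per hash table ({input0[0]}), DENSE flattens the whole
// hash matrix ({input0[0] * input0[1]}), and the result type is always int32. The enum and
// helper below only restate that rule; they are not MindSpore definitions.
#include <cstdint>
#include <vector>

enum class ProjectionKind { kSparse, kDense };

std::vector<int64_t> LshOutputShape(const std::vector<int64_t> &hash_shape, ProjectionKind kind) {
  // hash_shape is the 2-D shape of input0, e.g. {num_tables, hash_bits} with hash_bits <= 32.
  if (kind == ProjectionKind::kSparse) {
    return {hash_shape[0]};                // one bucket index per table
  }
  return {hash_shape[0] * hash_shape[1]};  // one bit per (table, hash bit) pair
}
// Example: hash_shape = {4, 16} gives {4} for SPARSE and {64} for DENSE.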
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_CORE_OPS_LSH_PROJECTION_H_ +#define MINDSPORE_CORE_OPS_LSH_PROJECTION_H_ +#include +#include +#include +#include +#include "ops/primitive_c.h" +#include "abstract/abstract_value.h" +#include "utils/check_convert_utils.h" + +namespace mindspore { +namespace ops { +constexpr auto kNameLshProjection = "LshProjection"; +class LshProjection : public PrimitiveC { + public: + LshProjection() : PrimitiveC(kNameLshProjection) {} + ~LshProjection() = default; + MS_DECLARE_PARENT(LshProjection, PrimitiveC); + void Init(const LshProjectionType &type); + void set_type(const LshProjectionType &type); + LshProjectionType get_type() const; +}; + +AbstractBasePtr LshProjectionInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args); +using PrimLshProjectionPtr = std::shared_ptr; +} // namespace ops +} // namespace mindspore + +#endif // MINDSPORE_CORE_OPS_LSH_PROJECTION_H_ diff --git a/mindspore/core/ops/lstm.cc b/mindspore/core/ops/lstm.cc new file mode 100644 index 00000000000..e2e83952501 --- /dev/null +++ b/mindspore/core/ops/lstm.cc @@ -0,0 +1,176 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "ops/lstm.h" + +namespace mindspore { +namespace ops { +namespace { +AbstractBasePtr LstmInfer(const PrimitivePtr &primitive, const std::vector &input_args) { + // infer shape + MS_EXCEPTION_IF_NULL(primitive); + auto lstm_prim = primitive->cast(); + MS_EXCEPTION_IF_NULL(lstm_prim); + auto prim_name = lstm_prim->name(); + CheckAndConvertUtils::CheckInteger("lstm_prim_infer", input_args.size(), kEqual, 4, prim_name); + auto x_input_shape = CheckAndConvertUtils::ConvertShapePtrToShape("x_shape", input_args[0]->BuildShape(), prim_name); + auto h_input_shape = CheckAndConvertUtils::ConvertShapePtrToShape("h_shape", input_args[1]->BuildShape(), prim_name); + auto c_input_shape = CheckAndConvertUtils::ConvertShapePtrToShape("c_shape", input_args[2]->BuildShape(), prim_name); + + int64_t input_x_size = lstm_prim->get_input_size(); + CheckAndConvertUtils::CheckInteger("x_shape.size()", x_input_shape.size(), kEqual, 3, prim_name); + CheckAndConvertUtils::CheckInteger("x_shape[2]", x_input_shape[2], kEqual, input_x_size, prim_name); + + CheckAndConvertUtils::CheckInteger("h_shape.size()", h_input_shape.size(), kEqual, 3, prim_name); + CheckAndConvertUtils::Check("h_shape", h_input_shape, kEqual, "c_shape", c_input_shape, lstm_prim->name()); + + int64_t num_layers = lstm_prim->get_num_layers(); + int64_t num_directions = lstm_prim->get_num_directions(); + int64_t hidden_size = lstm_prim->get_hidden_size(); + int64_t input_size = lstm_prim->get_input_size(); + CheckAndConvertUtils::CheckInteger("h_shape[0]", h_input_shape[0], kEqual, num_layers * num_directions, prim_name); + CheckAndConvertUtils::CheckInteger("h_shape[1]", h_input_shape[1], kEqual, x_input_shape[1], prim_name); + CheckAndConvertUtils::CheckInteger("h_shape[2]", h_input_shape[2], kEqual, hidden_size, prim_name); + + std::vector y_shape = {x_input_shape[0], x_input_shape[1], hidden_size * num_directions}; + + int64_t type_size = 4; + int64_t gates_ws_ld = lstm_prim->get_good_ld(hidden_size * 4, type_size); + int64_t states_ws_ld = lstm_prim->get_good_ld(std::max(hidden_size, input_size), type_size); + int64_t ws_gates_size = num_layers * num_directions * x_input_shape[0] * x_input_shape[1] * gates_ws_ld * type_size; + int64_t ws_states_size = + (num_layers + 1) * num_directions * (x_input_shape[0] + 1) * x_input_shape[1] * states_ws_ld * type_size; + int64_t ws_c_states_size = + (num_layers + 1) * num_directions * (x_input_shape[0] + 1) * x_input_shape[1] * states_ws_ld * type_size; + int64_t ws_diff_states_size = + (num_layers + 1) * num_directions * 3 * (x_input_shape[0] + 1) * x_input_shape[1] * states_ws_ld * type_size; + int64_t ws_grad_comp_size = 0; + int64_t page_size = 4096; + int64_t current_offset = 0; + current_offset += ws_gates_size; + current_offset = ((current_offset / page_size - 1) / page_size) * page_size; + current_offset += ws_states_size; + current_offset = ((current_offset / page_size - 1) / page_size) * page_size; + current_offset += ws_c_states_size; + current_offset = ((current_offset / page_size - 1) / page_size) * page_size; + current_offset += ws_diff_states_size; + current_offset = ((current_offset / page_size - 1) / page_size) * page_size; + current_offset += ws_grad_comp_size; + std::vector x_shape = {x_input_shape}; + // std::vector h_shape = {h_input_shape}; + std::vector c_shape = {c_input_shape}; + std::vector reverse_shape = {current_offset, 1}; + std::vector state_shape = {1, 1}; + + // infer type + CheckAndConvertUtils::CheckInteger("lstm_prim_infer", input_args.size(), kEqual, 4, 
prim_name); + for (const auto &item : input_args) { + MS_EXCEPTION_IF_NULL(item); + } + auto infer_type0 = input_args[0]->BuildType()->cast()->element(); + auto infer_type1 = input_args[1]->BuildType()->cast()->element(); + auto infer_type2 = input_args[2]->BuildType()->cast()->element(); + auto infer_type3 = input_args[3]->BuildType()->cast()->element(); + auto infer_type4 = input_args[4]->BuildType()->cast()->element(); + auto output0 = std::make_shared(infer_type0, x_shape); + auto output1 = std::make_shared(infer_type1, y_shape); + auto output2 = std::make_shared(infer_type2, c_shape); + auto output3 = std::make_shared(infer_type3, reverse_shape); + auto output4 = std::make_shared(infer_type4, state_shape); + AbstractBasePtrList output = {output0, output1, output2, output3, output4}; + return std::make_shared(output); +} +} // namespace + +void LSTM::set_input_size(const int64_t input_size) { + CheckAndConvertUtils::CheckInteger(kInput_size, input_size, kGreaterThan, 0, this->name()); + AddAttr(kInput_size, MakeValue(input_size)); +} +int64_t LSTM::get_input_size() const { + auto value_ptr = this->GetAttr(kInput_size); + return GetValue(value_ptr); +} +void LSTM::set_hidden_size(const int64_t hidden_size) { + CheckAndConvertUtils::CheckInteger(kHidden_size, hidden_size, kGreaterThan, 0, this->name()); + AddAttr(kHidden_size, MakeValue(hidden_size)); +} +int64_t LSTM::get_hidden_size() const { + auto value_ptr = this->GetAttr(kHidden_size); + return GetValue(value_ptr); +} +void LSTM::set_num_layers(const int64_t num_layers) { + CheckAndConvertUtils::CheckInteger(kNumLayers, num_layers, kGreaterThan, 0, this->name()); + AddAttr(kNumLayers, MakeValue(num_layers)); +} +int64_t LSTM::get_num_layers() const { + auto value_ptr = this->GetAttr(kNumLayers); + return GetValue(value_ptr); +} +void LSTM::set_has_bias(const bool has_bias) { AddAttr(kHasBias, MakeValue(has_bias)); } +bool LSTM::get_has_bias() const { + auto value_ptr = this->GetAttr(kHasBias); + return GetValue(value_ptr); +} +void LSTM::set_dropout(const float dropout) { + CheckAndConvertUtils::CheckInRange(kDropout, dropout, kIncludeBoth, {0.0, 1.0}, this->name()); + AddAttr(kDropout, MakeValue(dropout)); +} +float LSTM::get_dropout() const { + auto value_ptr = this->GetAttr(kDropout); + return GetValue(value_ptr); +} +void LSTM::set_bidirectional(const bool bidirectional) { AddAttr(kBidirectional, MakeValue(bidirectional)); } +bool LSTM::get_bidirectional() const { + auto value_ptr = this->GetAttr(kBidirectional); + return GetValue(value_ptr); +} +void LSTM::set_num_directions(const int64_t num_directions) { AddAttr(kNumDirections, MakeValue(num_directions)); } +int64_t LSTM::get_num_directions() const { + auto value_ptr = this->GetAttr(kNumDirections); + return GetValue(value_ptr); +} +void LSTM::Init(const int64_t input_size, const int64_t hidden_size, const int64_t num_layers, const bool has_bias, + const float dropout, const bool bidirectional) { + this->set_input_size(input_size); + this->set_hidden_size(hidden_size); + this->set_num_layers(num_layers); + this->set_has_bias(has_bias); + this->set_dropout(dropout); + this->set_bidirectional(bidirectional); + if (bidirectional) { + this->set_num_directions(2); + } else { + this->set_num_directions(1); + } +} + +int64_t LSTM::get_good_ld(const int64_t dim, const int64_t type_size) { + int64_t ld = ((dim + (64 / type_size) - 1) / (64 / type_size)) * (64 / type_size); + if (ld * 256 == 0) { + return ld + 64 / type_size; + } + return ld; +} + +AbstractBasePtr LstmInfer(const 
abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args) { + return std::make_shared(LstmInfer(primitive, input_args)); +} + +REGISTER_PRIMITIVE_EVAL_IMPL(LSTM, prim::kPrimLstm, LstmInfer); +REGISTER_PRIMITIVE_C(kNameLSTM, LSTM); +} // namespace ops +} // namespace mindspore diff --git a/mindspore/core/c_ops/lstm.h b/mindspore/core/ops/lstm.h similarity index 56% rename from mindspore/core/c_ops/lstm.h rename to mindspore/core/ops/lstm.h index ec69721f502..d45e3f05acf 100644 --- a/mindspore/core/c_ops/lstm.h +++ b/mindspore/core/ops/lstm.h @@ -14,44 +14,50 @@ * limitations under the License. */ -#ifndef MINDSPORE_CORE_C_OPS_LSTM_H_ -#define MINDSPORE_CORE_C_OPS_LSTM_H_ +#ifndef MINDSPORE_CORE_OPS_LSTM_H_ +#define MINDSPORE_CORE_OPS_LSTM_H_ #include #include #include #include #include -#include "c_ops/op_utils.h" -#include "c_ops/primitive_c.h" +#include "ops/op_utils.h" +#include "ops/primitive_c.h" #include "abstract/primitive_infer_map.h" #include "abstract/abstract_value.h" #include "utils/check_convert_utils.h" namespace mindspore { +namespace ops { constexpr auto kNameLSTM = "LSTM"; class LSTM : public PrimitiveC { public: LSTM() : PrimitiveC(kNameLSTM) {} ~LSTM() = default; MS_DECLARE_PARENT(LSTM, PrimitiveC); - void Init(const int64_t &input_size, const int64_t &hidden_size, const int64_t &num_layers, const bool &has_bias, - const float &dropout, const bool &bidirectional = false); - void set_input_size(const int64_t &input_size); + void Init(const int64_t input_size, const int64_t hidden_size, const int64_t num_layers, const bool has_bias, + const float dropout, const bool bidirectional = false); + void set_input_size(const int64_t input_size); int64_t get_input_size() const; - void set_hidden_size(const int64_t &hidden_size); + void set_hidden_size(const int64_t hidden_size); int64_t get_hidden_size() const; - void set_num_layers(const int64_t &num_layers); + void set_num_layers(const int64_t num_layers); int64_t get_num_layers() const; - void set_has_bias(const bool &has_bias); + void set_has_bias(const bool has_bias); bool get_has_bias() const; - void set_dropout(const float &dropout); + void set_dropout(const float dropout); float get_dropout() const; - void set_bidirectional(const bool &bidirectional); + void set_bidirectional(const bool bidirectional); bool get_bidirectional() const; - void set_num_directions(const int64_t &num_directions); + void set_num_directions(const int64_t num_directions); int64_t get_num_directions() const; + int64_t get_good_ld(const int64_t dim, const int64_t type_size); }; +AbstractBasePtr LstmInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args); +using PrimLstmPtr = std::shared_ptr; +} // namespace ops } // namespace mindspore -#endif // MINDSPORE_CORE_C_OPS_LSTM_H_ +#endif // MINDSPORE_CORE_OPS_LSTM_H_ diff --git a/mindspore/core/ops/make_tuple.cc b/mindspore/core/ops/make_tuple.cc new file mode 100644 index 00000000000..0b25ec00384 --- /dev/null +++ b/mindspore/core/ops/make_tuple.cc @@ -0,0 +1,31 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
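// Usage sketch (illustrative, assumes the surrounding mindspore/core build environment):
// LSTM::Init above validates input_size/hidden_size/num_layers, checks dropout against the
// range [0.0, 1.0], and derives num_directions from the bidirectional flag, so callers set
// the direction count only indirectly.
#include <cstdint>

#include "ops/lstm.h"

void ConfigureLstm() {
  mindspore::ops::LSTM lstm;
  lstm.Init(/*input_size=*/128, /*hidden_size=*/256, /*num_layers=*/2,
            /*has_bias=*/true, /*dropout=*/0.1f, /*bidirectional=*/true);
  int64_t directions = lstm.get_num_directions();  // 2, because bidirectional is true
  (void)directions;
}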
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "ops/make_tuple.h" +#include +#include +#include +#include +#include +#include "ops/op_utils.h" +#include "utils/check_convert_utils.h" +#include "abstract/primitive_infer_map.h" + +namespace mindspore { +namespace ops { +REGISTER_PRIMITIVE_C(kNameMakeTuple, MakeTuple); +} // namespace ops +} // namespace mindspore diff --git a/mindspore/core/ops/make_tuple.h b/mindspore/core/ops/make_tuple.h new file mode 100644 index 00000000000..e082c31f559 --- /dev/null +++ b/mindspore/core/ops/make_tuple.h @@ -0,0 +1,40 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_CORE_OPS_MAKE_TUPLE_H_ +#define MINDSPORE_CORE_OPS_MAKE_TUPLE_H_ +#include +#include +#include +#include +#include "ops/primitive_c.h" +#include "abstract/abstract_value.h" +#include "utils/check_convert_utils.h" + +namespace mindspore { +namespace ops { +constexpr auto kNameMakeTuple = "MakeTuple"; +class MakeTuple : public PrimitiveC { + public: + MakeTuple() : PrimitiveC(kNameMakeTuple) {} + ~MakeTuple() = default; + MS_DECLARE_PARENT(MakeTuple, PrimitiveC); + void Init() {} +}; +} // namespace ops +} // namespace mindspore + +#endif // MINDSPORE_CORE_OPS_MAKE_TUPLE_H_ diff --git a/mindspore/core/ops/mat_mul.cc b/mindspore/core/ops/mat_mul.cc new file mode 100644 index 00000000000..6d6c77e3e28 --- /dev/null +++ b/mindspore/core/ops/mat_mul.cc @@ -0,0 +1,26 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include +#include +#include +#include "ops/mat_mul.h" + +namespace mindspore { +namespace ops { +REGISTER_PRIMITIVE_C(kNameMatMul, MatMul); +} // namespace ops +} // namespace mindspore diff --git a/mindspore/core/ops/mat_mul.h b/mindspore/core/ops/mat_mul.h new file mode 100644 index 00000000000..57601487bfa --- /dev/null +++ b/mindspore/core/ops/mat_mul.h @@ -0,0 +1,44 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_CORE_OPS_MAT_MUL_H_ +#define MINDSPORE_CORE_OPS_MAT_MUL_H_ +#include +#include + +#include "ops/primitive_c.h" +#include "ops/op_utils.h" +#include "abstract/abstract_value.h" +#include "utils/check_convert_utils.h" + +namespace mindspore { +namespace ops { +constexpr auto kNameMatMul = "MatMul"; +class MatMul : public PrimitiveC { + public: + MatMul() : PrimitiveC(kNameMatMul) { InitIOName({"x1", "x2"}, {"output"}); } + ~MatMul() = default; + MS_DECLARE_PARENT(MatMul, PrimitiveC); + void Init(bool transpose_a = false, bool transpose_b = false); + void set_transpose_a(bool transpose_a); + void set_transpose_b(bool transpose_b); + bool get_transpose_a() const; + bool get_transpose_b() const; +}; +using PrimMatMulPtr = std::shared_ptr; +} // namespace ops +} // namespace mindspore +#endif // MINDSPORE_CORE_OPS_MAT_MUL_H_ diff --git a/mindspore/core/ops/matrix_diag.cc b/mindspore/core/ops/matrix_diag.cc new file mode 100644 index 00000000000..98e56fa3d53 --- /dev/null +++ b/mindspore/core/ops/matrix_diag.cc @@ -0,0 +1,86 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "ops/matrix_diag.h" +#include +#include +#include +#include +#include +#include "ops/op_utils.h" +#include "utils/check_convert_utils.h" +#include "abstract/primitive_infer_map.h" + +namespace mindspore { +namespace ops { +namespace { +abstract::ShapePtr InferShape(const PrimitivePtr &primitive, const std::vector &input_args) { + MS_EXCEPTION_IF_NULL(primitive); + auto matrixdiag_prim = primitive->cast(); + MS_EXCEPTION_IF_NULL(matrixdiag_prim); + auto prim_name = matrixdiag_prim->name(); + auto x_shape = CheckAndConvertUtils::ConvertShapePtrToShape("x_shape", input_args[0]->BuildShape(), prim_name); + auto assist_shape = + CheckAndConvertUtils::ConvertShapePtrToShape("assist_shape", input_args[1]->BuildShape(), prim_name); + + CheckAndConvertUtils::CheckInteger("assist rank", (int64_t)assist_shape.size(), kGreaterEqual, 2, prim_name); + CheckAndConvertUtils::Check("x_shape rank", (int64_t)x_shape.size() + 1, kLessEqual, "assist rank", + (int64_t)assist_shape.size(), prim_name); + CheckAndConvertUtils::Check("assist's penultimate dimension", assist_shape[(int64_t)assist_shape.size() - 2], kEqual, + "assist's last dimension", assist_shape[(int64_t)assist_shape.size() - 1], prim_name); + + int64_t x_end_dim = x_shape.size() - 1; + int64_t assist_end_dim = assist_shape.size() - 1; + while (x_end_dim >= 0) { + if (x_shape[x_end_dim] != 1) { + CheckAndConvertUtils::Check("reverse x dim", x_shape[x_end_dim], kEqual, "reverse assist dim", + assist_shape[assist_end_dim - 1], prim_name); + } + x_end_dim--; + assist_end_dim--; + } + return std::make_shared(assist_shape); +} + +TypePtr InferType(const PrimitivePtr &prim, const std::vector &input_args) { + for (const auto &item : input_args) { + MS_EXCEPTION_IF_NULL(item); + } + const std::set valid_types = {kNumberTypeInt8, kNumberTypeInt32, kNumberTypeUInt8, kNumberTypeFloat16, + kNumberTypeFloat32}; + std::map types; + types.emplace("x", input_args[0]->BuildType()); + types.emplace("assist", input_args[1]->BuildType()); + CheckAndConvertUtils::CheckTensorTypeSame(types, valid_types, prim->name()); + auto type = input_args[0]->BuildType(); + MS_EXCEPTION_IF_NULL(type); + auto tensor_type = type->cast(); + MS_EXCEPTION_IF_NULL(tensor_type); + auto data_type = tensor_type->element(); + MS_EXCEPTION_IF_NULL(data_type); + return data_type; +} +} // namespace + +AbstractBasePtr MatrixDiagInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args) { + return std::make_shared(InferType(primitive, input_args), + InferShape(primitive, input_args)->shape()); +} +REGISTER_PRIMITIVE_EVAL_IMPL(MatrixDiag, prim::kPrimMatrixDiag, MatrixDiagInfer); +REGISTER_PRIMITIVE_C(kNameMatrixDiag, MatrixDiag); +} // namespace ops +} // namespace mindspore diff --git a/mindspore/core/ops/matrix_diag.h b/mindspore/core/ops/matrix_diag.h new file mode 100644 index 00000000000..d12b65da0ad --- /dev/null +++ b/mindspore/core/ops/matrix_diag.h @@ -0,0 +1,43 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_CORE_OPS_MATRIX_DIAG_H_ +#define MINDSPORE_CORE_OPS_MATRIX_DIAG_H_ +#include +#include +#include +#include +#include "ops/primitive_c.h" +#include "abstract/abstract_value.h" +#include "utils/check_convert_utils.h" + +namespace mindspore { +namespace ops { +constexpr auto kNameMatrixDiag = "MatrixDiag"; +class MatrixDiag : public PrimitiveC { + public: + MatrixDiag() : PrimitiveC(kNameMatrixDiag) {} + ~MatrixDiag() = default; + MS_DECLARE_PARENT(MatrixDiag, PrimitiveC); + void Init() {} +}; +AbstractBasePtr MatrixDiagInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args); +using PrimMatrixDiagPtr = std::shared_ptr; +} // namespace ops +} // namespace mindspore + +#endif // MINDSPORE_CORE_OPS_MATRIX_DIAG_H_ diff --git a/mindspore/core/ops/max_pool.cc b/mindspore/core/ops/max_pool.cc new file mode 100644 index 00000000000..b43f50c8a4c --- /dev/null +++ b/mindspore/core/ops/max_pool.cc @@ -0,0 +1,152 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "ops/max_pool.h" +#include +#include +#include +#include +#include +#include "ops/op_utils.h" +#include "utils/check_convert_utils.h" +#include "abstract/primitive_infer_map.h" + +namespace mindspore { +namespace ops { +void MaxPool::set_pad_mode(const PadMode &pad_mode) { + int64_t swi = pad_mode; + this->AddAttr(kPadMode, MakeValue(swi)); +} + +PadMode MaxPool::get_pad_mode() const { + auto value_ptr = GetAttr(kPadMode); + return PadMode(GetValue(value_ptr)); +} +void MaxPool::set_kernel_size(const std::vector &kernel_size) { + this->AddAttr(kKernelSize, MakeValue(CheckAndConvertUtils::CheckPositiveVector(kKernelSize, kernel_size, this->name(), + false, true))); +} + +std::vector MaxPool::get_kernel_size() const { + auto value_ptr = GetAttr(kKernelSize); + return GetValue>(value_ptr); +} +void MaxPool::set_strides(const std::vector &strides) { + this->AddAttr(kStrides, + MakeValue(CheckAndConvertUtils::CheckPositiveVector(kStrides, strides, this->name(), false, true))); +} + +std::vector MaxPool::get_strides() const { + auto value_ptr = GetAttr(kStrides); + return GetValue>(value_ptr); +} + +void MaxPool::set_format(const Format &format) { + int64_t f = format; + this->AddAttr(kFormat, MakeValue(f)); +} + +Format MaxPool::get_format() const { + auto value_ptr = GetAttr(kFormat); + return Format(GetValue(value_ptr)); +} + +void MaxPool::set_pad(const std::vector &pad) { this->AddAttr(kPad, MakeValue(pad)); } + +std::vector MaxPool::get_pad() const { + auto value_ptr = GetAttr(kPad); + return GetValue>(value_ptr); +} + +void MaxPool::set_round_mode(const RoundMode &round_mode) { + int64_t swi = round_mode; + this->AddAttr(kRoundMode, MakeValue(swi)); +} + +RoundMode MaxPool::get_round_mode() const { + auto value_ptr = GetAttr(kRoundMode); + return RoundMode(GetValue(value_ptr)); +} + 
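// Illustrative sketch: the InferShape further down in this file computes the pooled height
// and width from the pad mode. Because the extracted shape values are int64, an expression
// like ceil((in_h - (kernel_h - 1)) / stride_h) divides in integer arithmetic before ceil is
// applied, so the conventional formulas are usually written with an explicit integer
// ceil-division as below. CeilDiv and the example numbers are illustrative, not MindSpore APIs.
#include <cstdint>

inline int64_t CeilDiv(int64_t a, int64_t b) { return (a + b - 1) / b; }

// VALID: only complete windows contribute, output = ceil((in - kernel + 1) / stride).
inline int64_t PooledExtentValid(int64_t in, int64_t kernel, int64_t stride) {
  return CeilDiv(in - (kernel - 1), stride);
}

// SAME: the input is padded so that output = ceil(in / stride).
inline int64_t PooledExtentSame(int64_t in, int64_t stride) { return CeilDiv(in, stride); }
// Example: in = 224, kernel = 3, stride = 2 gives 111 for VALID and 112 for SAME.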
+void MaxPool::Init(const std::vector &kernel_size, const std::vector &stride, const PadMode &pad_mode, + const Format &format, const std::vector &pad, const RoundMode &round_mode) { + this->set_pad_mode(pad_mode); + this->set_kernel_size(kernel_size); + this->set_strides(stride); + this->set_format(format); + this->set_pad(pad); + this->set_round_mode(round_mode); +} + +namespace { +abstract::ShapePtr InferShape(const PrimitivePtr &primitive, const std::vector &input_args) { + MS_EXCEPTION_IF_NULL(primitive); + auto pool_prim = primitive->cast(); + MS_EXCEPTION_IF_NULL(pool_prim); + auto op_name = pool_prim->name(); + auto in_shape = CheckAndConvertUtils::ConvertShapePtrToShape("x_shape", input_args[0]->GetShapeTrack(), op_name); + if (pool_prim->get_format() == NHWC) { + in_shape = {in_shape[0], in_shape[3], in_shape[1], in_shape[2]}; + } + CheckAndConvertUtils::CheckInteger("x_rank", in_shape.size(), kEqual, 4, op_name); + auto kernel_size = pool_prim->get_kernel_size(); + auto pad_mode = pool_prim->get_pad_mode(); + auto batch = in_shape[0]; + auto channel = in_shape[1]; + auto in_h = in_shape[2]; + auto in_w = in_shape[3]; + + auto strides = pool_prim->get_strides(); + auto kernel_h = kernel_size[2]; + auto kernel_w = kernel_size[3]; + auto stride_h = strides[2]; + auto stride_w = strides[3]; + int64_t out_h = -1; + int64_t out_w = -1; + if (pad_mode == VALID) { + out_h = ceil((in_h - (kernel_h - 1)) / stride_h); + out_w = ceil((in_w - (kernel_w - 1)) / stride_w); + } else if (pad_mode == SAME) { + out_h = ceil(in_h / stride_h); + out_w = ceil(in_w / stride_w); + } + std::vector out_shape = {batch, channel, out_h, out_w}; + if (pool_prim->get_format() == NHWC) { + out_shape = {batch, out_h, out_w, channel}; + } + if (std::any_of(out_shape.begin(), out_shape.end(), [](int64_t a) { return a <= 0; })) { + MS_LOG(EXCEPTION) << "Kernel size is not valid."; + } + return std::make_shared(out_shape); +} + +TypePtr InferType(const PrimitivePtr &prim, const std::vector &input_args) { + if (std::any_of(input_args.begin(), input_args.end(), [](AbstractBasePtr a) { return a == nullptr; })) { + MS_LOG(EXCEPTION) << "nullptr"; + } + return input_args[0]->BuildType(); +} +} // namespace + +AbstractBasePtr MaxPoolInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args) { + return std::make_shared(InferType(primitive, input_args), + InferShape(primitive, input_args)->shape()); +} +REGISTER_PRIMITIVE_EVAL_IMPL(MaxPool, prim::kPrimMaxPool, MaxPoolInfer); +REGISTER_PRIMITIVE_C(kNameMaxPool, MaxPool); +} // namespace ops +} // namespace mindspore diff --git a/mindspore/core/ops/max_pool.h b/mindspore/core/ops/max_pool.h new file mode 100644 index 00000000000..c0c6e93e243 --- /dev/null +++ b/mindspore/core/ops/max_pool.h @@ -0,0 +1,61 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef MINDSPORE_CORE_OPS_MAX_POOL_H_ +#define MINDSPORE_CORE_OPS_MAX_POOL_H_ + +#include +#include +#include +#include +#include "ops/primitive_c.h" +#include "abstract/abstract_value.h" +#include "utils/check_convert_utils.h" + +namespace mindspore { +namespace ops { +constexpr auto kNameMaxPool = "MaxPool"; +class MaxPool : public PrimitiveC { + public: + MaxPool() : PrimitiveC(kNameMaxPool) { InitIOName({"x"}, {"output"}); } + explicit MaxPool(const std::string k_name) : PrimitiveC(k_name) { InitIOName({"x"}, {"output"}); } + ~MaxPool() = default; + MS_DECLARE_PARENT(MaxPool, PrimitiveC); + void Init(const std::vector &kernel_size = {1}, const std::vector &stride = {1}, + const PadMode &pad_mode = VALID, const Format &format = NCHW, + const std::vector &pad = {0, 0, 0, 0}, const RoundMode &round_mode = FLOOR); + void set_pad_mode(const PadMode &pad_mode); + void set_kernel_size(const std::vector &kernel_size); + void set_strides(const std::vector &strides); + void set_format(const Format &format); + void set_pad(const std::vector &pad); + void set_round_mode(const RoundMode &round_mode); + + std::vector get_kernel_size() const; + std::vector get_strides() const; + PadMode get_pad_mode() const; + Format get_format() const; + std::vector get_pad() const; + RoundMode get_round_mode() const; +}; + +AbstractBasePtr MaxPoolInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args); +using PrimMaxPoolPtr = std::shared_ptr; +} // namespace ops +} // namespace mindspore + +#endif // MINDSPORE_CORE_OPS_MAX_POOL_H_ diff --git a/mindspore/core/ops/maximum.cc b/mindspore/core/ops/maximum.cc new file mode 100644 index 00000000000..2039069b4d2 --- /dev/null +++ b/mindspore/core/ops/maximum.cc @@ -0,0 +1,54 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include +#include +#include "ops/maximum.h" +#include "ops/op_utils.h" +#include "utils/check_convert_utils.h" + +namespace mindspore { +namespace ops { +namespace { +abstract::ShapePtr InferShape(const PrimitivePtr &primitive, const std::vector &input_args) { + MS_EXCEPTION_IF_NULL(primitive); + auto maximum_prim = primitive->cast(); + MS_EXCEPTION_IF_NULL(maximum_prim); + auto op_name = maximum_prim->name(); + return BroadCastInferShape(op_name, input_args); +} + +TypePtr InferType(const PrimitivePtr &prim, const std::vector &input_args) { + for (const auto &item : input_args) { + MS_EXCEPTION_IF_NULL(item); + } + std::map types; + types.emplace("x", input_args[0]->BuildType()); + types.emplace("y", input_args[1]->BuildType()); + auto infer_type = CheckAndConvertUtils::CheckTensorTypeSame(types, common_valid_types, prim->name()); + return TypeIdToType(infer_type); +} +} // namespace + +AbstractBasePtr MaximumInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args) { + return std::make_shared(InferType(primitive, input_args), + InferShape(primitive, input_args)->shape()); +} +REGISTER_PRIMITIVE_EVAL_IMPL(Maximum, prim::kPrimMaximum, MaximumInfer); +REGISTER_PRIMITIVE_C(kNameMaximum, Maximum); +} // namespace ops +} // namespace mindspore diff --git a/mindspore/core/ops/maximum.h b/mindspore/core/ops/maximum.h new file mode 100644 index 00000000000..3550d80b901 --- /dev/null +++ b/mindspore/core/ops/maximum.h @@ -0,0 +1,40 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_CORE_OPS_MAXIMUM_H_ +#define MINDSPORE_CORE_OPS_MAXIMUM_H_ + +#include +#include + +#include "ops/primitive_c.h" + +namespace mindspore { +namespace ops { +constexpr auto kNameMaximum = "Maximum"; +class Maximum : public PrimitiveC { + public: + Maximum() : PrimitiveC(kNameMaximum) { InitIOName({"x", "y"}, {"output"}); } + ~Maximum() = default; + MS_DECLARE_PARENT(Maximum, PrimitiveC); + void Init() {} +}; +AbstractBasePtr MaximumInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args); +using PrimMaximumPtr = std::shared_ptr; +} // namespace ops +} // namespace mindspore +#endif // MINDSPORE_CORE_OPS_MAXIMUM_H_ diff --git a/mindspore/core/ops/merge.cc b/mindspore/core/ops/merge.cc new file mode 100644 index 00000000000..6378a2a56fb --- /dev/null +++ b/mindspore/core/ops/merge.cc @@ -0,0 +1,59 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include +#include +#include +#include +#include + +#include "ops/merge.h" +#include "ops/op_utils.h" + +namespace mindspore { +namespace ops { + +AbstractBasePtr MergeInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args) { + MS_EXCEPTION_IF_NULL(primitive); + auto Merge_prim = primitive->cast(); + MS_EXCEPTION_IF_NULL(Merge_prim); + auto op_name = Merge_prim->name(); + auto inputs_type = input_args[0]->BuildType()->cast()->elements(); + auto inputs_shape = input_args[0]->BuildShape()->cast()->shape(); + std::map args; + for (int64_t i = 0; i != (int64_t)inputs_type.size(); i++) { + args.insert({"input[" + std::to_string(i) + "]", inputs_type[i]}); + } + std::set template_type = {TypeIdToType(kNumberTypeBool)}; + for (auto item : common_valid_types) { + template_type.insert(TypeIdToType(item)); + } + CheckAndConvertUtils::CheckScalarOrTensorTypesSame(args, template_type, op_name); + std::vector in_shape0 = inputs_shape[0]->cast()->shape(); + + auto output1 = + std::make_shared(inputs_type[0]->cast()->element(), in_shape0); + auto output2 = std::make_shared(TypeIdToType(kNumberTypeInt32), std::vector{1}); + + AbstractBasePtrList output = {output1, output2}; + return std::make_shared(output); +} + +REGISTER_PRIMITIVE_EVAL_IMPL(Merge, prim::kPrimMerge, MergeInfer); +REGISTER_PRIMITIVE_C(kNameMerge, Merge); +} // namespace ops +} // namespace mindspore diff --git a/mindspore/core/ops/merge.h b/mindspore/core/ops/merge.h new file mode 100644 index 00000000000..7268e36fa45 --- /dev/null +++ b/mindspore/core/ops/merge.h @@ -0,0 +1,42 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_CORE_OPS_MERGE_H_ +#define MINDSPORE_CORE_OPS_MERGE_H_ +#include +#include +#include "ops/primitive_c.h" +#include "abstract/abstract_value.h" +#include "utils/check_convert_utils.h" + +namespace mindspore { +namespace ops { +constexpr auto kNameMerge = "Merge"; +class Merge : public PrimitiveC { + public: + Merge() : PrimitiveC(kNameMerge) {} + ~Merge() = default; + MS_DECLARE_PARENT(Merge, PrimitiveC); + void Init() {} +}; + +AbstractBasePtr MergeInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args); +using PrimMergePtr = std::shared_ptr; +} // namespace ops +} // namespace mindspore + +#endif // MINDSPORE_CORE_OPS_MERGE_H_ diff --git a/mindspore/core/ops/mfcc.cc b/mindspore/core/ops/mfcc.cc new file mode 100644 index 00000000000..2d7e66f8ff2 --- /dev/null +++ b/mindspore/core/ops/mfcc.cc @@ -0,0 +1,99 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "ops/mfcc.h" +#include "ops/op_utils.h" +#include "utils/check_convert_utils.h" +#include "abstract/primitive_infer_map.h" + +namespace mindspore { +namespace ops { +namespace { +abstract::ShapePtr InferShape(const PrimitivePtr &primitive, const std::vector &input_args) { + MS_EXCEPTION_IF_NULL(primitive); + auto mfcc_prim = primitive->cast(); + MS_EXCEPTION_IF_NULL(mfcc_prim); + auto prim_name = mfcc_prim->name(); + auto first_input_shape = + CheckAndConvertUtils::ConvertShapePtrToShape("first_input_shape", input_args[0]->BuildShape(), prim_name); + auto second_input_shape = + CheckAndConvertUtils::ConvertShapePtrToShape("second_input_shape", input_args[1]->BuildShape(), prim_name); + CheckAndConvertUtils::CheckInteger("first input rank", first_input_shape.size(), kEqual, 3, prim_name); + CheckAndConvertUtils::CheckInteger("second input rank", second_input_shape.size(), kEqual, 1, prim_name); + std::vector out_shape = {first_input_shape[0], first_input_shape[1], mfcc_prim->get_dct_coeff_num()}; + return std::make_shared(out_shape); +} + +TypePtr InferType(const PrimitivePtr &prim, const std::vector &input_args) { + for (const auto &item : input_args) { + MS_EXCEPTION_IF_NULL(item); + } + auto infer_type = input_args[0]->BuildType()->cast()->element(); + return infer_type; +} +} // namespace + +void Mfcc::Init(const float freq_upper_limit, const float freq_lower_limit, const int64_t filter_bank_channel_num, + const int64_t dct_coeff_num) { + this->set_freq_upper_limit(freq_upper_limit); + this->set_freq_lower_limit(freq_lower_limit); + this->set_filter_bank_channel_num(filter_bank_channel_num); + this->set_dct_coeff_num(dct_coeff_num); +} + +void Mfcc::set_freq_upper_limit(const float freq_upper_limit) { + this->AddAttr(kFreqUpperLimit, MakeValue(freq_upper_limit)); +} + +float Mfcc::get_freq_upper_limit() const { + auto value_ptr = this->GetAttr(kFreqUpperLimit); + return GetValue(value_ptr); +} + +void Mfcc::set_freq_lower_limit(const float freq_lower_limit) { + this->AddAttr(kFreqLowerLimit, MakeValue(freq_lower_limit)); +} + +float Mfcc::get_freq_lower_limit() const { + auto value_ptr = this->GetAttr(kFreqLowerLimit); + return GetValue(value_ptr); +} + +void Mfcc::set_filter_bank_channel_num(const int64_t filter_bank_channel_num) { + this->AddAttr(kFilterBankChannelNum, MakeValue(filter_bank_channel_num)); +} + +int64_t Mfcc::get_filter_bank_channel_num() const { + auto value_ptr = this->GetAttr(kFilterBankChannelNum); + return GetValue(value_ptr); +} + +void Mfcc::set_dct_coeff_num(const int64_t dct_coeff_num) { this->AddAttr(kDctCoeffNum, MakeValue(dct_coeff_num)); } + +int64_t Mfcc::get_dct_coeff_num() const { + auto value_ptr = this->GetAttr(kDctCoeffNum); + return GetValue(value_ptr); +} + +AbstractBasePtr MfccInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args) { + return std::make_shared(InferType(primitive, input_args), + InferShape(primitive, input_args)->shape()); +} +REGISTER_PRIMITIVE_EVAL_IMPL(Mfcc, prim::kPrimMfcc, MfccInfer); +REGISTER_PRIMITIVE_C(kNameMfcc, Mfcc); +} // namespace ops 
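// Usage sketch (illustrative, assumes the surrounding mindspore/core build environment):
// Mfcc's InferShape above keeps the first two dimensions of the spectrogram-like first input
// and replaces the last one with dct_coeff_num, so a {1, 99, 40} input with dct_coeff_num = 13
// infers a {1, 99, 13} output.
#include <cstdint>

#include "ops/mfcc.h"

void ConfigureMfcc() {
  mindspore::ops::Mfcc mfcc;
  mfcc.Init(/*freq_upper_limit=*/4000.0f, /*freq_lower_limit=*/20.0f,
            /*filter_bank_channel_num=*/40, /*dct_coeff_num=*/13);
  int64_t coeffs = mfcc.get_dct_coeff_num();  // 13, read back from the kDctCoeffNum attr
  (void)coeffs;
}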
+} // namespace mindspore diff --git a/mindspore/core/ops/mfcc.h b/mindspore/core/ops/mfcc.h new file mode 100644 index 00000000000..0975fd61f69 --- /dev/null +++ b/mindspore/core/ops/mfcc.h @@ -0,0 +1,50 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef MINDSPORE_CORE_OPS_MFCC_H_ +#define MINDSPORE_CORE_OPS_MFCC_H_ +#include +#include + +#include "ops/primitive_c.h" +#include "abstract/abstract_value.h" +#include "utils/check_convert_utils.h" + +namespace mindspore { +namespace ops { +constexpr auto kNameMfcc = "Mfcc"; +class Mfcc : public PrimitiveC { + public: + Mfcc() : PrimitiveC(kNameMfcc) {} + ~Mfcc() = default; + MS_DECLARE_PARENT(Mfcc, PrimitiveC); + void Init(const float freq_upper_limit, const float freq_lower_limit, const int64_t filter_bank_channel_num, + const int64_t dct_coeff_num); + void set_freq_upper_limit(const float freq_upper_limit); + void set_freq_lower_limit(const float freq_lower_limit); + void set_filter_bank_channel_num(const int64_t filter_bank_channel_num); + void set_dct_coeff_num(const int64_t dct_coeff_num); + float get_freq_upper_limit() const; + float get_freq_lower_limit() const; + int64_t get_filter_bank_channel_num() const; + int64_t get_dct_coeff_num() const; +}; +AbstractBasePtr MfccInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args); +using PrimMfccPtr = std::shared_ptr; +} // namespace ops +} // namespace mindspore + +#endif // MINDSPORE_CORE_OPS_MFCC_H_ diff --git a/mindspore/core/ops/minimum.cc b/mindspore/core/ops/minimum.cc new file mode 100644 index 00000000000..027be2040f5 --- /dev/null +++ b/mindspore/core/ops/minimum.cc @@ -0,0 +1,58 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "ops/minimum.h" +#include +#include +#include +#include +#include +#include "ops/op_utils.h" +#include "utils/check_convert_utils.h" +#include "abstract/primitive_infer_map.h" + +namespace mindspore { +namespace ops { +namespace { +abstract::ShapePtr InferShape(const PrimitivePtr &primitive, const std::vector &input_args) { + MS_EXCEPTION_IF_NULL(primitive); + auto op_name = primitive->name(); + return BroadCastInferShape(op_name, input_args); +} + +TypePtr InferType(const PrimitivePtr &prim, const std::vector &input_args) { + MS_EXCEPTION_IF_NULL(prim); + CheckAndConvertUtils::CheckInteger("input number", input_args.size(), kEqual, 2, prim->name()); + if (std::any_of(input_args.begin(), input_args.end(), [](const AbstractBasePtr &a) { return a == nullptr; })) { + MS_LOG(EXCEPTION) << "nullptr"; + } + std::map types; + types.emplace("x", input_args[0]->BuildType()); + types.emplace("y", input_args[1]->BuildType()); + auto infer_type = CheckAndConvertUtils::CheckTensorTypeSame(types, common_valid_types, prim->name()); + return TypeIdToType(infer_type); +} +} // namespace + +AbstractBasePtr MinimumInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args) { + return std::make_shared(InferType(primitive, input_args), + InferShape(primitive, input_args)->shape()); +} +REGISTER_PRIMITIVE_EVAL_IMPL(Minimum, prim::kPrimMinimum, MinimumInfer); +REGISTER_PRIMITIVE_C(kNameMinimum, Minimum); +} // namespace ops +} // namespace mindspore diff --git a/mindspore/core/ops/minimum.h b/mindspore/core/ops/minimum.h new file mode 100644 index 00000000000..4dccb391bb8 --- /dev/null +++ b/mindspore/core/ops/minimum.h @@ -0,0 +1,44 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_CORE_OPS_MINIMUM_H_ +#define MINDSPORE_CORE_OPS_MINIMUM_H_ +#include +#include +#include +#include +#include "ops/primitive_c.h" +#include "abstract/abstract_value.h" +#include "utils/check_convert_utils.h" + +namespace mindspore { +namespace ops { +constexpr auto kNameMinimum = "Minimum"; +class Minimum : public PrimitiveC { + public: + Minimum() : PrimitiveC(kNameMinimum) { InitIOName({"x", "y"}, {"output"}); } + ~Minimum() = default; + MS_DECLARE_PARENT(Minimum, PrimitiveC); + void Init() {} +}; + +AbstractBasePtr MinimumInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args); +using PrimMinimumPtr = std::shared_ptr; +} // namespace ops +} // namespace mindspore + +#endif // MINDSPORE_CORE_OPS_ADD_H_ diff --git a/mindspore/core/ops/mod.cc b/mindspore/core/ops/mod.cc new file mode 100644 index 00000000000..a49c2c1b4b4 --- /dev/null +++ b/mindspore/core/ops/mod.cc @@ -0,0 +1,24 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "ops/mod.h" +#include "ops/op_utils.h" + +namespace mindspore { +namespace ops { +REGISTER_PRIMITIVE_C(kNameMod, Mod); +} // namespace ops +} // namespace mindspore diff --git a/mindspore/core/c_ops/div.h b/mindspore/core/ops/mod.h similarity index 68% rename from mindspore/core/c_ops/div.h rename to mindspore/core/ops/mod.h index be624080ad0..1b2af255134 100644 --- a/mindspore/core/c_ops/div.h +++ b/mindspore/core/ops/mod.h @@ -14,21 +14,23 @@ * limitations under the License. */ -#ifndef MINDSPORE_CORE_C_OPS_DIV_H_ -#define MINDSPORE_CORE_C_OPS_DIV_H_ -#include "c_ops/primitive_c.h" +#ifndef MINDSPORE_CORE_OPS_MOD_H_ +#define MINDSPORE_CORE_OPS_MOD_H_ +#include "ops/primitive_c.h" #include "abstract/abstract_value.h" #include "utils/check_convert_utils.h" namespace mindspore { -constexpr auto kNameDiv = "Div"; -class Div : public PrimitiveC { +namespace ops { +constexpr auto kNameMod = "Mod"; +class Mod : public PrimitiveC { public: - Div() : PrimitiveC(kNameDiv) { InitIOName({"x", "y"}, {"output"}); } - ~Div() = default; - MS_DECLARE_PARENT(Div, PrimitiveC); + Mod() : PrimitiveC(kNameMod) { InitIOName({"x", "y"}, {"output"}); } + ~Mod() = default; + MS_DECLARE_PARENT(Mod, PrimitiveC); void Init() {} }; +} // namespace ops } // namespace mindspore -#endif // MINDSPORE_CORE_C_OPS_DIV_H_ +#endif // MINDSPORE_CORE_OPS_MOD_H_ diff --git a/mindspore/core/ops/mul.cc b/mindspore/core/ops/mul.cc new file mode 100644 index 00000000000..f7d1922ccce --- /dev/null +++ b/mindspore/core/ops/mul.cc @@ -0,0 +1,31 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "ops/mul.h" +#include +#include +#include +#include +#include +#include "ops/op_utils.h" +#include "utils/check_convert_utils.h" +#include "abstract/primitive_infer_map.h" + +namespace mindspore { +namespace ops { +REGISTER_PRIMITIVE_C(kNameMul, Mul); +} // namespace ops +} // namespace mindspore diff --git a/mindspore/core/ops/mul.h b/mindspore/core/ops/mul.h new file mode 100644 index 00000000000..dd303dfb6b7 --- /dev/null +++ b/mindspore/core/ops/mul.h @@ -0,0 +1,41 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_CORE_OPS_MUL_H_ +#define MINDSPORE_CORE_OPS_MUL_H_ +#include +#include +#include +#include +#include "ops/primitive_c.h" +#include "abstract/abstract_value.h" +#include "utils/check_convert_utils.h" + +namespace mindspore { +namespace ops { +constexpr auto kNameMul = "Mul"; +class Mul : public PrimitiveC { + public: + Mul() : PrimitiveC(kNameMul) { InitIOName({"x", "y"}, {"output"}); } + explicit Mul(const std::string k_name) : PrimitiveC(k_name) { InitIOName({"x", "y"}, {"output"}); } + ~Mul() = default; + MS_DECLARE_PARENT(Mul, PrimitiveC); + void Init() {} +}; +} // namespace ops +} // namespace mindspore + +#endif // MINDSPORE_CORE_OPS_MUL_H_ diff --git a/mindspore/core/c_ops/addn.cc b/mindspore/core/ops/mul_fold.cc similarity index 86% rename from mindspore/core/c_ops/addn.cc rename to mindspore/core/ops/mul_fold.cc index b8071c5c8cf..5f9c486e4ff 100644 --- a/mindspore/core/c_ops/addn.cc +++ b/mindspore/core/ops/mul_fold.cc @@ -14,8 +14,10 @@ * limitations under the License. */ -#include "c_ops/addn.h" +#include "ops/mul_fold.h" namespace mindspore { -REGISTER_PRIMITIVE_C(kNameAddN, AddN); +namespace ops { +REGISTER_PRIMITIVE_C(kNameMulFold, MulFold); +} // namespace ops } // namespace mindspore diff --git a/mindspore/core/ops/mul_fold.h b/mindspore/core/ops/mul_fold.h new file mode 100644 index 00000000000..87ccb6dfee1 --- /dev/null +++ b/mindspore/core/ops/mul_fold.h @@ -0,0 +1,44 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_CORE_OPS_MUL_FOLD_H_ +#define MINDSPORE_CORE_OPS_MUL_FOLD_H_ + +#include +#include +#include +#include +#include +#include "ops/op_utils.h" +#include "ops/primitive_c.h" +#include "abstract/primitive_infer_map.h" +#include "abstract/abstract_value.h" +#include "utils/check_convert_utils.h" + +namespace mindspore { +namespace ops { +constexpr auto kNameMulFold = "MulFold"; +class MulFold : public PrimitiveC { + public: + MulFold() : PrimitiveC(kNameMulFold) {} + ~MulFold() = default; + MS_DECLARE_PARENT(MulFold, PrimitiveC); + void Init() {} +}; +} // namespace ops +} // namespace mindspore + +#endif // MINDSPORE_CORE_OPS_MUL_FOLD_H_ diff --git a/mindspore/core/ops/neg.cc b/mindspore/core/ops/neg.cc new file mode 100644 index 00000000000..8525e0dc02a --- /dev/null +++ b/mindspore/core/ops/neg.cc @@ -0,0 +1,40 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include + +#include "ops/neg.h" +#include "ops/op_utils.h" +#include "utils/check_convert_utils.h" +#include "abstract/primitive_infer_map.h" + +namespace mindspore { +namespace ops { +AbstractBasePtr NegInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args) { + MS_EXCEPTION_IF_NULL(primitive); + auto prim_name = primitive->name(); + CheckAndConvertUtils::CheckInteger("input number", input_args.size(), kEqual, 1, prim_name); + for (const auto &item : input_args) { + MS_EXCEPTION_IF_NULL(item); + } + CheckAndConvertUtils::CheckTensorTypeValid("input_x", input_args[0]->BuildType(), common_valid_types, prim_name); + return input_args[0]->Broaden(); +} +REGISTER_PRIMITIVE_EVAL_IMPL(Neg, prim::kPrimNeg, NegInfer); +REGISTER_PRIMITIVE_C(kNameNeg, Neg); +} // namespace ops +} // namespace mindspore diff --git a/mindspore/core/ops/neg.h b/mindspore/core/ops/neg.h new file mode 100644 index 00000000000..a428c9c9174 --- /dev/null +++ b/mindspore/core/ops/neg.h @@ -0,0 +1,42 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_CORE_OPS_NEG_H_ +#define MINDSPORE_CORE_OPS_NEG_H_ +#include +#include +#include "ops/primitive_c.h" +#include "abstract/abstract_value.h" +#include "utils/check_convert_utils.h" + +namespace mindspore { +namespace ops { +constexpr auto kNameNeg = "Neg"; +class Neg : public PrimitiveC { + public: + Neg() : PrimitiveC(kNameNeg) { InitIOName({"x"}, {"y"}); } + ~Neg() = default; + MS_DECLARE_PARENT(Neg, PrimitiveC); + void Init() {} +}; + +AbstractBasePtr NegInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args); +using PrimNegPtr = std::shared_ptr; +} // namespace ops +} // namespace mindspore + +#endif // MINDSPORE_CORE_OPS_NEG_H_ diff --git a/mindspore/core/ops/net_output.cc b/mindspore/core/ops/net_output.cc new file mode 100644 index 00000000000..9f465e0e149 --- /dev/null +++ b/mindspore/core/ops/net_output.cc @@ -0,0 +1,31 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "ops/net_output.h" +#include +#include +#include +#include +#include +#include "ops/op_utils.h" +#include "utils/check_convert_utils.h" +#include "abstract/primitive_infer_map.h" + +namespace mindspore { +namespace ops { +REGISTER_PRIMITIVE_C(kNameNetOutput, NetOutput); +} // namespace ops +} // namespace mindspore diff --git a/mindspore/core/ops/net_output.h b/mindspore/core/ops/net_output.h new file mode 100644 index 00000000000..31d7ac02315 --- /dev/null +++ b/mindspore/core/ops/net_output.h @@ -0,0 +1,40 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_CORE_OPS_NET_OUTPUT_H_ +#define MINDSPORE_CORE_OPS_NET_OUTPUT_H_ +#include +#include +#include +#include +#include "ops/primitive_c.h" +#include "abstract/abstract_value.h" +#include "utils/check_convert_utils.h" + +namespace mindspore { +namespace ops { +constexpr auto kNameNetOutput = "NetOutput"; +class NetOutput : public PrimitiveC { + public: + NetOutput() : PrimitiveC(kNameNetOutput) {} + ~NetOutput() = default; + MS_DECLARE_PARENT(NetOutput, PrimitiveC); + void Init() {} +}; +} // namespace ops +} // namespace mindspore + +#endif // MINDSPORE_CORE_OPS_NET_OUTPUT_H_ diff --git a/mindspore/core/ops/non_max_suppression.cc b/mindspore/core/ops/non_max_suppression.cc new file mode 100644 index 00000000000..e744d072114 --- /dev/null +++ b/mindspore/core/ops/non_max_suppression.cc @@ -0,0 +1,45 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include + +#include "ops/non_max_suppression.h" + +namespace mindspore { +namespace ops { + +void NonMaxSuppression::set_center_point_box(const int64_t center_point_box) { + AddAttr(kCenterPointBox, MakeValue(center_point_box)); +} +int64_t NonMaxSuppression::get_center_point_box() const { + auto value_ptr = this->GetAttr(kCenterPointBox); + return GetValue(value_ptr); +} +void NonMaxSuppression::Init(const int64_t center_point_box) { this->set_center_point_box(center_point_box); } + +AbstractBasePtr NonMaxSuppressionInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args) { + MS_EXCEPTION_IF_NULL(primitive); + auto non_max_suppression_prim = primitive->cast(); + MS_EXCEPTION_IF_NULL(non_max_suppression_prim); + MS_LOG(INFO) << "NonMaxSuppression infer shape in runtime."; + return std::make_shared(TypeIdToType(kNumberTypeInt32), std::vector{}); +} + +REGISTER_PRIMITIVE_EVAL_IMPL(NonMaxSuppression, prim::kPrimNonMaxSuppression, NonMaxSuppressionInfer); +REGISTER_PRIMITIVE_C(kNameNonMaxSuppression, NonMaxSuppression); +} // namespace ops +} // namespace mindspore diff --git a/mindspore/core/ops/non_max_suppression.h b/mindspore/core/ops/non_max_suppression.h new file mode 100644 index 00000000000..fac7d7261d7 --- /dev/null +++ b/mindspore/core/ops/non_max_suppression.h @@ -0,0 +1,49 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_CORE_OPS_NON_MAX_SUPPRESSION_H_ +#define MINDSPORE_CORE_OPS_NON_MAX_SUPPRESSION_H_ + +#include +#include +#include +#include +#include +#include "ops/op_utils.h" +#include "ops/primitive_c.h" +#include "abstract/primitive_infer_map.h" +#include "abstract/abstract_value.h" +#include "utils/check_convert_utils.h" + +namespace mindspore { +namespace ops { +constexpr auto kNameNonMaxSuppression = "NonMaxSuppression"; +class NonMaxSuppression : public PrimitiveC { + public: + NonMaxSuppression() : PrimitiveC(kNameNonMaxSuppression) {} + ~NonMaxSuppression() = default; + MS_DECLARE_PARENT(NonMaxSuppression, PrimitiveC); + void Init(const int64_t center_point_box = 0); + void set_center_point_box(const int64_t center_point_box); + int64_t get_center_point_box() const; +}; +AbstractBasePtr NonMaxSuppressionInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args); +using PrimNonMaxSuppressionPtr = std::shared_ptr; +} // namespace ops +} // namespace mindspore + +#endif // MINDSPORE_CORE_OPS_NON_MAX_SUPPRESSION_H_ diff --git a/mindspore/core/c_ops/asin.cc b/mindspore/core/ops/not_equal.cc similarity index 85% rename from mindspore/core/c_ops/asin.cc rename to mindspore/core/ops/not_equal.cc index a2e228e0dcf..c303ee7159e 100644 --- a/mindspore/core/c_ops/asin.cc +++ b/mindspore/core/ops/not_equal.cc @@ -14,8 +14,10 @@ * limitations under the License. 
*/ -#include "c_ops/asin.h" +#include "ops/not_equal.h" namespace mindspore { -REGISTER_PRIMITIVE_C(kNameAsin, Asin); +namespace ops { +REGISTER_PRIMITIVE_C(kNameNotEqual, NotEqual); +} // namespace ops } // namespace mindspore diff --git a/mindspore/core/ops/not_equal.h b/mindspore/core/ops/not_equal.h new file mode 100644 index 00000000000..0ed50774001 --- /dev/null +++ b/mindspore/core/ops/not_equal.h @@ -0,0 +1,36 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_CORE_OPS_NOT_EQUAL_H_ +#define MINDSPORE_CORE_OPS_NOT_EQUAL_H_ +#include "ops/primitive_c.h" +#include "abstract/abstract_value.h" +#include "utils/check_convert_utils.h" + +namespace mindspore { +namespace ops { +constexpr auto kNameNotEqual = "NotEqual"; +class NotEqual : public PrimitiveC { + public: + NotEqual() : PrimitiveC(kNameNotEqual) { InitIOName({"x", "y"}, {"output"}); } + ~NotEqual() = default; + MS_DECLARE_PARENT(NotEqual, PrimitiveC); + void Init() {} +}; +} // namespace ops +} // namespace mindspore + +#endif // MINDSPORE_CORE_OPS_NOT_EQUAL_H_ diff --git a/mindspore/core/ops/one_hot.cc b/mindspore/core/ops/one_hot.cc new file mode 100644 index 00000000000..935473203b5 --- /dev/null +++ b/mindspore/core/ops/one_hot.cc @@ -0,0 +1,78 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include +#include +#include "ops/one_hot.h" +#include "ops/op_utils.h" +#include "utils/check_convert_utils.h" + +namespace mindspore { +namespace ops { +void OneHot::Init(const int64_t axis) { this->set_axis(axis); } +void OneHot::set_axis(const int64_t axis) { this->AddAttr(kAxis, MakeValue(axis)); } + +int64_t OneHot::get_axis() const { + auto value_ptr = this->GetAttr(kAxis); + return GetValue(value_ptr); +} +namespace { +abstract::ShapePtr OneHotInferShape(const PrimitivePtr &primitive, const std::vector &input_args) { + MS_EXCEPTION_IF_NULL(primitive); + auto OneHot_prim = primitive->cast(); + MS_EXCEPTION_IF_NULL(OneHot_prim); + auto op_name = OneHot_prim->name(); + int64_t axis = OneHot_prim->get_axis(); + auto in_shape = CheckAndConvertUtils::ConvertShapePtrToShape("input_shape", input_args[0]->BuildShape(), op_name); + CheckAndConvertUtils::CheckInRange("axis", axis, kIncludeBoth, {-1, SizeToLong(in_shape.size())}, op_name); + auto depth_val = GetValue(input_args[1]->BuildValue()); + CheckAndConvertUtils::CheckInteger("depth", depth_val, kGreaterEqual, 0, op_name); + if (axis >= 0) { + in_shape.insert(in_shape.begin() + axis, depth_val); + } else { + in_shape.push_back(depth_val); + } + return std::make_shared(in_shape); +} + +TypePtr OneHotInferType(const PrimitivePtr &prim, const std::vector &input_args) { + MS_EXCEPTION_IF_NULL(prim); + auto OneHot_prim = prim->cast(); + MS_EXCEPTION_IF_NULL(OneHot_prim); + auto op_name = OneHot_prim->name(); + CheckAndConvertUtils::CheckTensorTypeValid("indices", input_args[0]->BuildType(), {kNumberTypeInt32}, op_name); + CheckAndConvertUtils::CheckTypeSame("depth", input_args[1]->BuildType(), + {kNumberTypeInt8, kNumberTypeInt16, kNumberTypeInt32, kNumberTypeInt64}, op_name); + auto value_type = input_args[2]->BuildType(); + auto tensor_type = value_type->cast(); + MS_EXCEPTION_IF_NULL(tensor_type); + auto element = tensor_type->element(); + MS_EXCEPTION_IF_NULL(element); + std::map args = {{"on_value", value_type}, {"off_dtype", input_args[3]->BuildType()}}; + CheckAndConvertUtils::CheckTensorTypeSame(args, {kNumberTypeFloat16, kNumberTypeFloat32}, op_name); + return element; +} +} // namespace +AbstractBasePtr OneHotInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args) { + return std::make_shared(OneHotInferType(primitive, input_args), + OneHotInferShape(primitive, input_args)->shape()); +} + +REGISTER_PRIMITIVE_EVAL_IMPL(OneHot, prim::kPrimOneHot, OneHotInfer); +REGISTER_PRIMITIVE_C(kNameOneHot, OneHot); +} // namespace ops +} // namespace mindspore diff --git a/mindspore/core/ops/one_hot.h b/mindspore/core/ops/one_hot.h new file mode 100644 index 00000000000..36886cf2616 --- /dev/null +++ b/mindspore/core/ops/one_hot.h @@ -0,0 +1,45 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef MINDSPORE_CORE_OPS_ONE_HOT_H_ +#define MINDSPORE_CORE_OPS_ONE_HOT_H_ +#include +#include + +#include "ops/primitive_c.h" +#include "abstract/abstract_value.h" +#include "utils/check_convert_utils.h" + +namespace mindspore { +namespace ops { +constexpr auto kNameOneHot = "OneHot"; +class OneHot : public PrimitiveC { + public: + OneHot() : PrimitiveC(kNameOneHot) { InitIOName({"indices", "depth", "on_value", "off_value"}, {"output"}); } + ~OneHot() = default; + MS_DECLARE_PARENT(OneHot, PrimitiveC); + void Init(const int64_t axis); + void set_axis(const int64_t axis); + int64_t get_axis() const; +}; + +AbstractBasePtr OneHotInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args); +using PrimOneHotPtr = std::shared_ptr; +} // namespace ops +} // namespace mindspore + +#endif // MINDSPORE_CORE_OPS_ONE_HOT_H_ diff --git a/mindspore/core/ops/ones_like.cc b/mindspore/core/ops/ones_like.cc new file mode 100644 index 00000000000..6c3b46a354a --- /dev/null +++ b/mindspore/core/ops/ones_like.cc @@ -0,0 +1,57 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include +#include +#include +#include +#include "ops/ones_like.h" +#include "ops/op_utils.h" +#include "utils/check_convert_utils.h" +#include "abstract/primitive_infer_map.h" + +namespace mindspore { +namespace ops { +namespace { +abstract::ShapePtr InferShape(const PrimitivePtr &primitive, const std::vector &input_args) { + MS_EXCEPTION_IF_NULL(primitive); + auto OnesLike_prim = primitive->cast(); + MS_EXCEPTION_IF_NULL(OnesLike_prim); + auto prim_name = OnesLike_prim->name(); + auto input_shape = + CheckAndConvertUtils::ConvertShapePtrToShape("input_shape", input_args[0]->BuildShape(), prim_name); + return std::make_shared(input_shape); +} + +TypePtr InferType(const PrimitivePtr &primitive, const std::vector &input_args) { + // const std::set valid_types = {kNumberTypeInt8, kNumberTypeInt16, kNumberTypeInt32, kNumberTypeInt64, + // kNumberTypeUInt16, kNumberTypeUInt32, kNumberTypeUInt64, + // kNumberTypeFloat16, kNumberTypeFloat32, kNumberTypeFloat64, + // kNumberTypeBool}; + auto infer_type = input_args[0]->BuildType(); + CheckAndConvertUtils::CheckTensorTypeValid("infer_type", infer_type, common_valid_types, "OnesLike"); + return infer_type; +} +} // namespace +AbstractBasePtr OnesLikeInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args) { + return std::make_shared(InferType(primitive, input_args), + InferShape(primitive, input_args)->shape()); +} +REGISTER_PRIMITIVE_EVAL_IMPL(OnesLike, prim::kPrimOnesLike, OnesLikeInfer); +REGISTER_PRIMITIVE_C(kNameOnesLike, OnesLike); +} // namespace ops +} // namespace mindspore diff --git a/mindspore/core/ops/ones_like.h b/mindspore/core/ops/ones_like.h new file mode 100644 index 00000000000..b0af5ef2a70 --- /dev/null +++ b/mindspore/core/ops/ones_like.h @@ -0,0 +1,41 @@ +/** + * Copyright 2020 Huawei Technologies 
Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_CORE_OPS_ONES_LIKE_H_ +#define MINDSPORE_CORE_OPS_ONES_LIKE_H_ +#include +#include +#include "ops/primitive_c.h" +#include "abstract/abstract_value.h" +#include "utils/check_convert_utils.h" + +namespace mindspore { +namespace ops { +constexpr auto kNameOnesLike = "OnesLike"; +class OnesLike : public PrimitiveC { + public: + OnesLike() : PrimitiveC(kNameOnesLike) {} + ~OnesLike() = default; + MS_DECLARE_PARENT(OnesLike, PrimitiveC); + void Init() {} +}; +AbstractBasePtr OnesLikeInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args); +using PrimOnesLikePtr = std::shared_ptr; +} // namespace ops +} // namespace mindspore + +#endif // MINDSPORE_CORE_OPS_ONES_LIKE_H_ diff --git a/mindspore/core/ops/onnx_int8_dequantize.cc b/mindspore/core/ops/onnx_int8_dequantize.cc new file mode 100644 index 00000000000..b1a48914d5c --- /dev/null +++ b/mindspore/core/ops/onnx_int8_dequantize.cc @@ -0,0 +1,25 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "ops/onnx_int8_dequantize.h" +#include "utils/check_convert_utils.h" +#include "abstract/primitive_infer_map.h" + +namespace mindspore { +namespace ops { +REGISTER_PRIMITIVE_C(kNameOnnxInt8Dequantize, OnnxInt8Dequantize); +} // namespace ops +} // namespace mindspore diff --git a/mindspore/core/c_ops/batch_norm_fold.h b/mindspore/core/ops/onnx_int8_dequantize.h similarity index 58% rename from mindspore/core/c_ops/batch_norm_fold.h rename to mindspore/core/ops/onnx_int8_dequantize.h index db0fa14970f..24f6c174f9b 100644 --- a/mindspore/core/c_ops/batch_norm_fold.h +++ b/mindspore/core/ops/onnx_int8_dequantize.h @@ -13,25 +13,29 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ -#ifndef MINDSPORE_CORE_C_OPS_BATCHNORMFOLD_H_ -#define MINDSPORE_CORE_C_OPS_BATCHNORMFOLD_H_ + +#ifndef MINDSPORE_CORE_OPS_ONNX_INT8_DEQUANTIZE_H_ +#define MINDSPORE_CORE_OPS_ONNX_INT8_DEQUANTIZE_H_ + #include -#include "c_ops/primitive_c.h" +#include "ops/primitive_c.h" #include "abstract/abstract_value.h" #include "utils/check_convert_utils.h" namespace mindspore { -constexpr auto kNameBatchNormFold = "BatchNormFold"; -class BatchNormFold : public PrimitiveC { +namespace ops { +constexpr auto kNameOnnxInt8Dequantize = "OnnxInt8Dequantize"; +class OnnxInt8Dequantize : public PrimitiveC { public: - BatchNormFold() : PrimitiveC(kNameBatchNormFold) {} - ~BatchNormFold() = default; - MS_DECLARE_PARENT(BatchNormFold, PrimitiveC); + OnnxInt8Dequantize() : PrimitiveC(kNameOnnxInt8Dequantize) {} + ~OnnxInt8Dequantize() = default; + MS_DECLARE_PARENT(OnnxInt8Dequantize, PrimitiveC); void Init() {} }; -using PrimBatchNormFoldPtr = std::shared_ptr; +using PrimOnnxInt8DequantizePtr = std::shared_ptr; +} // namespace ops } // namespace mindspore -#endif // MINDSPORE_CORE_C_OPS_BATCHNORMFOLD_H_ +#endif // MINDSPORE_CORE_OPS_ONNX_INT8_DEQUANTIZE_H_ diff --git a/mindspore/core/ops/onnx_int8_quantize.cc b/mindspore/core/ops/onnx_int8_quantize.cc new file mode 100644 index 00000000000..1f58f448154 --- /dev/null +++ b/mindspore/core/ops/onnx_int8_quantize.cc @@ -0,0 +1,31 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "ops/onnx_int8_quantize.h" +#include +#include +#include +#include +#include +#include "ops/op_utils.h" +#include "utils/check_convert_utils.h" +#include "abstract/primitive_infer_map.h" + +namespace mindspore { +namespace ops { +REGISTER_PRIMITIVE_C(kNameOnnxInt8Quantize, OnnxInt8Quantize); +} // namespace ops +} // namespace mindspore diff --git a/mindspore/core/ops/onnx_int8_quantize.h b/mindspore/core/ops/onnx_int8_quantize.h new file mode 100644 index 00000000000..c8f0312f083 --- /dev/null +++ b/mindspore/core/ops/onnx_int8_quantize.h @@ -0,0 +1,40 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef MINDSPORE_CORE_OPS_ONNX_INT8_QUANTIZE_H_ +#define MINDSPORE_CORE_OPS_ONNX_INT8_QUANTIZE_H_ +#include +#include +#include +#include +#include "ops/primitive_c.h" +#include "abstract/abstract_value.h" +#include "utils/check_convert_utils.h" + +namespace mindspore { +namespace ops { +constexpr auto kNameOnnxInt8Quantize = "OnnxInt8Quantize"; +class OnnxInt8Quantize : public PrimitiveC { + public: + OnnxInt8Quantize() : PrimitiveC(kNameOnnxInt8Quantize) {} + ~OnnxInt8Quantize() = default; + MS_DECLARE_PARENT(OnnxInt8Quantize, PrimitiveC); + void Init() {} +}; +} // namespace ops +} // namespace mindspore + +#endif // MINDSPORE_CORE_OPS_ONNX_INT8_QUANTIZE_H_ diff --git a/mindspore/core/c_ops/op_utils.cc b/mindspore/core/ops/op_utils.cc similarity index 97% rename from mindspore/core/c_ops/op_utils.cc rename to mindspore/core/ops/op_utils.cc index 664773ebdd0..3ca50dd84ec 100644 --- a/mindspore/core/c_ops/op_utils.cc +++ b/mindspore/core/ops/op_utils.cc @@ -19,11 +19,12 @@ #include #include #include -#include "c_ops/op_utils.h" +#include "ops/op_utils.h" #include "abstract/primitive_infer_map.h" #include "utils/check_convert_utils.h" namespace mindspore { +namespace ops { abstract::ShapePtr BroadCastInferShape(const std::string &op_name, const std::vector &input_args) { MS_LOG(INFO) << "Do infer shape for op " << op_name; auto x_shape = CheckAndConvertUtils::ConvertShapePtrToShape("x_shape", input_args[0]->GetShapeTrack(), op_name); @@ -54,4 +55,5 @@ abstract::ShapePtr BroadCastInferShape(const std::string &op_name, const std::ve } return std::make_shared(broadcast_shape); } +} // namespace ops } // namespace mindspore diff --git a/mindspore/core/c_ops/op_utils.h b/mindspore/core/ops/op_utils.h similarity index 67% rename from mindspore/core/c_ops/op_utils.h rename to mindspore/core/ops/op_utils.h index 799cf5260ef..8ebc43194b0 100644 --- a/mindspore/core/c_ops/op_utils.h +++ b/mindspore/core/ops/op_utils.h @@ -14,8 +14,8 @@ * limitations under the License. 
*/ -#ifndef MINDSPORE_CORE_C_OPS_CONV_UTILS_H -#define MINDSPORE_CORE_C_OPS_CONV_UTILS_H +#ifndef MINDSPORE_CORE_OPS_OP_UTILS_H +#define MINDSPORE_CORE_OPS_OP_UTILS_H #include #include #include @@ -25,11 +25,13 @@ #include "utils/check_convert_utils.h" namespace mindspore { +namespace ops { constexpr auto kAlpha = "alpha"; constexpr auto kActivationType = "activation_type"; constexpr auto kAddress = "address"; constexpr auto kAlignCorners = "align_corners"; constexpr auto kAspectRatios = "aspect_ratios"; +constexpr auto kAxes = "axes"; constexpr auto kAxis = "axis"; constexpr auto kAxisType = "axis_type"; constexpr auto kBaseSize = "base_size"; @@ -41,11 +43,13 @@ constexpr auto kBeta = "beta"; constexpr auto kBias = "bias"; constexpr auto kBidirectional = "bidirectional"; constexpr auto kBlockSize = "block_size"; +constexpr auto kBlockShape = "block_shape"; constexpr auto kBodySubgraphIndex = "body_subgraph_index"; constexpr auto kCenterPointBox = "center_point_box"; constexpr auto kClip = "clip"; constexpr auto kCondition = "condition"; constexpr auto kCondSubgraphIndex = "cond_subgraph_index"; +constexpr auto kCrops = "crops"; constexpr auto kCustom = "custom"; constexpr auto kDampening = "dampening"; constexpr auto kDataType = "data_type"; @@ -53,13 +57,16 @@ constexpr auto kDctCoeffNum = "dct_coeff_num"; constexpr auto kDelta = "delta"; constexpr auto kDependMode = "depend_mode"; constexpr auto kDepthRadius = "depth_radius"; +constexpr auto kDetectionsPerClass = "detections_per_class"; constexpr auto kDilation = "dilation"; constexpr auto kDropout = "dropout"; constexpr auto kDstT = "dst_t"; constexpr auto kDType = "d_type"; constexpr auto kEllipsisMask = "ellipsis_mask"; constexpr auto kEndMask = "end_mask"; +constexpr auto kEps = "eps"; constexpr auto kEpsilon = "epsilon"; +constexpr auto kElement_dtype = "element_dtype"; constexpr auto kFeatStride = "feat_stride"; constexpr auto kFftLength = "fft_length"; constexpr auto kFilterBankChannelNum = "filter_bank_channel_num"; @@ -67,16 +74,22 @@ constexpr auto kFlip = "flip"; constexpr auto kFormat = "format"; constexpr auto kFreqLowerLimit = "freq_lower_limit"; constexpr auto kFreqUpperLimit = "freq_upper_limit"; +constexpr auto kFreezeBn = "freeze_bn"; constexpr auto kGlobal = "global"; +constexpr auto kGrad = "grad"; constexpr auto kGradientScale = "gradient_scale"; +constexpr auto kGradX = "grad_x"; +constexpr auto kGradY = "grad_y"; constexpr auto kGroup = "group"; constexpr auto kHasBias = "has_bias"; -constexpr auto kHidden_size = "hidden_size"; +constexpr auto kHiddenSize = "hidden_size"; constexpr auto kId = "id"; constexpr auto kImageSizeH = "image_size_h"; constexpr auto kImageSizeW = "image_size_w"; constexpr auto kIncludeALLGrams = "include_all_grams"; -constexpr auto kInput_size = "input_size"; +constexpr auto kInputSize = "input_size"; +constexpr auto kInChannel = "in_channel"; +constexpr auto kInputShape = "input_shape"; constexpr auto kIoFormat = "io_format"; constexpr auto kIsScale = "is_scale"; constexpr auto kIsTraining = "is_training"; @@ -88,10 +101,14 @@ constexpr auto kMagSquare = "mag_square"; constexpr auto kMax = "max"; constexpr auto kMaxSizes = "max_sizes"; constexpr auto kMaxSkipSize = "max_skip_size"; +constexpr auto kMaxClassesPerDetection = "max_classes_per_detection"; +constexpr auto kMaxDetections = "max_detections"; +constexpr auto kMaxNorm = "max_norm"; constexpr auto kMin = "min"; constexpr auto kMinSize = "min_size"; constexpr auto kMinSizes = "min_sizes"; constexpr auto kMode = "mode"; +constexpr 
auto kMomentum = "momentum"; constexpr auto kN = "n"; constexpr auto kNarrowRange = "narrow_range"; constexpr auto kNesterov = "nesterov"; @@ -99,17 +116,23 @@ constexpr auto kNewAxisMask = "new_axis_mask"; constexpr auto kNgramSize = "ngram_size"; constexpr auto kNmsThresh = "nms_thresh"; constexpr auto kNormRegion = "norm_region"; -constexpr auto kNum_layers = "num_layers"; +constexpr auto kNumLayers = "num_layers"; +constexpr auto kNumElements = "num_elements"; constexpr auto kNumBits = "num_bits"; constexpr auto kNumDirections = "num_directions"; constexpr auto kOffset = "offset"; +constexpr auto kNmsIouThreshold = "nms_iou_threshold"; +constexpr auto kNmsScoreThreshold = "nms_score_threshold"; +constexpr auto kNumClasses = "num_classes"; +constexpr auto kOffsets = "offsets"; constexpr auto kOffsetA = "offset_a"; constexpr auto kOrder = "order"; constexpr auto kOutChannel = "out_channel"; constexpr auto kOutMaxValue = "out_max_value"; -constexpr auto kOutputChannel = "out_channel"; +constexpr auto kOutputChannel = "output_channel"; constexpr auto kOutputNum = "output_num"; constexpr auto kOutputType = "output_type"; +constexpr auto kOutQuantized = "out_quantized"; constexpr auto kP = "p"; constexpr auto kPad = "pad"; constexpr auto kPadding = "padding"; @@ -122,6 +145,7 @@ constexpr auto kPads = "pads"; constexpr auto kPadSize = "pad_size"; constexpr auto kPooledH = "pooled_h"; constexpr auto kPooledW = "pooled_w"; +constexpr auto kPoolMode = "pool_mode"; constexpr auto kPostNmsTopn = "post_nms_topn"; constexpr auto kPower = "power"; constexpr auto kPreNmsTopn = "pre_nms_topn"; @@ -139,30 +163,71 @@ constexpr auto kShift = "shift"; constexpr auto kShrinkAxisMask = "shrink_axis_mask"; constexpr auto kSize = "size"; constexpr auto kSorted = "sorted"; -constexpr auto kSrcT = "srcT"; +constexpr auto kSrcT = "src_t"; constexpr auto kStart = "start"; constexpr auto kStepH = "step_h"; constexpr auto kStepW = "step_w"; constexpr auto kStride = "stride"; +constexpr auto kStrides = "strides"; +constexpr auto kShapeType = "shape_type"; constexpr auto kSubGraphIndex = "sub_graph_index"; +constexpr auto kSummarize = "summarize"; constexpr auto kTopK = "top_k"; constexpr auto kTransposeA = "transpose_a"; constexpr auto kTransposeB = "transpose_b"; +constexpr auto kNegativeSlope = "negative_slope"; +constexpr auto kType = "type"; constexpr auto kUseAxis = "use_axis"; constexpr auto kUseLocking = "use_locking"; constexpr auto kUseNesterov = "use_nesterov"; constexpr auto kUseNesteroy = "use_nesteroy"; +constexpr auto kUseRegularNms = "use_regular_nms"; constexpr auto kValid = "valid"; constexpr auto kValue = "value"; constexpr auto kVariances = "variances"; constexpr auto kWeightDecay = "weight_decay"; constexpr auto kWeightThreshold = "weight_threshold"; +constexpr auto kWindow = "window"; constexpr auto kWindowSize = "window_size"; +constexpr auto kPaddings = "paddings"; +constexpr auto kInput_size = "input_size"; +constexpr auto kHidden_size = "hidden_size"; +constexpr auto kChannelShared = "channel_shared"; +constexpr auto kSlope = "slope"; +constexpr auto kBase = "base"; +constexpr auto kConstantValue = "constant_value"; +constexpr auto kSizeSplits = "size_splits"; +constexpr auto kDims = "dims"; +constexpr auto kPaddingMode = "padding_mode"; +constexpr auto kLargest = "largest"; +constexpr auto kElementwiseAffine = "elementwise_affine"; +constexpr auto kMinVal = "min_val"; +constexpr auto kMaxVal = "max_val"; +constexpr auto kMethod = "method"; +constexpr auto kNewHeight = "new_height"; 
+constexpr auto kNewWidth = "new_width"; +constexpr auto kPreserveAspectRatio = "preserve_aspect_ratio"; +constexpr auto kCoordinateTransformMode = "coordinate_transform_mode"; +constexpr auto kCubicCoeff = "cubic_coeff"; +constexpr auto kExcludeOutside = "exclude_outside"; +constexpr auto kExtrapolationValue = "extrapolation_value"; +constexpr auto kNearestMode = "nearest_mode"; +constexpr auto kReduceToEnd = "reduce_to_end"; +constexpr auto kCoeff = "coeff"; +constexpr auto kIsDepthWise = "is_depth_wise"; +constexpr auto kIsDepthWiseNative = "is_depth_wise_native"; const std::set common_valid_types = { kNumberTypeInt8, kNumberTypeInt16, kNumberTypeInt32, kNumberTypeInt64, kNumberTypeUInt8, kNumberTypeUInt16, kNumberTypeUInt32, kNumberTypeUInt64, kNumberTypeFloat16, kNumberTypeFloat32, kNumberTypeFloat64}; +const std::set all_types = { + kNumberTypeBool, kNumberTypeInt, kNumberTypeInt8, kNumberTypeInt16, kNumberTypeInt32, kNumberTypeInt64, + kNumberTypeUInt, kNumberTypeUInt8, kNumberTypeUInt16, kNumberTypeUInt32, kNumberTypeUInt64, kNumberTypeFloat, + kNumberTypeFloat16, kNumberTypeFloat32, kNumberTypeFloat64, kNumberTypeComplex64, +}; + abstract::ShapePtr BroadCastInferShape(const std::string &op_name, const std::vector &input_args); +} // namespace ops } // namespace mindspore -#endif // MINDSPORE_CORE_C_OPS_CONV_UTILS_H +#endif // MINDSPORE_CORE_OPS_OP_UTILS_H diff --git a/mindspore/core/ops/pack.cc b/mindspore/core/ops/pack.cc new file mode 100644 index 00000000000..e448b912850 --- /dev/null +++ b/mindspore/core/ops/pack.cc @@ -0,0 +1,80 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "ops/pack.h" + +namespace mindspore { +namespace ops { +namespace { +std::vector _get_pack_shape(std::vector x_shapes, std::vector x_types, int64_t axis, + std::string name) { + CheckAndConvertUtils::CheckInteger("len of input_x", (int64_t)x_shapes.size(), kGreaterEqual, 1, name); + CheckAndConvertUtils::CheckSubClass("input_x[0]", x_types[0], {TypeIdToType(kObjectTypeTensorType)}, name); + auto output_shape = CheckAndConvertUtils::ConvertShapePtrToShape("x_shape[0]", x_shapes[0], name); + int64_t rank_base = output_shape.size(); + int64_t N = x_shapes.size(); + // CheckAndConvertUtils::CheckInRange("axis", axis, kIncludeBoth, {-rank_base-1, rank_base}, name); + if (axis < 0) { + axis = axis + rank_base + 1; + } + for (int64_t i = 1; i < N; i++) { + auto type = x_types[i]->cast()->element(); + MS_EXCEPTION_IF_NULL(type); + auto type0 = x_types[0]->cast()->element(); + MS_EXCEPTION_IF_NULL(type0); + CheckAndConvertUtils::Check("x_type[" + std::to_string(i) + "]", type->type_id(), kEqual, "base", type0->type_id(), + name); + auto shape = CheckAndConvertUtils::ConvertShapePtrToShape("x_shape" + std::to_string(i), x_shapes[i], name); + if (shape != output_shape) { + MS_EXCEPTION(ValueError) << "For '" + name + "' element " + std::to_string(i) + + "shape in input can't pack with first element."; + } + } + output_shape.insert(output_shape.begin() + axis, N); + return output_shape; +} +} // namespace + +void Pack::set_axis(const int64_t &axis) { AddAttr(kAxis, MakeValue(axis)); } + +int64_t Pack::get_axis() const { + auto value_ptr = this->GetAttr(kAxis); + return GetValue(value_ptr); +} + +void Pack::Init(const int64_t &axis) { this->set_axis(axis); } + +AbstractBasePtr PackInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args) { + MS_EXCEPTION_IF_NULL(primitive); + auto pack_prim = primitive->cast(); + MS_EXCEPTION_IF_NULL(pack_prim); + auto prim_name = pack_prim->name(); + + auto x_shapes = input_args[0]->BuildShape()->cast()->shape(); + auto x_types = input_args[0]->BuildType()->cast()->elements(); + auto all_shape = _get_pack_shape(x_shapes, x_types, pack_prim->get_axis(), prim_name); + auto tensor_type = x_types[0]->cast(); + MS_EXCEPTION_IF_NULL(tensor_type); + auto data_type = tensor_type->element(); + MS_EXCEPTION_IF_NULL(data_type); + return std::make_shared(data_type, all_shape); +} + +REGISTER_PRIMITIVE_EVAL_IMPL(Pack, prim::kPrimPack, PackInfer); +REGISTER_PRIMITIVE_C(kNamePack, Pack); +} // namespace ops +} // namespace mindspore diff --git a/mindspore/core/ops/pack.h b/mindspore/core/ops/pack.h new file mode 100644 index 00000000000..732311616e4 --- /dev/null +++ b/mindspore/core/ops/pack.h @@ -0,0 +1,48 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef MINDSPORE_CORE_OPS_PACK_H_ +#define MINDSPORE_CORE_OPS_PACK_H_ + +#include +#include +#include +#include +#include +#include "ops/op_utils.h" +#include "ops/primitive_c.h" +#include "abstract/primitive_infer_map.h" +#include "abstract/abstract_value.h" +#include "utils/check_convert_utils.h" + +namespace mindspore { +namespace ops { +constexpr auto kNamePack = "Pack"; +class Pack : public PrimitiveC { + public: + Pack() : PrimitiveC(kNamePack) {} + ~Pack() = default; + MS_DECLARE_PARENT(Pack, PrimitiveC); + void Init(const int64_t &axis = 0); + void set_axis(const int64_t &axis); + int64_t get_axis() const; +}; +AbstractBasePtr PackInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args); +using PrimPackPtr = std::shared_ptr; +} // namespace ops +} // namespace mindspore +#endif // MINDSPORE_CORE_OPS_PACK_H_ diff --git a/mindspore/core/ops/pad.cc b/mindspore/core/ops/pad.cc new file mode 100644 index 00000000000..766761ef004 --- /dev/null +++ b/mindspore/core/ops/pad.cc @@ -0,0 +1,75 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include +#include "ops/pad.h" +#include "ops/op_utils.h" + +namespace mindspore { +namespace ops { +namespace { +abstract::ShapePtr InferShape(const PrimitivePtr &primitive, const std::vector &input_args) { + MS_EXCEPTION_IF_NULL(primitive); + auto pad_prim = primitive->cast(); + MS_EXCEPTION_IF_NULL(pad_prim); + auto prim_name = pad_prim->name(); + auto paddings_attr = pad_prim->get_paddings(); + auto x_shape = CheckAndConvertUtils::ConvertShapePtrToShape("x_shape", input_args[0]->BuildShape(), "Pad"); + CheckAndConvertUtils::CheckInteger("paddings_size", paddings_attr.size(), kEqual, int64_t(2 * x_shape.size()), + prim_name); + int64_t size = paddings_attr.size(); + for (int64_t i = 0; i < size; i++) { + for (int64_t j = 0; j < 2; j++) { + if (paddings_attr[i][j] < 0) { + MS_LOG_ERROR << "All elements of paddings must be >= 0."; + } + } + } + std::vector out_shape; + for (int64_t i = 0; i < int64_t(paddings_attr.size() / 2); i++) { + out_shape.emplace_back(x_shape[i] + paddings_attr[i][0] + paddings_attr[i][1]); + } + return std::make_shared(out_shape); +} + +TypePtr InferType(const PrimitivePtr &prim, const std::vector &input_args) { + for (const auto &item : input_args) { + MS_EXCEPTION_IF_NULL(item); + } + const std::set valid_types = {TypeIdToType(kObjectTypeTensorType)}; + auto infer_type = input_args[0]->BuildType(); + CheckAndConvertUtils::CheckSubClass("infer type", infer_type, valid_types, prim->name()); + return infer_type; +} +} // namespace + +void Pad::Init(const std::vector> &paddings) { this->set_paddings(paddings); } +void Pad::set_paddings(const std::vector> &paddings) { + this->AddAttr(kPaddings, MakeValue(paddings)); +} +std::vector> Pad::get_paddings() const { + auto value_ptr = GetAttr(kPaddings); + return GetValue>>(value_ptr); +} +AbstractBasePtr PadInfer(const abstract::AnalysisEnginePtr &, const 
PrimitivePtr &primitive, + const std::vector &input_args) { + return std::make_shared(InferType(primitive, input_args), + InferShape(primitive, input_args)->shape()); +} +REGISTER_PRIMITIVE_EVAL_IMPL(Pad, prim::kPrimPad, PadInfer); +REGISTER_PRIMITIVE_C(kNamePad, Pad); +} // namespace ops +} // namespace mindspore diff --git a/mindspore/core/ops/pad.h b/mindspore/core/ops/pad.h new file mode 100644 index 00000000000..5337371fa40 --- /dev/null +++ b/mindspore/core/ops/pad.h @@ -0,0 +1,46 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_CORE_OPS_PAD_H_ +#define MINDSPORE_CORE_OPS_PAD_H_ +#include +#include +#include +#include +#include "ops/primitive_c.h" +#include "abstract/abstract_value.h" +#include "utils/check_convert_utils.h" + +namespace mindspore { +namespace ops { +constexpr auto kNamePad = "Pad"; +class Pad : public PrimitiveC { + public: + Pad() : PrimitiveC(kNamePad) { InitIOName({"x"}, {"y"}); } + explicit Pad(const std::string k_name) : PrimitiveC(k_name) { InitIOName({"x"}, {"y"}); } + ~Pad() = default; + MS_DECLARE_PARENT(Pad, PrimitiveC); + void Init(const std::vector> &paddings); + void set_paddings(const std::vector> &paddings); + std::vector> get_paddings() const; +}; +AbstractBasePtr PadInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args); +using PrimPadPtr = std::shared_ptr; +} // namespace ops +} // namespace mindspore + +#endif // MINDSPORE_CORE_OPS_PAD_H_ diff --git a/mindspore/core/ops/partial.cc b/mindspore/core/ops/partial.cc new file mode 100644 index 00000000000..8105873206f --- /dev/null +++ b/mindspore/core/ops/partial.cc @@ -0,0 +1,24 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "ops/partial.h" +#include "ops/op_utils.h" + +namespace mindspore { +namespace ops { +REGISTER_PRIMITIVE_C(kNamePartial, Partial); +} // namespace ops +} // namespace mindspore diff --git a/mindspore/core/ops/partial.h b/mindspore/core/ops/partial.h new file mode 100644 index 00000000000..66d2da58a66 --- /dev/null +++ b/mindspore/core/ops/partial.h @@ -0,0 +1,36 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
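A worked example of the Pad shape rule above: each output dimension is the input dimension plus its {before, after} padding pair. The padding values here are illustrative only:

  #include "ops/pad.h"

  void BuildPadExample() {
    mindspore::ops::Pad pad;
    // One {before, after} pair per input dimension; for an input of shape [2, 3]
    // this gives an output shape of [2 + 1 + 1, 3 + 0 + 2] = [4, 5].
    pad.Init({{1, 1}, {0, 2}});
    auto paddings = pad.get_paddings();
    (void)paddings;
  }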
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_CORE_OPS_PARTIAL_H_ +#define MINDSPORE_CORE_OPS_PARTIAL_H_ +#include "ops/primitive_c.h" +#include "abstract/abstract_value.h" +#include "utils/check_convert_utils.h" + +namespace mindspore { +namespace ops { +constexpr auto kNamePartial = "Partial"; +class Partial : public PrimitiveC { + public: + Partial() : PrimitiveC(kNamePartial) {} + ~Partial() = default; + MS_DECLARE_PARENT(Partial, PrimitiveC); + void Init() {} +}; +} // namespace ops +} // namespace mindspore + +#endif // MINDSPORE_CORE_OPS_PARTIAL_H_ diff --git a/mindspore/core/c_ops/bias_grad.cc b/mindspore/core/ops/permute.cc similarity index 68% rename from mindspore/core/c_ops/bias_grad.cc rename to mindspore/core/ops/permute.cc index b3e98fd8d0c..911b7e8ef12 100644 --- a/mindspore/core/c_ops/bias_grad.cc +++ b/mindspore/core/ops/permute.cc @@ -13,25 +13,27 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - -#include "c_ops/bias_grad.h" +#include "ops/permute.h" #include #include #include #include #include -#include "c_ops/op_utils.h" +#include "ops/op_utils.h" #include "utils/check_convert_utils.h" #include "abstract/primitive_infer_map.h" namespace mindspore { -void BiasGrad::set_axis(const std::vector &axis) { this->AddAttr(kAxis, MakeValue(axis)); } +namespace ops { -std::vector BiasGrad::get_axis() const { - auto value_ptr = GetAttr(kAxis); +void Permute::set_order(const std::vector &order) { this->AddAttr(kOrder, MakeValue(order)); } + +std::vector Permute::get_order() const { + auto value_ptr = GetAttr(kOrder); return GetValue>(value_ptr); } -void BiasGrad::Init(const std::vector &axis) { this->set_axis(axis); } -REGISTER_PRIMITIVE_C(kNameBiasGrad, BiasGrad); +void Permute::Init(const std::vector &order) { this->set_order(order); } +REGISTER_PRIMITIVE_C(kNamePermute, Permute); +} // namespace ops } // namespace mindspore diff --git a/mindspore/core/ops/permute.h b/mindspore/core/ops/permute.h new file mode 100644 index 00000000000..574c4ada8fe --- /dev/null +++ b/mindspore/core/ops/permute.h @@ -0,0 +1,43 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
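A minimal usage sketch for the Permute primitive above (the template arguments stripped from the diff rendering are assumed to be std::vector<int64_t>, as in the sibling ops):

    #include "ops/permute.h"

    // Illustrative only: describe a transpose that swaps the last two axes of a
    // rank-3 tensor, then read the attribute back out of the primitive.
    void BuildPermuteExample() {
      mindspore::ops::Permute permute;
      permute.Init({0, 2, 1});                           // stored under kOrder via AddAttr
      std::vector<int64_t> order = permute.get_order();  // {0, 2, 1}
      (void)order;
    }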
+ */ + +#ifndef MINDSPORE_CORE_OPS_PERMUTE_H_ +#define MINDSPORE_CORE_OPS_PERMUTE_H_ +#include +#include +#include +#include +#include "ops/primitive_c.h" +#include "abstract/abstract_value.h" +#include "utils/check_convert_utils.h" + +namespace mindspore { +namespace ops { +constexpr auto kNamePermute = "Permute"; +class Permute : public PrimitiveC { + public: + Permute() : PrimitiveC(kNamePermute) {} + ~Permute() = default; + MS_DECLARE_PARENT(Permute, PrimitiveC); + + void Init(const std::vector &order); + void set_order(const std::vector &order); + std::vector get_order() const; +}; +} // namespace ops +} // namespace mindspore + +#endif // MINDSPORE_CORE_OPS_PERMUTE_H_ diff --git a/mindspore/core/ops/pow.cc b/mindspore/core/ops/pow.cc new file mode 100644 index 00000000000..1cf0a143592 --- /dev/null +++ b/mindspore/core/ops/pow.cc @@ -0,0 +1,53 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "ops/pow.h" +#include "ops/op_utils.h" +#include "utils/check_convert_utils.h" +#include "abstract/primitive_infer_map.h" + +namespace mindspore { +namespace ops { +namespace { +abstract::ShapePtr InferShape(const PrimitivePtr &primitive, const std::vector &input_args) { + MS_EXCEPTION_IF_NULL(primitive); + auto pow_prim = primitive->cast(); + MS_EXCEPTION_IF_NULL(pow_prim); + auto op_name = pow_prim->name(); + return BroadCastInferShape(op_name, input_args); +} + +TypePtr InferType(const PrimitivePtr &prim, const std::vector &input_args) { + for (const auto &item : input_args) { + MS_EXCEPTION_IF_NULL(item); + } + std::map types; + types.emplace("x", input_args[0]->BuildType()); + types.emplace("y", input_args[1]->BuildType()); + auto infer_type = CheckAndConvertUtils::CheckTensorTypeSame(types, common_valid_types, prim->name()); + return TypeIdToType(infer_type); +} +} // namespace + +AbstractBasePtr PowInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args) { + return std::make_shared(InferType(primitive, input_args), + InferShape(primitive, input_args)->shape()); +} +REGISTER_PRIMITIVE_EVAL_IMPL(Pow, prim::kPrimPow, PowInfer); +REGISTER_PRIMITIVE_C(kNamePow, Pow); +} // namespace ops +} // namespace mindspore diff --git a/mindspore/core/ops/pow.h b/mindspore/core/ops/pow.h new file mode 100644 index 00000000000..bea006585b9 --- /dev/null +++ b/mindspore/core/ops/pow.h @@ -0,0 +1,44 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_CORE_OPS_POW_H_ +#define MINDSPORE_CORE_OPS_POW_H_ +#include +#include +#include +#include +#include "ops/primitive_c.h" +#include "ops/op_utils.h" +#include "abstract/abstract_value.h" +#include "utils/check_convert_utils.h" + +namespace mindspore { +namespace ops { +constexpr auto kNamePow = "Pow"; +class Pow : public PrimitiveC { + public: + explicit Pow(const std::string &k_name = kNamePow) : PrimitiveC(k_name) { InitIOName({"x", "y"}, {"output"}); } + ~Pow() = default; + MS_DECLARE_PARENT(Pow, PrimitiveC); + void Init(); +}; +AbstractBasePtr PowInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args); +using PrimPowPtr = std::shared_ptr; +} // namespace ops +} // namespace mindspore + +#endif // MINDSPORE_CORE_OPS_POW_H_ diff --git a/mindspore/core/ops/prelu.cc b/mindspore/core/ops/prelu.cc new file mode 100644 index 00000000000..d05ddfb13d3 --- /dev/null +++ b/mindspore/core/ops/prelu.cc @@ -0,0 +1,66 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include +#include "ops/prelu.h" + +namespace mindspore { +namespace ops { +namespace { +abstract::ShapePtr InferShape(const PrimitivePtr &primitive, const std::vector &input_args) { + MS_EXCEPTION_IF_NULL(primitive); + auto prim_name = primitive->name(); + auto x = input_args[0]->BuildShape(); + auto w = input_args[1]->BuildShape(); + auto x_shape = CheckAndConvertUtils::ConvertShapePtrToShape("x_shape", x, prim_name); + auto w_shape = CheckAndConvertUtils::ConvertShapePtrToShape("w_shape", w, prim_name); + + CheckAndConvertUtils::CheckInteger("x rank", x_shape.size(), kNotEqual, 1, prim_name); + CheckAndConvertUtils::CheckInteger("weight rank", w_shape.size(), kEqual, 1, prim_name); + if (w_shape[0] != x_shape[1] && w_shape[0] != 1) { + MS_LOG(EXCEPTION) << "For " << prim_name << ", channel of input_x and weight must be matched, " + << "while channel of input_x is " << x_shape[1] << ", weight_shape[0] is " << w_shape[0]; + } + + auto shape_element = x->cast(); + MS_EXCEPTION_IF_NULL(shape_element); + return shape_element; +} + +TypePtr InferType(const PrimitivePtr &prim, const std::vector &input_args) { + MS_EXCEPTION_IF_NULL(prim); + CheckAndConvertUtils::CheckInteger("input number", input_args.size(), kEqual, 2, prim->name()); + for (const auto &item : input_args) { + MS_EXCEPTION_IF_NULL(item); + } + const std::set valid_types = {kNumberTypeFloat16, kNumberTypeFloat32}; + CheckAndConvertUtils::CheckTensorTypeValid("input_x", input_args[0]->BuildType(), valid_types, prim->name()); + CheckAndConvertUtils::CheckTensorTypeValid("weight", input_args[1]->BuildType(), valid_types, prim->name()); + auto tensor_type = input_args[0]->BuildType()->cast(); + MS_EXCEPTION_IF_NULL(tensor_type); + auto input_x_type = tensor_type->element(); + return input_x_type; +} +} // namespace +AbstractBasePtr 
PReLUInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args) { + return std::make_shared(InferType(primitive, input_args), + InferShape(primitive, input_args)->shape()); +} +REGISTER_PRIMITIVE_EVAL_IMPL(PReLU, prim::kPrimPRelu, PReLUInfer); +REGISTER_PRIMITIVE_C(kNamePReLU, PReLU); +} // namespace ops +} // namespace mindspore diff --git a/mindspore/core/ops/prelu.h b/mindspore/core/ops/prelu.h new file mode 100644 index 00000000000..deae1b6034f --- /dev/null +++ b/mindspore/core/ops/prelu.h @@ -0,0 +1,44 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_CORE_OPS_PRELU_H_ +#define MINDSPORE_CORE_OPS_PRELU_H_ +#include +#include +#include +#include "ops/primitive_c.h" +#include "abstract/abstract_value.h" +#include "utils/check_convert_utils.h" + +namespace mindspore { +namespace ops { +constexpr auto kNamePReLU = "PReLU"; +class PReLU : public PrimitiveC { + public: + PReLU() : PrimitiveC(kNamePReLU) { InitIOName({"x"}, {"y"}); } + explicit PReLU(const std::string k_name) : PrimitiveC(k_name) { InitIOName({"x"}, {"y"}); } + ~PReLU() = default; + MS_DECLARE_PARENT(PReLU, PrimitiveC); + void Init() {} +}; + +AbstractBasePtr PReLUInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args); +using PrimPReLUPtr = std::shared_ptr; +} // namespace ops +} // namespace mindspore + +#endif // MINDSPORE_CORE_OPS_PRELU_H_ diff --git a/mindspore/core/c_ops/primitive_c.cc b/mindspore/core/ops/primitive_c.cc similarity index 94% rename from mindspore/core/c_ops/primitive_c.cc rename to mindspore/core/ops/primitive_c.cc index ffc0d030513..dcb89530eaf 100644 --- a/mindspore/core/c_ops/primitive_c.cc +++ b/mindspore/core/ops/primitive_c.cc @@ -13,10 +13,14 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -#include "c_ops/primitive_c.h" + #include #include + +#include "ops/primitive_c.h" +#include "utils/check_convert_utils.h" namespace mindspore { +namespace ops { void PrimitiveC::InitIOName(const std::vector &inputs_name, const std::vector &outputs_name) { this->AddAttr("input_names", MakeValue(inputs_name)); this->AddAttr("output_names", MakeValue(outputs_name)); @@ -40,4 +44,5 @@ OpPrimCRegister &OpPrimCRegister::GetInstance() { std::map OpPrimCRegister::GetPrimCMap() { return op_primc_fns_; } void OpPrimCRegister::SetPrimCMap(const std::string &kname, const OpPrimCDefineFunc &fn) { op_primc_fns_[kname] = fn; } +} // namespace ops } // namespace mindspore diff --git a/mindspore/core/c_ops/primitive_c.h b/mindspore/core/ops/primitive_c.h similarity index 92% rename from mindspore/core/c_ops/primitive_c.h rename to mindspore/core/ops/primitive_c.h index 76e0a04a045..b25243767f3 100644 --- a/mindspore/core/c_ops/primitive_c.h +++ b/mindspore/core/ops/primitive_c.h @@ -14,8 +14,8 @@ * limitations under the License. 
*/ -#ifndef MINDSPORE_CORE_C_OPS_PRIMITIVE_C_H_ -#define MINDSPORE_CORE_C_OPS_PRIMITIVE_C_H_ +#ifndef MINDSPORE_CORE_OPS_PRIMITIVE_C_H_ +#define MINDSPORE_CORE_OPS_PRIMITIVE_C_H_ #include #include #include @@ -24,6 +24,7 @@ #include "abstract/primitive_infer_map.h" #include "ir/value.h" namespace mindspore { +namespace ops { class PrimitiveC : public Primitive { public: explicit PrimitiveC(const std::string &name) : Primitive(name) {} @@ -62,5 +63,6 @@ class OpPrimCRegisterHelper { return out; \ } \ OpPrimCRegisterHelper primc_gen_##kname(kname, GetDefaultPrimC##primc); +} // namespace ops } // namespace mindspore -#endif // MINDSPORE_CORE_C_OPS_PRIMITIVE_C_H_ +#endif // MINDSPORE_CORE_OPS_PRIMITIVE_C_H_ diff --git a/mindspore/core/ops/prior_box.cc b/mindspore/core/ops/prior_box.cc new file mode 100644 index 00000000000..b1db9dea50e --- /dev/null +++ b/mindspore/core/ops/prior_box.cc @@ -0,0 +1,152 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include +#include "ops/prior_box.h" +#include "ops/op_utils.h" +#include "utils/check_convert_utils.h" +#include "abstract/primitive_infer_map.h" + +namespace mindspore { +namespace ops { +void PriorBox::set_min_sizes(const std::vector &min_sizes) { this->AddAttr(kMinSizes, MakeValue(min_sizes)); } + +std::vector PriorBox::get_min_sizes() const { + auto value_ptr = GetAttr(kMinSizes); + return GetValue>(value_ptr); +} + +void PriorBox::set_max_sizes(const std::vector &max_sizes) { this->AddAttr(kMaxSizes, MakeValue(max_sizes)); } + +std::vector PriorBox::get_max_sizes() const { + auto value_ptr = GetAttr(kMaxSizes); + return GetValue>(value_ptr); +} + +void PriorBox::set_aspect_ratios(const std::vector &aspect_ratios) { + this->AddAttr(kAspectRatios, MakeValue(aspect_ratios)); +} + +std::vector PriorBox::get_aspect_ratios() const { + auto value_ptr = GetAttr(kAspectRatios); + return GetValue>(value_ptr); +} + +void PriorBox::set_variances(const std::vector &variances) { this->AddAttr(kVariances, MakeValue(variances)); } + +std::vector PriorBox::get_variances() const { + auto value_ptr = GetAttr(kVariances); + return GetValue>(value_ptr); +} + +void PriorBox::set_image_size_w(const int64_t image_size_w) { this->AddAttr(kImageSizeW, MakeValue(image_size_w)); } + +int64_t PriorBox::get_image_size_w() const { + auto value_ptr = GetAttr(kImageSizeW); + return GetValue(value_ptr); +} + +void PriorBox::set_image_size_h(const int64_t image_size_h) { this->AddAttr(kImageSizeH, MakeValue(image_size_h)); } + +int64_t PriorBox::get_image_size_h() const { + auto value_ptr = GetAttr(kImageSizeH); + return GetValue(value_ptr); +} + +void PriorBox::set_step_w(const float step_w) { this->AddAttr(kStepW, MakeValue(step_w)); } + +float PriorBox::get_step_w() const { + auto value_ptr = GetAttr(kStepW); + return GetValue(value_ptr); +} + +void PriorBox::set_step_h(const float step_h) { this->AddAttr(kStepH, MakeValue(step_h)); } + +float PriorBox::get_step_h() const { + auto value_ptr = 
GetAttr(kStepH); + return GetValue(value_ptr); +} + +void PriorBox::set_clip(const bool clip) { this->AddAttr(kClip, MakeValue(clip)); } + +bool PriorBox::get_clip() const { + auto value_ptr = GetAttr(kClip); + return GetValue(value_ptr); +} + +void PriorBox::set_flip(const bool flip) { this->AddAttr(kFlip, MakeValue(flip)); } + +bool PriorBox::get_flip() const { + auto value_ptr = GetAttr(kFlip); + return GetValue(value_ptr); +} + +void PriorBox::set_offset(const float offset) { this->AddAttr(kOffset, MakeValue(offset)); } + +float PriorBox::get_offset() const { + auto value_ptr = GetAttr(kOffset); + return GetValue(value_ptr); +} + +void PriorBox::Init(const std::vector &min_sizes, const std::vector &max_sizes, + const std::vector &aspect_ratios, const std::vector &variances, + const int64_t image_size_w, const int64_t image_size_h, const float step_w, const float step_h, + const bool clip, const bool flip, const float offset) { + this->set_min_sizes(min_sizes); + this->set_max_sizes(max_sizes); + this->set_aspect_ratios(aspect_ratios); + this->set_variances(variances); + this->set_image_size_w(image_size_w); + this->set_image_size_h(image_size_h); + this->set_step_w(step_w); + this->set_step_h(step_h); + this->set_clip(clip); + this->set_flip(flip); + this->set_offset(offset); +} + +AbstractBasePtr PriorBoxInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args) { + MS_EXCEPTION_IF_NULL(primitive); + auto PriorBox_prim = primitive->cast(); + MS_EXCEPTION_IF_NULL(PriorBox_prim); + auto op_name = PriorBox_prim->name(); + MS_EXCEPTION_IF_NULL(input_args[0]); + std::vector different_aspect_ratios{1.0f}; + auto aspect_ratios = PriorBox_prim->get_aspect_ratios(); + for (int64_t i = 0; i < (int64_t)aspect_ratios.size(); i++) { + float ratio = aspect_ratios[i]; + bool exist = std::any_of(different_aspect_ratios.begin(), different_aspect_ratios.end(), + [&](float v) { return abs(ratio - v) < 1e-6; }); + if (!exist) { + different_aspect_ratios.emplace_back(ratio); + if (PriorBox_prim->get_flip()) { + different_aspect_ratios.emplace_back(1.0f / ratio); + } + } + } + int64_t num_priors_box = + PriorBox_prim->get_min_sizes().size() * different_aspect_ratios.size() + PriorBox_prim->get_max_sizes().size(); + auto input = CheckAndConvertUtils::ConvertShapePtrToShape("input_shape", input_args[0]->BuildShape(), op_name); + int64_t h = input[0] * input[1] * num_priors_box * 4; + std::vector output_shape{1, h, 1, 2}; + return std::make_shared(TypeIdToType(kNumberTypeFloat32), output_shape); +} + +REGISTER_PRIMITIVE_EVAL_IMPL(PriorBox, prim::kPrimPriorBox, PriorBoxInfer); +REGISTER_PRIMITIVE_C(kNamePriorBox, PriorBox); +} // namespace ops +} // namespace mindspore diff --git a/mindspore/core/ops/prior_box.h b/mindspore/core/ops/prior_box.h new file mode 100644 index 00000000000..a40b2671181 --- /dev/null +++ b/mindspore/core/ops/prior_box.h @@ -0,0 +1,69 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_CORE_OPS_PRIOR_BOX_H_ +#define MINDSPORE_CORE_OPS_PRIOR_BOX_H_ +#include +#include + +#include "ops/primitive_c.h" +#include "abstract/abstract_value.h" +#include "utils/check_convert_utils.h" + +namespace mindspore { +namespace ops { +constexpr auto kNamePriorBox = "PriorBox"; +class PriorBox : public PrimitiveC { + public: + PriorBox() : PrimitiveC(kNamePriorBox) {} + ~PriorBox() = default; + MS_DECLARE_PARENT(PriorBox, PrimitiveC); + + void Init(const std::vector &min_sizes, const std::vector &max_sizes, + const std::vector &aspect_ratios, const std::vector &variances, const int64_t image_size_w, + const int64_t image_size_h, const float step_w, const float step_h, const bool clip, const bool flip, + const float offset); + void set_min_sizes(const std::vector &min_sizes); + void set_max_sizes(const std::vector &max_sizes); + void set_aspect_ratios(const std::vector &aspect_ratios); + void set_variances(const std::vector &variances); + void set_image_size_w(const int64_t image_size_w); + void set_image_size_h(const int64_t image_size_h); + void set_step_w(const float step_w); + void set_step_h(const float step_h); + void set_clip(const bool clip); + void set_flip(const bool flip); + void set_offset(const float offset); + std::vector get_min_sizes() const; + std::vector get_max_sizes() const; + std::vector get_aspect_ratios() const; + std::vector get_variances() const; + int64_t get_image_size_w() const; + int64_t get_image_size_h() const; + float get_step_w() const; + float get_step_h() const; + bool get_flip() const; + bool get_clip() const; + float get_offset() const; +}; + +AbstractBasePtr PriorBoxInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args); +using PrimPriorBoxPtr = std::shared_ptr; +} // namespace ops +} // namespace mindspore + +#endif // MINDSPORE_CORE_OPS_PRIOR_BOX_H_ diff --git a/mindspore/core/ops/proposal.cc b/mindspore/core/ops/proposal.cc new file mode 100644 index 00000000000..81981f21444 --- /dev/null +++ b/mindspore/core/ops/proposal.cc @@ -0,0 +1,96 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
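A worked example of the box count computed in PriorBoxInfer above, using hypothetical attribute values:

    // min_sizes = {32, 64}, max_sizes = {96}, aspect_ratios = {2.0}, flip = true
    // different_aspect_ratios starts as {1.0}, then gains 2.0 and 1.0 / 2.0 -> 3 entries
    // num_priors_box = min_sizes.size() * 3 + max_sizes.size() = 2 * 3 + 1 = 7
    // for a 19 x 19 feature map: output shape = {1, 19 * 19 * 7 * 4, 1, 2}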
+ */ + +#include + +#include "ops/proposal.h" +#include "ops/op_utils.h" +#include "utils/check_convert_utils.h" +#include "abstract/primitive_infer_map.h" + +namespace mindspore { +namespace ops { +void Proposal::set_feat_stride(const float feat_stride) { this->AddAttr(kFeatStride, MakeValue(feat_stride)); } + +float Proposal::get_feat_stride() const { + auto value_ptr = GetAttr(kFeatStride); + return GetValue(value_ptr); +} + +void Proposal::set_base_size(const float base_size) { this->AddAttr(kBaseSize, MakeValue(base_size)); } + +float Proposal::get_base_size() const { + auto value_ptr = GetAttr(kBaseSize); + return GetValue(value_ptr); +} + +void Proposal::set_min_size(const float min_size) { this->AddAttr(kMinSize, MakeValue(min_size)); } + +float Proposal::get_min_size() const { + auto value_ptr = GetAttr(kMinSize); + return GetValue(value_ptr); +} + +void Proposal::set_ratio(const std::vector &ratio) { this->AddAttr(kRatio, MakeValue(ratio)); } + +std::vector Proposal::get_ratio() const { + auto value_ptr = GetAttr(kRatio); + return GetValue>(value_ptr); +} + +void Proposal::set_scale(const std::vector &scale) { this->AddAttr(kScale, MakeValue(scale)); } + +std::vector Proposal::get_scale() const { + auto value_ptr = GetAttr(kScale); + return GetValue>(value_ptr); +} + +void Proposal::set_pre_nms_topn(const int64_t pre_nms_topn) { this->AddAttr(kPreNmsTopn, MakeValue(pre_nms_topn)); } + +int64_t Proposal::get_pre_nms_topn() const { + auto value_ptr = GetAttr(kPreNmsTopn); + return GetValue(value_ptr); +} + +void Proposal::set_post_nms_topn(const int64_t post_nms_topn) { this->AddAttr(kPostNmsTopn, MakeValue(post_nms_topn)); } + +int64_t Proposal::get_post_nms_topn() const { + auto value_ptr = GetAttr(kPostNmsTopn); + return GetValue(value_ptr); +} + +void Proposal::set_nms_thresh(const float nms_thresh) { this->AddAttr(kNmsThresh, MakeValue(nms_thresh)); } + +float Proposal::get_nms_thresh() const { + auto value_ptr = GetAttr(kNmsThresh); + return GetValue(value_ptr); +} + +void Proposal::Init(const float feat_stride, const float base_size, const float min_size, + const std::vector &ratio, const std::vector &scale, const int64_t pre_nms_topn, + const int64_t post_nms_topn, const float nms_thresh) { + this->set_feat_stride(feat_stride); + this->set_base_size(base_size); + this->set_min_size(min_size); + this->set_ratio(ratio); + this->set_scale(scale); + this->set_pre_nms_topn(pre_nms_topn); + this->set_post_nms_topn(post_nms_topn); + this->set_nms_thresh(nms_thresh); +} +REGISTER_PRIMITIVE_C(kNameProposal, Proposal); +} // namespace ops +} // namespace mindspore diff --git a/mindspore/core/ops/proposal.h b/mindspore/core/ops/proposal.h new file mode 100644 index 00000000000..24c7b0727b0 --- /dev/null +++ b/mindspore/core/ops/proposal.h @@ -0,0 +1,58 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef MINDSPORE_CORE_OPS_PROPOSAL_H_ +#define MINDSPORE_CORE_OPS_PROPOSAL_H_ +#include + +#include "ops/primitive_c.h" +#include "abstract/abstract_value.h" +#include "utils/check_convert_utils.h" + +namespace mindspore { +namespace ops { +constexpr auto kNameProposal = "Proposal"; +class Proposal : public PrimitiveC { + public: + Proposal() : PrimitiveC(kNameProposal) {} + ~Proposal() = default; + MS_DECLARE_PARENT(Proposal, PrimitiveC); + + void Init(const float feat_stride, const float base_size, const float min_size, const std::vector &ratio, + const std::vector &scale, const int64_t pre_nms_topn, const int64_t post_nms_topn, + const float nms_thresh); + void set_feat_stride(const float feat_stride); + void set_base_size(const float base_size); + void set_min_size(const float min_size); + void set_ratio(const std::vector &ratio); + void set_scale(const std::vector &scale); + void set_pre_nms_topn(const int64_t pre_nms_topn); + void set_post_nms_topn(const int64_t post_nms_topn); + void set_nms_thresh(const float nms_thresh); + float get_feat_stride() const; + float get_base_size() const; + float get_min_size() const; + std::vector get_ratio() const; + std::vector get_scale() const; + int64_t get_pre_nms_topn() const; + int64_t get_post_nms_topn() const; + float get_nms_thresh() const; +}; + +} // namespace ops +} // namespace mindspore + +#endif // MINDSPORE_CORE_OPS_PROPOSAL_H_ diff --git a/mindspore/core/ops/quant_dtype_cast.cc b/mindspore/core/ops/quant_dtype_cast.cc new file mode 100644 index 00000000000..ddda8add7e3 --- /dev/null +++ b/mindspore/core/ops/quant_dtype_cast.cc @@ -0,0 +1,53 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "ops/quant_dtype_cast.h" + +namespace mindspore { +namespace ops { +void QuantDTypeCast::set_src_t(const int64_t src_t) { AddAttr(kSrcT, MakeValue(src_t)); } +int64_t QuantDTypeCast::get_src_t() const { + auto value_ptr = this->GetAttr(kSrcT); + return GetValue(value_ptr); +} +void QuantDTypeCast::set_dst_t(const int64_t dst_t) { AddAttr(kDstT, MakeValue(dst_t)); } +int64_t QuantDTypeCast::get_dst_t() const { + auto value_ptr = this->GetAttr(kDstT); + return GetValue(value_ptr); +} +void QuantDTypeCast::Init(const int64_t src_t, const int64_t dst_t) { + this->set_src_t(src_t); + this->set_dst_t(dst_t); +} +AbstractBasePtr QuantDTypeCastInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args) { + MS_EXCEPTION_IF_NULL(primitive); + auto QuantDTypeCast_prim = primitive->cast(); + MS_EXCEPTION_IF_NULL(QuantDTypeCast_prim); + auto op_name = QuantDTypeCast_prim->name(); + MS_EXCEPTION_IF_NULL(input_args[0]); + auto input_type = input_args[0]->BuildType()->cast(); + MS_EXCEPTION_IF_NULL(input_type); + MS_ASSERT(input_type->element() == TypeIdToType(TypeId(QuantDTypeCast_prim->get_dst_t()))); + auto input_shape = CheckAndConvertUtils::ConvertShapePtrToShape("input_shape", input_args[0]->BuildShape(), op_name); + return std::make_shared(TypeIdToType(TypeId(QuantDTypeCast_prim->get_dst_t())), + input_shape); +} + +REGISTER_PRIMITIVE_EVAL_IMPL(QuantDTypeCast, prim::kPrimQuantDTypeCast, QuantDTypeCastInfer); +REGISTER_PRIMITIVE_C(kNameQuantDTypeCast, QuantDTypeCast); +} // namespace ops +} // namespace mindspore diff --git a/mindspore/core/ops/quant_dtype_cast.h b/mindspore/core/ops/quant_dtype_cast.h new file mode 100644 index 00000000000..3b1631b27ac --- /dev/null +++ b/mindspore/core/ops/quant_dtype_cast.h @@ -0,0 +1,51 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
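A minimal configuration sketch for QuantDTypeCast as defined above; the TypeId enumerators used here are assumptions (any valid MindSpore TypeId works), and the infer function keeps the input shape while switching the element type to dst_t:

    #include "ops/quant_dtype_cast.h"

    // Illustrative only: describe a dequantize-style cast from int8 to float32.
    void BuildQuantDTypeCastExample() {
      mindspore::ops::QuantDTypeCast cast_op;
      cast_op.Init(static_cast<int64_t>(mindspore::kNumberTypeInt8),     // src_t
                   static_cast<int64_t>(mindspore::kNumberTypeFloat32)); // dst_t
    }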
+ */ + +#ifndef MINDSPORE_CORE_OPS_QUANTD_TYPE_CAST_H_ +#define MINDSPORE_CORE_OPS_QUANTD_TYPE_CAST_H_ + +#include +#include +#include +#include +#include +#include "ops/op_utils.h" +#include "ops/primitive_c.h" +#include "abstract/primitive_infer_map.h" +#include "abstract/abstract_value.h" +#include "utils/check_convert_utils.h" + +namespace mindspore { +namespace ops { +constexpr auto kNameQuantDTypeCast = "QuantDTypeCast"; +class QuantDTypeCast : public PrimitiveC { + public: + QuantDTypeCast() : PrimitiveC(kNameQuantDTypeCast) {} + ~QuantDTypeCast() = default; + MS_DECLARE_PARENT(QuantDTypeCast, PrimitiveC); + void Init(const int64_t src_t, const int64_t dst_t); + void set_src_t(const int64_t src_t); + int64_t get_src_t() const; + void set_dst_t(const int64_t dst_t); + int64_t get_dst_t() const; +}; +AbstractBasePtr QuantDTypeCastInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args); +using PrimQuantDTypeCastPtr = std::shared_ptr; +} // namespace ops +} // namespace mindspore + +#endif // MINDSPORE_CORE_OPS_QUANTD_TYPE_CAST_H_ diff --git a/mindspore/core/ops/range.cc b/mindspore/core/ops/range.cc new file mode 100644 index 00000000000..a5f1fa8663e --- /dev/null +++ b/mindspore/core/ops/range.cc @@ -0,0 +1,113 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#include "ops/range.h" +#include +#include +#include +#include +#include +#include "ops/op_utils.h" +#include "utils/check_convert_utils.h" +#include "abstract/primitive_infer_map.h" + +namespace mindspore { +namespace ops { +void Range::set_d_type(const int64_t d_type) { this->AddAttr(kDType, MakeValue(d_type)); } + +int64_t Range::get_d_type() const { + auto value_ptr = GetAttr(kDType); + return GetValue(value_ptr); +} + +void Range::set_start(const int64_t start) { this->AddAttr(kStart, MakeValue(start)); } + +int64_t Range::get_start() const { + auto value_ptr = GetAttr(kStart); + return GetValue(value_ptr); +} + +void Range::set_limit(const int64_t limit) { this->AddAttr(kLimit, MakeValue(limit)); } + +int64_t Range::get_limit() const { + auto value_ptr = GetAttr(kLimit); + return GetValue(value_ptr); +} + +void Range::set_delta(const int64_t delta) { this->AddAttr(kDelta, MakeValue(delta)); } + +int64_t Range::get_delta() const { + auto value_ptr = GetAttr(kDelta); + return GetValue(value_ptr); +} + +void Range::Init(const int64_t d_type, const int64_t start, const int64_t limit, const int64_t delta) { + this->set_d_type(d_type); + this->set_start(start); + this->set_limit(limit); + this->set_delta(delta); +} + +AbstractBasePtr RangeInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args) { + MS_EXCEPTION_IF_NULL(primitive); + auto prim = primitive->cast(); + MS_EXCEPTION_IF_NULL(prim); + int64_t shape_size; + TypeId dtype; + if (input_args.size() == 3) { + MS_EXCEPTION_IF_NULL(input_args[0]->BuildValue()); + MS_EXCEPTION_IF_NULL(input_args[1]->BuildValue()); + MS_EXCEPTION_IF_NULL(input_args[2]->BuildValue()); + auto start_tensor = input_args[0]->BuildValue()->cast(); + auto limit_tensor = input_args[1]->BuildValue()->cast(); + auto delta_tensor = input_args[2]->BuildValue()->cast(); + dtype = static_cast(start_tensor->data_type_c()); + switch (dtype) { + case kNumberTypeInt: + case kNumberTypeInt32: { + auto start = *reinterpret_cast(start_tensor->data_c()); + auto limit = *reinterpret_cast(limit_tensor->data_c()); + auto delta = *reinterpret_cast(delta_tensor->data_c()); + shape_size = + std::max(static_cast(std::ceil(static_cast(limit - start) / delta)), static_cast(0)); + } break; + case kNumberTypeFloat32: + case kNumberTypeFloat: { + auto start = *reinterpret_cast(start_tensor->data_c()); + auto limit = *reinterpret_cast(limit_tensor->data_c()); + auto delta = *reinterpret_cast(delta_tensor->data_c()); + shape_size = + std::max(static_cast(std::ceil(static_cast(limit - start) / delta)), static_cast(0)); + } break; + default: { + MS_LOG(EXCEPTION) << "Range has unsupported dataType: " << dtype; + } + } + } else { + int64_t start = prim->get_start(); + int64_t limit = prim->get_limit(); + int64_t delta = prim->get_delta(); + dtype = kNumberTypeInt32; + shape_size = + std::max(static_cast(std::ceil(LongToDouble(limit - start) / delta)), static_cast(0)); + } + return std::make_shared( + TypeIdToType(dtype), std::make_shared(std::vector{shape_size})); +} +REGISTER_PRIMITIVE_EVAL_IMPL(Range, prim::kPrimRange, RangeInfer); +REGISTER_PRIMITIVE_C(kNameRange, Range); +} // namespace ops +} // namespace mindspore diff --git a/mindspore/core/ops/range.h b/mindspore/core/ops/range.h new file mode 100644 index 00000000000..aba04823a4b --- /dev/null +++ b/mindspore/core/ops/range.h @@ -0,0 +1,52 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not 
use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_CORE_OPS_RANGE_H_ +#define MINDSPORE_CORE_OPS_RANGE_H_ +#include +#include +#include +#include +#include "ops/primitive_c.h" +#include "abstract/abstract_value.h" +#include "utils/check_convert_utils.h" + +namespace mindspore { +namespace ops { +constexpr auto kNameRange = "Range"; +class Range : public PrimitiveC { + public: + Range() : PrimitiveC(kNameRange) {} + ~Range() = default; + MS_DECLARE_PARENT(Range, PrimitiveC); + void Init(const int64_t d_type, const int64_t start, const int64_t limit, const int64_t delta); + void set_d_type(const int64_t d_type); + void set_start(const int64_t start); + void set_limit(const int64_t limit); + void set_delta(const int64_t delta); + int64_t get_d_type() const; + int64_t get_start() const; + int64_t get_limit() const; + int64_t get_delta() const; +}; + +AbstractBasePtr RangeInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args); +using PrimRangePtr = std::shared_ptr; +} // namespace ops +} // namespace mindspore + +#endif // MINDSPORE_CORE_OPS_RANGE_H_ diff --git a/mindspore/core/ops/rank.cc b/mindspore/core/ops/rank.cc new file mode 100644 index 00000000000..f080b0b3f0f --- /dev/null +++ b/mindspore/core/ops/rank.cc @@ -0,0 +1,41 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
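A quick check of the element-count formula used on the attribute path of RangeInfer above, with hypothetical values:

    // start = 2, limit = 10, delta = 3
    // shape_size = max(ceil((10 - 2) / 3.0), 0) = 3
    // so the inferred output is an int32 tensor of shape {3}, matching the values {2, 5, 8}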
+ */ + +#include "ops/rank.h" + +namespace mindspore { +namespace ops { +namespace { +TypePtr RankInferType(const PrimitivePtr &prim, const std::vector &input_args) { + MS_EXCEPTION_IF_NULL(prim); + auto Rank_prim = prim->cast(); + MS_EXCEPTION_IF_NULL(Rank_prim); + auto op_name = Rank_prim->name(); + auto infer_dtype = input_args[0]->BuildType(); + CheckAndConvertUtils::CheckSubClass("x", infer_dtype, {TypeIdToType(kObjectTypeTensorType)}, op_name); + return TypeIdToType(kMetaTypeNone); +} +} // namespace +AbstractBasePtr RankInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args) { + std::vector infer_shape; + return std::make_shared(RankInferType(primitive, input_args), infer_shape); +} + +REGISTER_PRIMITIVE_EVAL_IMPL(Rank, prim::kPrimRank, RankInfer); +REGISTER_PRIMITIVE_C(kNameRank, Rank); +} // namespace ops +} // namespace mindspore diff --git a/mindspore/core/ops/rank.h b/mindspore/core/ops/rank.h new file mode 100644 index 00000000000..506e6d0172e --- /dev/null +++ b/mindspore/core/ops/rank.h @@ -0,0 +1,42 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_CORE_OPS_RANK_H_ +#define MINDSPORE_CORE_OPS_RANK_H_ +#include +#include + +#include "ops/primitive_c.h" +#include "ops/op_utils.h" +#include "abstract/abstract_value.h" +#include "utils/check_convert_utils.h" + +namespace mindspore { +namespace ops { +constexpr auto kNameRank = "Rank"; +class Rank : public PrimitiveC { + public: + Rank() : PrimitiveC(kNameRank) { auto prim_name = name(); } + ~Rank() = default; + MS_DECLARE_PARENT(Rank, PrimitiveC); + void Init() {} +}; +AbstractBasePtr RankInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args); +using PrimRankPtr = std::shared_ptr; +} // namespace ops +} // namespace mindspore +#endif // MINDSPORE_CORE_OPS_RANK_H_ diff --git a/mindspore/core/ops/real_div.cc b/mindspore/core/ops/real_div.cc new file mode 100644 index 00000000000..9722440f0f4 --- /dev/null +++ b/mindspore/core/ops/real_div.cc @@ -0,0 +1,57 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include +#include +#include +#include +#include "ops/real_div.h" +#include "ops/op_utils.h" +#include "utils/check_convert_utils.h" +#include "abstract/primitive_infer_map.h" + +namespace mindspore { +namespace ops { +namespace { +abstract::ShapePtr InferShape(const PrimitivePtr &primitive, const std::vector &input_args) { + MS_EXCEPTION_IF_NULL(primitive); + auto op_name = primitive->name(); + return BroadCastInferShape(op_name, input_args); +} + +TypePtr InferType(const PrimitivePtr &prim, const std::vector &input_args) { + MS_EXCEPTION_IF_NULL(prim); + CheckAndConvertUtils::CheckInteger("input number", input_args.size(), kEqual, 2, prim->name()); + for (const auto &item : input_args) { + MS_EXCEPTION_IF_NULL(item); + } + std::map types; + types.emplace("x", input_args[0]->BuildType()); + types.emplace("y", input_args[1]->BuildType()); + auto infer_type = CheckAndConvertUtils::CheckTensorTypeSame(types, common_valid_types, prim->name()); + return TypeIdToType(infer_type); +} +} // namespace + +AbstractBasePtr RealDivInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args) { + return std::make_shared(InferType(primitive, input_args), + InferShape(primitive, input_args)->shape()); +} +REGISTER_PRIMITIVE_EVAL_IMPL(RealDiv, prim::kPrimRealDiv, RealDivInfer); +REGISTER_PRIMITIVE_C(kNameRealDiv, RealDiv); +} // namespace ops +} // namespace mindspore diff --git a/mindspore/core/ops/real_div.h b/mindspore/core/ops/real_div.h new file mode 100644 index 00000000000..043e53f5871 --- /dev/null +++ b/mindspore/core/ops/real_div.h @@ -0,0 +1,42 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_CORE_OPS_REAL_DIV_H_ +#define MINDSPORE_CORE_OPS_REAL_DIV_H_ +#include +#include +#include "ops/primitive_c.h" +#include "abstract/abstract_value.h" +#include "utils/check_convert_utils.h" + +namespace mindspore { +namespace ops { +constexpr auto kNameRealDiv = "RealDiv"; +class RealDiv : public PrimitiveC { + public: + RealDiv() : PrimitiveC(kNameRealDiv) { InitIOName({"x", "y"}, {"output"}); } + ~RealDiv() = default; + MS_DECLARE_PARENT(RealDiv, PrimitiveC); + void Init() {} +}; + +AbstractBasePtr RealDivInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args); +using PrimRealDivPtr = std::shared_ptr; +} // namespace ops +} // namespace mindspore + +#endif // MINDSPORE_CORE_OPS_REAL_DIV_H_ diff --git a/mindspore/core/ops/reciprocal.cc b/mindspore/core/ops/reciprocal.cc new file mode 100644 index 00000000000..684c66bae53 --- /dev/null +++ b/mindspore/core/ops/reciprocal.cc @@ -0,0 +1,50 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include +#include +#include +#include +#include +#include "ops/reciprocal.h" +#include "ops/op_utils.h" +#include "utils/check_convert_utils.h" +#include "abstract/primitive_infer_map.h" + +namespace mindspore { +namespace ops { +AbstractBasePtr ReciprocalInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args) { + MS_EXCEPTION_IF_NULL(primitive); + auto reciprocal_prim = primitive->cast(); + MS_EXCEPTION_IF_NULL(reciprocal_prim); + auto prim_name = reciprocal_prim->name(); + for (const auto &item : input_args) { + MS_EXCEPTION_IF_NULL(item); + } + // infer shape + auto in_shape = + CheckAndConvertUtils::ConvertShapePtrToShape("input_shape", input_args[0]->GetShapeTrack(), prim_name); + // infer type + auto x_type = input_args[0]->BuildType()->cast()->element(); + std::set valid_x_type = {TypeIdToType(kObjectTypeTensorType)}; + CheckAndConvertUtils::CheckSubClass("x_type", x_type, valid_x_type, prim_name); + return std::make_shared(x_type, in_shape); +} +REGISTER_PRIMITIVE_EVAL_IMPL(Reciprocal, prim::kPrimReciprocal, ReciprocalInfer); +REGISTER_PRIMITIVE_C(kNameReciprocal, Reciprocal); +} // namespace ops +} // namespace mindspore diff --git a/mindspore/core/ops/reciprocal.h b/mindspore/core/ops/reciprocal.h new file mode 100644 index 00000000000..a75b5fdb597 --- /dev/null +++ b/mindspore/core/ops/reciprocal.h @@ -0,0 +1,42 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
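For reference, REGISTER_PRIMITIVE_C feeds the OpPrimCRegister singleton shown earlier in primitive_c.cc. A rough lookup sketch, assuming OpPrimCDefineFunc is a callable returning std::shared_ptr<PrimitiveC> (the exact map types were stripped by the diff rendering):

    #include "ops/primitive_c.h"

    // Illustrative only: fetch the factory registered for "Reciprocal" and build one.
    std::shared_ptr<mindspore::ops::PrimitiveC> MakeReciprocal() {
      auto fns = mindspore::ops::OpPrimCRegister::GetInstance().GetPrimCMap();
      auto it = fns.find("Reciprocal");
      return it != fns.end() ? it->second() : nullptr;
    }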
+ */ + +#ifndef MINDSPORE_CORE_OPS_RECIPROCAL_H_ +#define MINDSPORE_CORE_OPS_RECIPROCAL_H_ +#include +#include +#include "ops/primitive_c.h" +#include "abstract/abstract_value.h" +#include "utils/check_convert_utils.h" + +namespace mindspore { +namespace ops { +constexpr auto kNameReciprocal = "Reciprocal"; +class Reciprocal : public PrimitiveC { + public: + Reciprocal() : PrimitiveC(kNameReciprocal) { InitIOName({"x"}, {"y"}); } + ~Reciprocal() = default; + MS_DECLARE_PARENT(Reciprocal, PrimitiveC); + void Init() {} +}; + +AbstractBasePtr ReciprocalInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args); +using PrimReciprocalPtr = std::shared_ptr; +} // namespace ops +} // namespace mindspore + +#endif // MINDSPORE_CORE_OPS_RECIPROCAL_H_ diff --git a/mindspore/core/ops/reduce.cc b/mindspore/core/ops/reduce.cc new file mode 100644 index 00000000000..4926f2015c1 --- /dev/null +++ b/mindspore/core/ops/reduce.cc @@ -0,0 +1,114 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#include "ops/reduce.h" +#include +#include +#include +#include +#include +#include "ops/op_utils.h" +#include "utils/check_convert_utils.h" +#include "abstract/primitive_infer_map.h" + +namespace mindspore { +namespace ops { +namespace { + +void reduce_one_axis(const int64_t one_axis, const int64_t dim, std::set axis_reduce) { + CheckAndConvertUtils::CheckInRange("axis", one_axis, kIncludeLeft, {-dim, dim}, "Reduce"); + if (one_axis < 0) { + axis_reduce.insert(one_axis); + } +} + +std::vector infer_shape_reduce(std::vector input_x_shape, const ValuePtr axis_value, + const bool keep_dims, const std::string &prim_name) { + int64_t dim = input_x_shape.size(); + std::set axis_reduce; + if (axis_value == nullptr) { + std::vector vec; + if (keep_dims) { + return std::vector(dim, 1); + } + return vec; + } + auto axis_value_elem = GetValue>(axis_value); + if (axis_value_elem.size() == 1) { + reduce_one_axis(axis_value_elem[0], dim, axis_reduce); + } else { + int64_t size = axis_value_elem.size(); + for (int64_t i = 0; i < size; i++) { + reduce_one_axis(axis_value_elem[i], dim, axis_reduce); + } + } + std::vector out_shape; + for (int64_t i = 0; i < dim; i++) { + if (axis_reduce.find(i) != axis_reduce.end()) { + if (keep_dims) { + out_shape.emplace_back(1); + } + } else { + out_shape.emplace_back(input_x_shape[i]); + } + } + return out_shape; +} + +abstract::ShapePtr InferShape(const PrimitivePtr &primitive, const std::vector &input_args) { + auto axis_value = input_args[1]->BuildValue(); + + MS_EXCEPTION_IF_NULL(primitive); + auto reduce_prim = primitive->cast(); + MS_EXCEPTION_IF_NULL(reduce_prim); + auto prim_name = reduce_prim->name(); + auto input_x_shape = + CheckAndConvertUtils::ConvertShapePtrToShape("input_x_shape", input_args[0]->BuildShape(), prim_name); + + auto keep_dims = reduce_prim->get_keep_dims(); + auto out_shape = infer_shape_reduce(input_x_shape, axis_value, keep_dims, prim_name); + + return 
std::make_shared(out_shape); +} + +TypePtr InferType(const PrimitivePtr &prim, const std::vector &input_args) { + for (const auto &item : input_args) { + MS_EXCEPTION_IF_NULL(item); + } + std::map types; + types.emplace("input_x", input_args[0]->BuildType()); + auto infer_type = CheckAndConvertUtils::CheckTensorTypeSame(types, common_valid_types, prim->name()); + return TypeIdToType(infer_type); +} +} // namespace + +void Reduce::set_keep_dims(const bool keep_dims) { this->AddAttr(kKeepDims, MakeValue(keep_dims)); } + +bool Reduce::get_keep_dims() const { + auto value_ptr = GetAttr(kKeepDims); + return GetValue(value_ptr); +} + +void Reduce::Init(const bool keep_dims) { this->set_keep_dims(keep_dims); } + +AbstractBasePtr ReduceInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args) { + return std::make_shared(InferType(primitive, input_args), + InferShape(primitive, input_args)->shape()); +} +REGISTER_PRIMITIVE_EVAL_IMPL(Reduce, prim::kPrimReduce, ReduceInfer); +REGISTER_PRIMITIVE_C(kNameReduce, Reduce); +} // namespace ops +} // namespace mindspore diff --git a/mindspore/core/ops/reduce.h b/mindspore/core/ops/reduce.h new file mode 100644 index 00000000000..29a821ff527 --- /dev/null +++ b/mindspore/core/ops/reduce.h @@ -0,0 +1,46 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_CORE_OPS_REDUCE_H_ +#define MINDSPORE_CORE_OPS_REDUCE_H_ +#include +#include +#include +#include +#include "ops/primitive_c.h" +#include "abstract/abstract_value.h" +#include "utils/check_convert_utils.h" + +namespace mindspore { +namespace ops { +constexpr auto kNameReduce = "Reduce"; +class Reduce : public PrimitiveC { + public: + Reduce() : PrimitiveC(kNameReduce) { InitIOName({"input_x", "axis"}, {"y"}); } + explicit Reduce(const std::string k_name) : PrimitiveC(k_name) { InitIOName({"input_x", "axis"}, {"y"}); } + ~Reduce() = default; + MS_DECLARE_PARENT(Reduce, PrimitiveC); + void Init(const bool keep_dims = false); + void set_keep_dims(const bool keep_dims); + bool get_keep_dims() const; +}; +AbstractBasePtr ReduceInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args); +using PrimReducePtr = std::shared_ptr; +} // namespace ops +} // namespace mindspore + +#endif // MINDSPORE_CORE_OPS_REDUCE_H_ diff --git a/mindspore/core/ops/reduce_all.cc b/mindspore/core/ops/reduce_all.cc new file mode 100644 index 00000000000..8f55e26f744 --- /dev/null +++ b/mindspore/core/ops/reduce_all.cc @@ -0,0 +1,31 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#include "ops/reduce_all.h" +#include +#include +#include +#include +#include +#include "ops/op_utils.h" +#include "utils/check_convert_utils.h" +#include "abstract/primitive_infer_map.h" + +namespace mindspore { +namespace ops { + +REGISTER_PRIMITIVE_C(kNameReduceAll, ReduceAll); +} // namespace ops +} // namespace mindspore diff --git a/mindspore/core/ops/reduce_all.h b/mindspore/core/ops/reduce_all.h new file mode 100644 index 00000000000..83de77ad6cc --- /dev/null +++ b/mindspore/core/ops/reduce_all.h @@ -0,0 +1,39 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_CORE_OPS_REDUCE_ALL_H_ +#define MINDSPORE_CORE_OPS_REDUCE_ALL_H_ +#include +#include +#include +#include +#include "ops/reduce.h" +#include "abstract/abstract_value.h" +#include "utils/check_convert_utils.h" + +namespace mindspore { +namespace ops { +constexpr auto kNameReduceAll = "ReduceAll"; +class ReduceAll : public Reduce { + public: + ReduceAll() : Reduce(kNameReduceAll) { InitIOName({"input_x", "axis"}, {"y"}); } + ~ReduceAll() = default; + MS_DECLARE_PARENT(ReduceAll, Reduce); +}; +} // namespace ops +} // namespace mindspore + +#endif // MINDSPORE_CORE_OPS_REDUCE_ALL_H_ diff --git a/mindspore/core/ops/reduce_any.cc b/mindspore/core/ops/reduce_any.cc new file mode 100644 index 00000000000..8f257e112ec --- /dev/null +++ b/mindspore/core/ops/reduce_any.cc @@ -0,0 +1,30 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
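The Reduce subclasses in these files add no state of their own; each forwards a distinct name to the Reduce base class and reuses its keep_dims handling. A brief illustrative sketch:

    #include "ops/reduce_all.h"

    // Illustrative only: ReduceAll, ReduceAny, ReduceASum, ReduceMax, ReduceMean, ...
    // all inherit set_keep_dims / get_keep_dims from Reduce.
    void BuildReduceAllExample() {
      mindspore::ops::ReduceAll reduce_all;
      reduce_all.set_keep_dims(true);
      bool keep_dims = reduce_all.get_keep_dims();  // true
      (void)keep_dims;
    }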
+ */ +#include "ops/reduce_any.h" +#include +#include +#include +#include +#include +#include "ops/op_utils.h" +#include "utils/check_convert_utils.h" +#include "abstract/primitive_infer_map.h" + +namespace mindspore { +namespace ops { +REGISTER_PRIMITIVE_C(kNameReduceAny, ReduceAny); +} // namespace ops +} // namespace mindspore diff --git a/mindspore/core/ops/reduce_any.h b/mindspore/core/ops/reduce_any.h new file mode 100644 index 00000000000..3957ee10995 --- /dev/null +++ b/mindspore/core/ops/reduce_any.h @@ -0,0 +1,39 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_CORE_OPS_REDUCE_ANY_H_ +#define MINDSPORE_CORE_OPS_REDUCE_ANY_H_ +#include +#include +#include +#include +#include "ops/reduce.h" +#include "abstract/abstract_value.h" +#include "utils/check_convert_utils.h" + +namespace mindspore { +namespace ops { +constexpr auto kNameReduceAny = "ReduceAny"; +class ReduceAny : public Reduce { + public: + ReduceAny() : Reduce(kNameReduceAny) { InitIOName({"input_x", "axis"}, {"y"}); } + ~ReduceAny() = default; + MS_DECLARE_PARENT(ReduceAny, Reduce); +}; +} // namespace ops +} // namespace mindspore + +#endif // MINDSPORE_CORE_OPS_REDUCE_ANY_H_ diff --git a/mindspore/core/ops/reduce_asum.cc b/mindspore/core/ops/reduce_asum.cc new file mode 100644 index 00000000000..028cd6f9206 --- /dev/null +++ b/mindspore/core/ops/reduce_asum.cc @@ -0,0 +1,26 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include + +#include "ops/reduce_asum.h" +#include "ops/op_utils.h" + +namespace mindspore { +namespace ops { +REGISTER_PRIMITIVE_C(kNameReduceASum, ReduceASum); +} // namespace ops +} // namespace mindspore diff --git a/mindspore/core/ops/reduce_asum.h b/mindspore/core/ops/reduce_asum.h new file mode 100644 index 00000000000..9e6e36889c5 --- /dev/null +++ b/mindspore/core/ops/reduce_asum.h @@ -0,0 +1,40 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_CORE_OPS_REDUCE_ASUM_H_ +#define MINDSPORE_CORE_OPS_REDUCE_ASUM_H_ +#include +#include +#include +#include +#include "ops/reduce.h" +#include "abstract/abstract_value.h" +#include "utils/check_convert_utils.h" + +namespace mindspore { +namespace ops { +constexpr auto kNameReduceASum = "ReduceASum"; +class ReduceASum : public Reduce { + public: + ReduceASum() : Reduce(kNameReduceASum) { InitIOName({"input_x", "axis"}, {"y"}); } + ~ReduceASum() = default; + MS_DECLARE_PARENT(ReduceASum, Reduce); + void Init() {} +}; +} // namespace ops +} // namespace mindspore + +#endif // MINDSPORE_CORE_OPS_REDUCE_ASUM_H_ diff --git a/mindspore/core/c_ops/gather.cc b/mindspore/core/ops/reduce_max.cc similarity index 82% rename from mindspore/core/c_ops/gather.cc rename to mindspore/core/ops/reduce_max.cc index 00ffbfe577a..561dbd01f88 100644 --- a/mindspore/core/c_ops/gather.cc +++ b/mindspore/core/ops/reduce_max.cc @@ -14,9 +14,13 @@ * limitations under the License. */ -#include "c_ops/gather.h" #include +#include "ops/reduce_max.h" +#include "ops/op_utils.h" + namespace mindspore { -REGISTER_PRIMITIVE_C(kNameGather, Gather); +namespace ops { +REGISTER_PRIMITIVE_C(kNameReduceMax, ReduceMax); +} // namespace ops } // namespace mindspore diff --git a/mindspore/core/ops/reduce_max.h b/mindspore/core/ops/reduce_max.h new file mode 100644 index 00000000000..a7cbed8517b --- /dev/null +++ b/mindspore/core/ops/reduce_max.h @@ -0,0 +1,40 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_CORE_OPS_REDUCE_MAX_H_ +#define MINDSPORE_CORE_OPS_REDUCE_MAX_H_ +#include +#include +#include +#include +#include "ops/reduce.h" +#include "abstract/abstract_value.h" +#include "utils/check_convert_utils.h" + +namespace mindspore { +namespace ops { +constexpr auto kNameReduceMax = "ReduceMax"; +class ReduceMax : public Reduce { + public: + ReduceMax() : Reduce(kNameReduceMax) { InitIOName({"input_x", "axis"}, {"y"}); } + ~ReduceMax() = default; + MS_DECLARE_PARENT(ReduceMax, Reduce); + void Init() {} +}; +} // namespace ops +} // namespace mindspore + +#endif // MINDSPORE_CORE_OPS_REDUCE_MAX_H_ diff --git a/mindspore/core/ops/reduce_mean.cc b/mindspore/core/ops/reduce_mean.cc new file mode 100644 index 00000000000..267d88817e1 --- /dev/null +++ b/mindspore/core/ops/reduce_mean.cc @@ -0,0 +1,30 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +#include "ops/reduce_mean.h" +#include +#include +#include +#include +#include +#include "ops/op_utils.h" +#include "utils/check_convert_utils.h" +#include "abstract/primitive_infer_map.h" + +namespace mindspore { +namespace ops { +REGISTER_PRIMITIVE_C(kNameReduceMean, ReduceMean); +} // namespace ops +} // namespace mindspore diff --git a/mindspore/core/ops/reduce_mean.h b/mindspore/core/ops/reduce_mean.h new file mode 100644 index 00000000000..3bf643d8626 --- /dev/null +++ b/mindspore/core/ops/reduce_mean.h @@ -0,0 +1,39 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_CORE_OPS_REDUCE_MEAN_H_ +#define MINDSPORE_CORE_OPS_REDUCE_MEAN_H_ +#include +#include +#include +#include +#include "ops/reduce.h" +#include "abstract/abstract_value.h" +#include "utils/check_convert_utils.h" + +namespace mindspore { +namespace ops { +constexpr auto kNameReduceMean = "ReduceMean"; +class ReduceMean : public Reduce { + public: + ReduceMean() : Reduce(kNameReduceMean) { InitIOName({"input_x", "axis"}, {"y"}); } + ~ReduceMean() = default; + MS_DECLARE_PARENT(ReduceMean, Reduce); +}; +} // namespace ops +} // namespace mindspore + +#endif // MINDSPORE_CORE_OPS_REDUCE_MEAN_H_ diff --git a/mindspore/core/c_ops/floor.cc b/mindspore/core/ops/reduce_min.cc similarity index 85% rename from mindspore/core/c_ops/floor.cc rename to mindspore/core/ops/reduce_min.cc index 8dc8cf09dd9..fecfc261807 100644 --- a/mindspore/core/c_ops/floor.cc +++ b/mindspore/core/ops/reduce_min.cc @@ -14,9 +14,11 @@ * limitations under the License. */ -#include "c_ops/floor.h" +#include "ops/reduce_min.h" #include namespace mindspore { -REGISTER_PRIMITIVE_C(kNameFloor, Floor); +namespace ops { +REGISTER_PRIMITIVE_C(kNameReduceMin, ReduceMin); +} // namespace ops } // namespace mindspore diff --git a/mindspore/core/ops/reduce_min.h b/mindspore/core/ops/reduce_min.h new file mode 100644 index 00000000000..15972cafcec --- /dev/null +++ b/mindspore/core/ops/reduce_min.h @@ -0,0 +1,39 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef MINDSPORE_CORE_OPS_REDUCE_MIN_H_ +#define MINDSPORE_CORE_OPS_REDUCE_MIN_H_ +#include +#include +#include +#include +#include "ops/reduce.h" +#include "abstract/abstract_value.h" +#include "utils/check_convert_utils.h" + +namespace mindspore { +namespace ops { +constexpr auto kNameReduceMin = "ReduceMin"; +class ReduceMin : public Reduce { + public: + ReduceMin() : Reduce(kNameReduceMin) { InitIOName({"input_x", "axis"}, {"y"}); } + ~ReduceMin() = default; + MS_DECLARE_PARENT(ReduceMin, Reduce); +}; +} // namespace ops +} // namespace mindspore + +#endif // MINDSPORE_CORE_OPS_REDUCE_MIN_H_ diff --git a/mindspore/core/ops/reduce_prod.cc b/mindspore/core/ops/reduce_prod.cc new file mode 100644 index 00000000000..38e8b0a63b2 --- /dev/null +++ b/mindspore/core/ops/reduce_prod.cc @@ -0,0 +1,30 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#include "ops/reduce_prod.h" +#include +#include +#include +#include +#include +#include "ops/op_utils.h" +#include "utils/check_convert_utils.h" +#include "abstract/primitive_infer_map.h" + +namespace mindspore { +namespace ops { +REGISTER_PRIMITIVE_C(kNameReduceProd, ReduceProd); +} // namespace ops +} // namespace mindspore diff --git a/mindspore/core/ops/reduce_prod.h b/mindspore/core/ops/reduce_prod.h new file mode 100644 index 00000000000..f6c7f6506d0 --- /dev/null +++ b/mindspore/core/ops/reduce_prod.h @@ -0,0 +1,39 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef MINDSPORE_CORE_OPS_REDUCE_PROD_H_ +#define MINDSPORE_CORE_OPS_REDUCE_PROD_H_ +#include +#include +#include +#include +#include "ops/reduce.h" +#include "abstract/abstract_value.h" +#include "utils/check_convert_utils.h" + +namespace mindspore { +namespace ops { +constexpr auto kNameReduceProd = "ReduceProd"; +class ReduceProd : public Reduce { + public: + ReduceProd() : Reduce(kNameReduceProd) { InitIOName({"input_x", "axis"}, {"y"}); } + ~ReduceProd() = default; + MS_DECLARE_PARENT(ReduceProd, Reduce); +}; +} // namespace ops +} // namespace mindspore + +#endif // MINDSPORE_CORE_OPS_REDUCE_PROD_H_ diff --git a/mindspore/core/c_ops/logical_and.cc b/mindspore/core/ops/reduce_sum.cc similarity index 82% rename from mindspore/core/c_ops/logical_and.cc rename to mindspore/core/ops/reduce_sum.cc index 826f0a54689..ec4e5f7ee1f 100644 --- a/mindspore/core/c_ops/logical_and.cc +++ b/mindspore/core/ops/reduce_sum.cc @@ -14,9 +14,13 @@ * limitations under the License. */ -#include "c_ops/logical_and.h" #include +#include "ops/reduce_sum.h" +#include "ops/op_utils.h" + namespace mindspore { -REGISTER_PRIMITIVE_C(kNameLogicalAnd, LogicalAnd); +namespace ops { +REGISTER_PRIMITIVE_C(kNameReduceSum, ReduceSum); +} // namespace ops } // namespace mindspore diff --git a/mindspore/core/ops/reduce_sum.h b/mindspore/core/ops/reduce_sum.h new file mode 100644 index 00000000000..ec9e4a499df --- /dev/null +++ b/mindspore/core/ops/reduce_sum.h @@ -0,0 +1,40 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_CORE_OPS_REDUCE_SUM_H_ +#define MINDSPORE_CORE_OPS_REDUCE_SUM_H_ +#include +#include +#include +#include +#include "ops/reduce.h" +#include "abstract/abstract_value.h" +#include "utils/check_convert_utils.h" + +namespace mindspore { +namespace ops { +constexpr auto kNameReduceSum = "ReduceSum"; +class ReduceSum : public Reduce { + public: + ReduceSum() : Reduce(kNameReduceSum) { InitIOName({"input_x", "axis"}, {"y"}); } + ~ReduceSum() = default; + MS_DECLARE_PARENT(ReduceSum, Reduce); + void Init() {} +}; +} // namespace ops +} // namespace mindspore + +#endif // MINDSPORE_CORE_OPS_REDUCE_SUM_H_ diff --git a/mindspore/core/ops/reduce_sum_square.cc b/mindspore/core/ops/reduce_sum_square.cc new file mode 100644 index 00000000000..b28c4cbe42b --- /dev/null +++ b/mindspore/core/ops/reduce_sum_square.cc @@ -0,0 +1,26 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include + +#include "ops/reduce_sum_square.h" +#include "ops/op_utils.h" + +namespace mindspore { +namespace ops { +REGISTER_PRIMITIVE_C(kNameReduceSumSquare, ReduceSumSquare); +} // namespace ops +} // namespace mindspore diff --git a/mindspore/core/ops/reduce_sum_square.h b/mindspore/core/ops/reduce_sum_square.h new file mode 100644 index 00000000000..b5c4620fb6a --- /dev/null +++ b/mindspore/core/ops/reduce_sum_square.h @@ -0,0 +1,40 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_CORE_OPS_REDUCE_SUM_SQUARE_H_ +#define MINDSPORE_CORE_OPS_REDUCE_SUM_SQUARE_H_ +#include +#include +#include +#include +#include "ops/reduce.h" +#include "abstract/abstract_value.h" +#include "utils/check_convert_utils.h" + +namespace mindspore { +namespace ops { +constexpr auto kNameReduceSumSquare = "ReduceSumSquare"; +class ReduceSumSquare : public Reduce { + public: + ReduceSumSquare() : Reduce(kNameReduceSumSquare) { InitIOName({"input_x", "axis"}, {"y"}); } + ~ReduceSumSquare() = default; + MS_DECLARE_PARENT(ReduceSumSquare, Reduce); + void Init() {} +}; +} // namespace ops +} // namespace mindspore + +#endif // MINDSPORE_CORE_OPS_REDUCE_SUM_SQUARE_H_ diff --git a/mindspore/core/c_ops/custom_normalize.cc b/mindspore/core/ops/relu.cc similarity index 88% rename from mindspore/core/c_ops/custom_normalize.cc rename to mindspore/core/ops/relu.cc index f205ef2b214..d81f85ee4aa 100644 --- a/mindspore/core/c_ops/custom_normalize.cc +++ b/mindspore/core/ops/relu.cc @@ -14,10 +14,12 @@ * limitations under the License. */ -#include "c_ops/custom_normalize.h" +#include "ops/relu.h" #include "utils/check_convert_utils.h" #include "abstract/primitive_infer_map.h" namespace mindspore { -REGISTER_PRIMITIVE_C(kNameCustomNormalize, CustomNormalize); +namespace ops { +REGISTER_PRIMITIVE_C(kNameReLU, ReLU); +} // namespace ops } // namespace mindspore diff --git a/mindspore/core/ops/relu.h b/mindspore/core/ops/relu.h new file mode 100644 index 00000000000..50236de3fb7 --- /dev/null +++ b/mindspore/core/ops/relu.h @@ -0,0 +1,36 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef MINDSPORE_CORE_OPS_RELU_H_ +#define MINDSPORE_CORE_OPS_RELU_H_ +#include "ops/primitive_c.h" +#include "abstract/abstract_value.h" +#include "utils/check_convert_utils.h" + +namespace mindspore { +namespace ops { +constexpr auto kNameReLU = "ReLU"; +class ReLU : public PrimitiveC { + public: + ReLU() : PrimitiveC(kNameReLU) { InitIOName({"x"}, {"output"}); } + ~ReLU() = default; + MS_DECLARE_PARENT(ReLU, PrimitiveC); + void Init() {} +}; +} // namespace ops +} // namespace mindspore + +#endif // MINDSPORE_CORE_OPS_RELU_H_ diff --git a/mindspore/core/c_ops/relu6.cc b/mindspore/core/ops/relu6.cc similarity index 89% rename from mindspore/core/c_ops/relu6.cc rename to mindspore/core/ops/relu6.cc index bae8fc3e6f0..2ea9e494af6 100644 --- a/mindspore/core/c_ops/relu6.cc +++ b/mindspore/core/ops/relu6.cc @@ -14,16 +14,17 @@ * limitations under the License. */ -#include "c_ops/relu6.h" #include #include #include #include #include +#include "ops/relu6.h" #include "utils/check_convert_utils.h" #include "abstract/primitive_infer_map.h" namespace mindspore { +namespace ops { namespace { abstract::ShapePtr InferShape(const PrimitivePtr &primitive, const std::vector &input_args) { MS_EXCEPTION_IF_NULL(primitive); @@ -44,11 +45,12 @@ TypePtr InferType(const PrimitivePtr &prim, const std::vector & return TypeIdToType(infer_type); } } // namespace -AbstractBasePtr Relu6Infer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, +AbstractBasePtr ReLU6Infer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, const std::vector &input_args) { return std::make_shared(InferType(primitive, input_args), InferShape(primitive, input_args)->shape()); } -REGISTER_PRIMITIVE_EVAL_IMPL(Relu6, prim::kPrimRelu6, Relu6Infer); -REGISTER_PRIMITIVE_C(kNameRelu6, Relu6); +REGISTER_PRIMITIVE_EVAL_IMPL(ReLU6, prim::kPrimRelu6, ReLU6Infer); +REGISTER_PRIMITIVE_C(kNameReLU6, ReLU6); +} // namespace ops } // namespace mindspore diff --git a/mindspore/core/ops/relu6.h b/mindspore/core/ops/relu6.h new file mode 100644 index 00000000000..57003c2dcd0 --- /dev/null +++ b/mindspore/core/ops/relu6.h @@ -0,0 +1,42 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_CORE_OPS_RELU6_H_ +#define MINDSPORE_CORE_OPS_RELU6_H_ +#include +#include +#include +#include +#include "ops/primitive_c.h" +#include "abstract/abstract_value.h" +#include "utils/check_convert_utils.h" + +namespace mindspore { +namespace ops { +constexpr auto kNameReLU6 = "ReLU6"; +class ReLU6 : public PrimitiveC { + public: + ReLU6() : PrimitiveC(kNameReLU6) { InitIOName({"x"}, {"output"}); } + ~ReLU6() = default; + MS_DECLARE_PARENT(ReLU6, PrimitiveC); + void Init() {} +}; + +AbstractBasePtr ReLU6Infer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args); +using PrimReLU6Ptr = std::shared_ptr; +} // namespace ops +} // namespace mindspore +#endif // MINDSPORE_CORE_OPS_RELU6_H_ diff --git a/mindspore/core/ops/reshape.cc b/mindspore/core/ops/reshape.cc new file mode 100644 index 00000000000..b3649fe5fe4 --- /dev/null +++ b/mindspore/core/ops/reshape.cc @@ -0,0 +1,104 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include +#include +#include +#include +#include +#include +#include +#include "ops/reshape.h" +#include "utils/check_convert_utils.h" +#include "abstract/primitive_infer_map.h" + +namespace mindspore { +namespace ops { +AbstractBasePtr ReshapeInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args) { + MS_EXCEPTION_IF_NULL(primitive); + auto prim_name = primitive->name(); + CheckAndConvertUtils::CheckInteger("input number", input_args.size(), kEqual, 2, prim_name); + for (const auto &item : input_args) { + MS_EXCEPTION_IF_NULL(item); + } + auto x = input_args[0]->cast(); + MS_EXCEPTION_IF_NULL(x); + auto shape = input_args[1]->cast(); + MS_EXCEPTION_IF_NULL(shape); + auto shape_v = GetValue>(shape->BuildValue()); + int64_t neg_index = -1; + int64_t dim_prod = 1; + for (size_t i = 0; i < shape_v.size(); ++i) { + if (shape_v[i] == -1) { + if (neg_index != -1) { + MS_LOG(EXCEPTION) << "The Reshape's shape input can only has one -1 at most."; + } + neg_index = SizeToLong(i); + } else { + dim_prod *= shape_v[i]; + } + } + MS_EXCEPTION_IF_NULL(x->shape()); + auto x_shape = x->shape()->shape(); + int64_t arr_prod = + std::accumulate(x_shape.begin(), x_shape.end(), static_cast(1), std::multiplies()); + if (arr_prod <= 0) { + ShapeVector x_max_shape = x->shape()->max_shape(); + ShapeVector x_min_shape = x->shape()->min_shape(); + if (x_max_shape.empty()) { + x_max_shape = x_shape; + } + if (x_min_shape.empty()) { + x_min_shape = x_shape; + } + int64_t max_arr_prod = + std::accumulate(x_max_shape.begin(), x_max_shape.end(), static_cast(1), std::multiplies()); + int64_t min_arr_prod = + std::accumulate(x_min_shape.begin(), x_min_shape.end(), static_cast(1), std::multiplies()); + ShapeVector max_shape = shape_v; + ShapeVector min_shape = shape_v; + if (neg_index != -1) { + max_shape[neg_index] = max_arr_prod / dim_prod; + min_shape[neg_index] = min_arr_prod / dim_prod; + } else { + MS_LOG(EXCEPTION) 
<< "For dynamic shape, Reshape's shape input must have neg index"; + } + return std::make_shared(x->element(), + std::make_shared(shape_v, min_shape, max_shape)); + } else { + if (dim_prod <= 0 || arr_prod % dim_prod != 0) { + MS_LOG(EXCEPTION) << "The product of input_x's shape should > 0, and can be divided by product of input_shape, " + "but product of input_x's shape is " + << arr_prod << ", product of input_shape is" << dim_prod; + } + if (neg_index != -1) { + shape_v[neg_index] = arr_prod / dim_prod; + dim_prod *= shape_v[neg_index]; + } + if (arr_prod != dim_prod) { + MS_LOG(EXCEPTION) << "The product of input_x's shape should be equal to product of input_shape, " + "but product of input_x's shape is " + << arr_prod << ", product of input_shape is" << dim_prod; + } + return std::make_shared(x->element(), std::make_shared(shape_v)); + } +} + +REGISTER_PRIMITIVE_EVAL_IMPL(Reshape, prim::kPrimReshape, ReshapeInfer); +REGISTER_PRIMITIVE_C(kNameReshape, Reshape); +} // namespace ops +} // namespace mindspore diff --git a/mindspore/core/c_ops/reshape.h b/mindspore/core/ops/reshape.h similarity index 87% rename from mindspore/core/c_ops/reshape.h rename to mindspore/core/ops/reshape.h index 03647308574..38c4b0db5a0 100644 --- a/mindspore/core/c_ops/reshape.h +++ b/mindspore/core/ops/reshape.h @@ -13,18 +13,19 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -#ifndef MINDSPORE_CORE_C_OPS_RESHAPE_H_ -#define MINDSPORE_CORE_C_OPS_RESHAPE_H_ +#ifndef MINDSPORE_CORE_OPS_RESHAPE_H_ +#define MINDSPORE_CORE_OPS_RESHAPE_H_ #include #include #include #include -#include "c_ops/primitive_c.h" +#include "ops/primitive_c.h" #include "abstract/abstract_value.h" #include "utils/check_convert_utils.h" namespace mindspore { +namespace ops { constexpr auto kNameReshape = "Reshape"; class Reshape : public PrimitiveC { public: @@ -37,6 +38,7 @@ class Reshape : public PrimitiveC { AbstractBasePtr ReshapeInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, const std::vector &input_args); using PrimReshapePtr = std::shared_ptr; +} // namespace ops } // namespace mindspore -#endif // MINDSPORE_CORE_C_OPS_RESHAPE_H_ +#endif // MINDSPORE_CORE_OPS_RESHAPE_H_ diff --git a/mindspore/core/ops/resize.cc b/mindspore/core/ops/resize.cc new file mode 100644 index 00000000000..7af3f9aff67 --- /dev/null +++ b/mindspore/core/ops/resize.cc @@ -0,0 +1,130 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include +#include +#include +#include +#include "ops/resize.h" +#include "ops/op_utils.h" +#include "utils/check_convert_utils.h" +#include "abstract/primitive_infer_map.h" + +namespace mindspore { +namespace ops { +void Resize::Init(const Format format, const ResizeMethod method, const int64_t new_height, const int64_t new_width, + const bool preserve_aspect_ratio, const CoordinateTransformMode coordinate_transform_mode, + const float cubic_coeff, const int64_t exclude_outside, const float extrapolation_value, + const NearestMode nearest_mode) { + this->set_format(format); + this->set_method(method); + this->set_new_height(new_height); + this->set_new_width(new_width); + this->set_preserve_aspect_ratio(preserve_aspect_ratio); + this->set_coordinate_transform_mode(coordinate_transform_mode); + this->set_cubic_coeff(cubic_coeff); + this->set_exclude_outside(exclude_outside); + this->set_extrapolation_value(extrapolation_value); + this->set_nearest_mode(nearest_mode); +} +void Resize::set_format(const Format format) { + int64_t swi = format; + this->AddAttr(kFormat, MakeValue(swi)); +} + +void Resize::set_method(const ResizeMethod method) { + auto swi = (int64_t)method; + this->AddAttr(kMethod, MakeValue(swi)); +} + +void Resize::set_new_height(const int64_t new_height) { this->AddAttr(kNewHeight, MakeValue(new_height)); } + +void Resize::set_new_width(const int64_t new_width) { this->AddAttr(kNewWidth, MakeValue(new_width)); } + +void Resize::set_preserve_aspect_ratio(const bool preserve_aspect_ratio) { + this->AddAttr(kPreserveAspectRatio, MakeValue(preserve_aspect_ratio)); +} + +void Resize::set_coordinate_transform_mode(const CoordinateTransformMode coordinate_transform_mode) { + int64_t swi = coordinate_transform_mode; + this->AddAttr(kCoordinateTransformMode, MakeValue(swi)); +} + +void Resize::set_cubic_coeff(const float cubic_coeff) { this->AddAttr(kCubicCoeff, MakeValue(cubic_coeff)); } + +void Resize::set_exclude_outside(const int64_t exclude_outside) { + this->AddAttr(kExcludeOutside, MakeValue(exclude_outside)); +} + +void Resize::set_extrapolation_value(const float extrapolation_value) { + this->AddAttr(kExtrapolationValue, MakeValue(extrapolation_value)); +} + +void Resize::set_nearest_mode(const NearestMode nearest_mode) { + int64_t swi = (int64_t)nearest_mode; + this->AddAttr(kNearestMode, MakeValue(swi)); +} + +Format Resize::get_format() const { + auto value_ptr = GetAttr(kFormat); + return Format(GetValue(value_ptr)); +} + +ResizeMethod Resize::get_method() const { + auto value_ptr = GetAttr(kMethod); + return ResizeMethod(GetValue(value_ptr)); +} + +int64_t Resize::get_new_height() const { + auto value_ptr = GetAttr(kNewHeight); + return GetValue(value_ptr); +} + +int64_t Resize::get_new_width() const { + auto value_ptr = GetAttr(kNewWidth); + return GetValue(value_ptr); +} +bool Resize::get_preserve_aspect_ratio() const { + auto value_ptr = GetAttr(kPreserveAspectRatio); + return GetValue(value_ptr); +} +CoordinateTransformMode Resize::get_coordinate_transform_mode() const { + auto value_ptr = GetAttr(kCoordinateTransformMode); + return CoordinateTransformMode(GetValue(value_ptr)); +} + +float Resize::get_cubic_coeff() const { + auto value_ptr = GetAttr(kCubicCoeff); + return GetValue(value_ptr); +} + +int64_t Resize::get_exclude_outside() const { + auto value_ptr = GetAttr(kExcludeOutside); + return GetValue(value_ptr); +} + +float Resize::get_extrapolation_value() const { + auto value_ptr = GetAttr(kExtrapolationValue); + return GetValue(value_ptr); +} + 
+NearestMode Resize::get_nearest_mode() const { + auto value_ptr = GetAttr(kNearestMode); + return NearestMode(GetValue(value_ptr)); +} +REGISTER_PRIMITIVE_C(kNameResize, Resize); +} // namespace ops +} // namespace mindspore diff --git a/mindspore/core/ops/resize.h b/mindspore/core/ops/resize.h new file mode 100644 index 00000000000..940a97501b6 --- /dev/null +++ b/mindspore/core/ops/resize.h @@ -0,0 +1,65 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_CORE_OPS_RESIZE_H_ +#define MINDSPORE_CORE_OPS_RESIZE_H_ +#include +#include +#include "ops/primitive_c.h" +#include "abstract/abstract_value.h" +#include "utils/check_convert_utils.h" + +namespace mindspore { +namespace ops { +constexpr auto kNameResize = "Resize"; +class Resize : public PrimitiveC { + public: + Resize() : PrimitiveC(kNameResize) {} + ~Resize() = default; + MS_DECLARE_PARENT(Resize, PrimitiveC); + void Init(const Format format, const ResizeMethod method, const int64_t new_height, const int64_t new_width, + const bool preserve_aspect_ratio, const CoordinateTransformMode coordinate_transform_mode, + const float cubic_coeff, const int64_t exclude_outside, const float extrapolation_value, + const NearestMode nearest_mode); + void set_format(const Format format); + void set_method(const ResizeMethod method); + void set_new_height(const int64_t new_height); + void set_new_width(const int64_t new_width); + void set_preserve_aspect_ratio(const bool preserve_aspect_ratio); + void set_coordinate_transform_mode(const CoordinateTransformMode coordinate_transform_mode); + void set_cubic_coeff(const float cubic_coeff); + void set_exclude_outside(const int64_t exclude_outside); + void set_extrapolation_value(const float extrapolation_value); + void set_nearest_mode(const NearestMode nearest_mode); + Format get_format() const; + ResizeMethod get_method() const; + int64_t get_new_height() const; + int64_t get_new_width() const; + bool get_preserve_aspect_ratio() const; + CoordinateTransformMode get_coordinate_transform_mode() const; + float get_cubic_coeff() const; + int64_t get_exclude_outside() const; + float get_extrapolation_value() const; + NearestMode get_nearest_mode() const; +}; + +AbstractBasePtr ResizeInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args); +using PrimResizePtr = std::shared_ptr; +} // namespace ops +} // namespace mindspore + +#endif // MINDSPORE_CORE_OPS_RESIZE_H_ diff --git a/mindspore/core/ops/resize_bilinear.cc b/mindspore/core/ops/resize_bilinear.cc new file mode 100644 index 00000000000..2a8889dda0b --- /dev/null +++ b/mindspore/core/ops/resize_bilinear.cc @@ -0,0 +1,76 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#include "ops/resize_bilinear.h" +#include +#include +#include +#include +#include +#include "ops/op_utils.h" +#include "utils/check_convert_utils.h" +#include "abstract/primitive_infer_map.h" + +namespace mindspore { +namespace ops { + +void ResizeBilinear::set_size(const std::vector &size) { this->AddAttr(kSize, MakeValue(size)); } + +std::vector ResizeBilinear::get_size() const { + auto value_ptr = GetAttr(kSize); + return GetValue>(value_ptr); +} + +void ResizeBilinear::set_align_corners(const bool align_corners) { + this->AddAttr(kAlignCorners, MakeValue(align_corners)); +} + +bool ResizeBilinear::get_align_corners() const { + auto value_ptr = GetAttr(kAlignCorners); + return GetValue(value_ptr); +} + +void ResizeBilinear::Init(const std::vector &size, const bool align_corners) { + this->set_size(size); + this->set_align_corners(align_corners); +} +AbstractBasePtr ResizeBilinearInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args) { + MS_EXCEPTION_IF_NULL(primitive); + auto resize_prim = primitive->cast(); + MS_EXCEPTION_IF_NULL(resize_prim); + auto prim_name = resize_prim->name(); + CheckAndConvertUtils::CheckInteger("resize_bilinear_infer", input_args.size(), kEqual, 1, prim_name); + + // Infer shape + auto input_shape = + CheckAndConvertUtils::ConvertShapePtrToShape("input_shape", input_args[0]->BuildShape(), prim_name); + CheckAndConvertUtils::CheckInteger("input_shape_rank", input_shape.size(), kEqual, 4, prim_name); + std::vector out_shape = {input_shape[0], input_shape[1]}; + auto size = resize_prim->get_size(); + out_shape.insert(out_shape.end(), size.begin(), size.end()); + + // Infer type + auto input_type = input_args[0]->BuildType()->cast()->element(); + const std::set valid_types = {kNumberTypeFloat16, kNumberTypeFloat32}; + CheckAndConvertUtils::CheckTensorTypeValid("input_type", input_type, valid_types, prim_name); + auto out_type = TypeIdToType(kNumberTypeFloat32); + + return std::make_shared(out_type, out_shape); +} +REGISTER_PRIMITIVE_EVAL_IMPL(ResizeBilinear, prim::kPrimResizeBilinear, ResizeBilinearInfer); +REGISTER_PRIMITIVE_C(kNameResizeBilinear, ResizeBilinear); +} // namespace ops +} // namespace mindspore diff --git a/mindspore/core/ops/resize_bilinear.h b/mindspore/core/ops/resize_bilinear.h new file mode 100644 index 00000000000..e615509eec0 --- /dev/null +++ b/mindspore/core/ops/resize_bilinear.h @@ -0,0 +1,47 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
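As a concrete illustration of ResizeBilinearInfer above (shapes invented for the example):

// input shape (NCHW) = {1, 3, 32, 32}, attribute size = {64, 64}
// out_shape = {input_shape[0], input_shape[1]} followed by size = {1, 3, 64, 64}
// the inferred dtype is always float32, even when the input is float16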
+ */ + +#ifndef MINDSPORE_CORE_OPS_RESIZE_BILINEAR_H_ +#define MINDSPORE_CORE_OPS_RESIZE_BILINEAR_H_ +#include +#include +#include +#include +#include "ops/primitive_c.h" +#include "abstract/abstract_value.h" +#include "utils/check_convert_utils.h" + +namespace mindspore { +namespace ops { +constexpr auto kNameResizeBilinear = "ResizeBilinear"; +class ResizeBilinear : public PrimitiveC { + public: + ResizeBilinear() : PrimitiveC(kNameResizeBilinear) {} + ~ResizeBilinear() = default; + MS_DECLARE_PARENT(ResizeBilinear, PrimitiveC); + void Init(const std::vector &size, const bool align_corners = false); + void set_size(const std::vector &size); + void set_align_corners(const bool align_corners); + std::vector get_size() const; + bool get_align_corners() const; +}; +AbstractBasePtr ResizeBilinearInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args); +using PrimResizeBilinearPtr = std::shared_ptr; +} // namespace ops +} // namespace mindspore + +#endif // MINDSPORE_CORE_OPS_RESIZE_BILINEAR_H_ diff --git a/mindspore/core/ops/resize_nearest_neighbor.cc b/mindspore/core/ops/resize_nearest_neighbor.cc new file mode 100644 index 00000000000..d5af129c240 --- /dev/null +++ b/mindspore/core/ops/resize_nearest_neighbor.cc @@ -0,0 +1,47 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include +#include +#include +#include +#include +#include "ops/op_utils.h" +#include "ops/resize_nearest_neighbor.h" +#include "utils/check_convert_utils.h" +#include "abstract/primitive_infer_map.h" + +namespace mindspore { +namespace ops { +void ResizeNearestNeighbor::Init(const std::vector &size, const bool align_corners) { + this->set_size(size); + this->set_align_corners(align_corners); +} +void ResizeNearestNeighbor::set_size(const std::vector &size) { this->AddAttr(kSize, MakeValue(size)); } +void ResizeNearestNeighbor::set_align_corners(const bool align_corners) { + this->AddAttr(kAlignCorners, MakeValue(align_corners)); +} +std::vector ResizeNearestNeighbor::get_size() const { + auto value_ptr = GetAttr(kSize); + return GetValue>(value_ptr); +} +bool ResizeNearestNeighbor::get_align_corners() const { + auto value_ptr = GetAttr(kAlignCorners); + return GetValue(value_ptr); +} +REGISTER_PRIMITIVE_C(kNameResizeNearestNeighbor, ResizeNearestNeighbor); +} // namespace ops +} // namespace mindspore diff --git a/mindspore/core/ops/resize_nearest_neighbor.h b/mindspore/core/ops/resize_nearest_neighbor.h new file mode 100644 index 00000000000..aa4a15affd4 --- /dev/null +++ b/mindspore/core/ops/resize_nearest_neighbor.h @@ -0,0 +1,45 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef MINDSPORE_CORE_OPS_RESIZE_NEAREST_NEIGHBOR_H_ +#define MINDSPORE_CORE_OPS_RESIZE_NEAREST_NEIGHBOR_H_ + +#include +#include + +#include "ops/primitive_c.h" +#include "abstract/abstract_value.h" +#include "utils/check_convert_utils.h" + +namespace mindspore { +namespace ops { +constexpr auto kNameResizeNearestNeighbor = "ResizeNearestNeighbor"; +class ResizeNearestNeighbor : public PrimitiveC { + public: + ResizeNearestNeighbor() : PrimitiveC(kNameResizeNearestNeighbor) {} + ~ResizeNearestNeighbor() = default; + MS_DECLARE_PARENT(ResizeNearestNeighbor, PrimitiveC); + void Init(const std::vector &size, const bool align_corners = false); + void set_size(const std::vector &size); + void set_align_corners(const bool align_corners); + std::vector get_size() const; + bool get_align_corners() const; +}; + +using PrimResizeNearestNeighborPtr = std::shared_ptr; +} // namespace ops +} // namespace mindspore + +#endif // MINDSPORE_CORE_OPS_RESIZE_NEAREST_NEIGHBOR_H_ diff --git a/mindspore/core/c_ops/fill.cc b/mindspore/core/ops/return.cc similarity index 82% rename from mindspore/core/c_ops/fill.cc rename to mindspore/core/ops/return.cc index 70b211700ec..95075cd4a48 100644 --- a/mindspore/core/c_ops/fill.cc +++ b/mindspore/core/ops/return.cc @@ -14,9 +14,12 @@ * limitations under the License. */ -#include "c_ops/fill.h" +#include "ops/return.h" #include "utils/check_convert_utils.h" +#include "abstract/primitive_infer_map.h" namespace mindspore { -REGISTER_PRIMITIVE_C(kNameFill, Fill); +namespace ops { +REGISTER_PRIMITIVE_C(kNameReturn, Return); +} // namespace ops } // namespace mindspore diff --git a/mindspore/core/ops/return.h b/mindspore/core/ops/return.h new file mode 100644 index 00000000000..e7cc1eafd6a --- /dev/null +++ b/mindspore/core/ops/return.h @@ -0,0 +1,40 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_CORE_OPS_RETURN_H_ +#define MINDSPORE_CORE_OPS_RETURN_H_ + +#include + +#include "ops/primitive_c.h" +#include "abstract/abstract_value.h" +#include "utils/check_convert_utils.h" + +namespace mindspore { +namespace ops { +constexpr auto kNameReturn = "Return"; +class Return : public PrimitiveC { + public: + Return() : PrimitiveC(kNameReturn) {} + ~Return() = default; + MS_DECLARE_PARENT(Return, PrimitiveC); + void Init() {} +}; + +using PrimReturnPtr = std::shared_ptr; +} // namespace ops +} // namespace mindspore + +#endif // MINDSPORE_CORE_OPS_RETURN_H_ diff --git a/mindspore/core/ops/reverse_sequence.cc b/mindspore/core/ops/reverse_sequence.cc new file mode 100644 index 00000000000..0fa4bc1bc63 --- /dev/null +++ b/mindspore/core/ops/reverse_sequence.cc @@ -0,0 +1,78 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include +#include +#include +#include "ops/reverse_sequence.h" +#include "ops/op_utils.h" +#include "utils/check_convert_utils.h" + +namespace mindspore { +namespace ops { +void ReverseSequence::Init(const int64_t seq_dim, const int64_t batch_dim) { + this->set_seq_dim(seq_dim); + this->set_batch_dim(batch_dim); +} +void ReverseSequence::set_seq_dim(const int64_t seq_dim) { this->AddAttr(kSeqDim, MakeValue(seq_dim)); } +void ReverseSequence::set_batch_dim(const int64_t batch_dim) { this->AddAttr(kBatchDim, MakeValue(batch_dim)); } + +int64_t ReverseSequence::get_seq_dim() const { + auto value_ptr = this->GetAttr(kSeqDim); + return GetValue(value_ptr); +} +int64_t ReverseSequence::get_batch_dim() const { + auto value_ptr = this->GetAttr(kBatchDim); + return GetValue(value_ptr); +} +AbstractBasePtr ReverseSequenceInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args) { + MS_EXCEPTION_IF_NULL(primitive); + auto reverse_prim = primitive->cast(); + MS_EXCEPTION_IF_NULL(reverse_prim); + auto prim_name = reverse_prim->name(); + CheckAndConvertUtils::CheckInteger("input numbers", input_args.size(), kEqual, 2, prim_name); + for (const auto &item : input_args) { + MS_EXCEPTION_IF_NULL(item); + } + // infer shape + auto input_shape = + CheckAndConvertUtils::ConvertShapePtrToShape("input_shape", input_args[0]->BuildShape(), prim_name); + auto seq_lengths = + CheckAndConvertUtils::ConvertShapePtrToShape("seq_lengths", input_args[1]->BuildShape(), prim_name); + auto seq_dim = reverse_prim->get_seq_dim(); + auto batch_dim = reverse_prim->get_batch_dim(); + CheckAndConvertUtils::CheckInteger("seq_dim", seq_dim, kLessEqual, input_shape.size(), prim_name); + CheckAndConvertUtils::CheckInteger("batch_dim", batch_dim, kLessEqual, input_shape.size(), prim_name); + CheckAndConvertUtils::CheckInteger("batch_dim", batch_dim, kNotEqual, seq_dim, prim_name); + CheckAndConvertUtils::CheckInteger("seq_lengths rank", seq_lengths.size(), kEqual, 1, prim_name); + CheckAndConvertUtils::CheckInteger("seq_lengths vector size", seq_lengths[0], kEqual, 
input_shape[batch_dim], + prim_name); + // infer type + std::set tmp(common_valid_types); + tmp.insert(kNumberTypeBool); + const std::set valid_x_types(tmp); + const std::set valid_seq_types = {kNumberTypeInt32, kNumberTypeInt64}; + auto x_type = input_args[0]->BuildType()->cast()->element(); + auto seq_type = input_args[1]->BuildType()->cast()->element(); + CheckAndConvertUtils::CheckTensorTypeValid("x_type", x_type, valid_x_types, prim_name); + CheckAndConvertUtils::CheckTensorTypeValid("seq_type", seq_type, valid_seq_types, prim_name); + return std::make_shared(x_type, input_shape); +} +REGISTER_PRIMITIVE_EVAL_IMPL(ReverseSequence, prim::kPrimReverseSequence, ReverseSequenceInfer); +REGISTER_PRIMITIVE_C(kNameReverseSequence, ReverseSequence); +} // namespace ops +} // namespace mindspore diff --git a/mindspore/core/ops/reverse_sequence.h b/mindspore/core/ops/reverse_sequence.h new file mode 100644 index 00000000000..6fde1a86144 --- /dev/null +++ b/mindspore/core/ops/reverse_sequence.h @@ -0,0 +1,45 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_CORE_OPS_REVERSE_SEQUENCE_H_ +#define MINDSPORE_CORE_OPS_REVERSE_SEQUENCE_H_ +#include +#include +#include "ops/primitive_c.h" +#include "abstract/abstract_value.h" +#include "utils/check_convert_utils.h" + +namespace mindspore { +namespace ops { +constexpr auto kNameReverseSequence = "ReverseSequence"; +class ReverseSequence : public PrimitiveC { + public: + ReverseSequence() : PrimitiveC(kNameReverseSequence) { InitIOName({"x", "seq_lengths"}, {"y"}); } + ~ReverseSequence() = default; + MS_DECLARE_PARENT(ReverseSequence, PrimitiveC); + void Init(const int64_t seq_dim, const int64_t batch_dim = 0); + void set_seq_dim(const int64_t seq_dim); + void set_batch_dim(const int64_t batch_dim); + int64_t get_seq_dim() const; + int64_t get_batch_dim() const; +}; +AbstractBasePtr ReverseSequenceInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args); +using PrimReverseSequence = std::shared_ptr; +} // namespace ops +} // namespace mindspore + +#endif // MINDSPORE_CORE_OPS_REVERSE_SEQUENCE_H_ diff --git a/mindspore/core/ops/reverse_v2.cc b/mindspore/core/ops/reverse_v2.cc new file mode 100644 index 00000000000..7294065e960 --- /dev/null +++ b/mindspore/core/ops/reverse_v2.cc @@ -0,0 +1,72 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
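A minimal sketch of the ReverseSequence attribute API added above (illustrative only; the wrapper function is invented for the example):

#include "ops/reverse_sequence.h"

// Illustrative only: sets and reads back the seq_dim / batch_dim attributes.
void ReverseSequenceSketch() {
  mindspore::ops::ReverseSequence rev_seq;
  rev_seq.Init(/*seq_dim=*/1, /*batch_dim=*/0);  // stored as kSeqDim / kBatchDim
  int64_t seq_dim = rev_seq.get_seq_dim();       // 1
  int64_t batch_dim = rev_seq.get_batch_dim();   // 0
  (void)seq_dim;
  (void)batch_dim;
}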
+ */ + +#include + +#include "ops/op_utils.h" +#include "ops/reverse_v2.h" + +namespace mindspore { +namespace ops { +namespace { +abstract::ShapePtr InferShape(const PrimitivePtr &primitive, const std::vector &input_args) { + MS_EXCEPTION_IF_NULL(primitive); + auto reverseV2_prim = primitive->cast(); + MS_EXCEPTION_IF_NULL(reverseV2_prim); + auto prim_name = reverseV2_prim->name(); + auto x_shape = CheckAndConvertUtils::ConvertShapePtrToShape("x_shape", input_args[0]->BuildShape(), prim_name); + // auto axis = reverseV2_prim->get_axis(); + // int dim = x_shape.size(); + // for (auto &axis_value : axis) { + // CheckAndConvertUtils::CheckInRange("axis value", axis_value, kIncludeLeft, {-dim, dim}, prim_name); + // } + return std::make_shared(x_shape); +} + +TypePtr InferType(const PrimitivePtr &prim, const std::vector &input_args) { + for (const auto &item : input_args) { + MS_EXCEPTION_IF_NULL(item); + } + const std::set valid_types = {kNumberTypeInt8, kNumberTypeInt16, kNumberTypeInt32, kNumberTypeInt64, + kNumberTypeUInt8, kNumberTypeUInt16, kNumberTypeUInt32, kNumberTypeUInt64, + kNumberTypeFloat16, kNumberTypeFloat32, kNumberTypeFloat64, kNumberTypeBool}; + auto infer_type = input_args[0]->BuildType(); + CheckAndConvertUtils::CheckTensorTypeValid("x type", infer_type, valid_types, prim->name()); + MS_EXCEPTION_IF_NULL(infer_type); + auto tensor_type = infer_type->cast(); + MS_EXCEPTION_IF_NULL(tensor_type); + auto data_type = tensor_type->element(); + MS_EXCEPTION_IF_NULL(data_type); + return data_type; +} +} // namespace + +void ReverseV2::Init(const std::vector &axis) { this->set_axis(axis); } +void ReverseV2::set_axis(const std::vector &axis) { this->AddAttr(kAxis, MakeValue(axis)); } +std::vector ReverseV2::get_axis() const { + auto value_ptr = GetAttr(kAxis); + return GetValue>(value_ptr); +} + +AbstractBasePtr ReverseV2Infer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args) { + return std::make_shared(InferType(primitive, input_args), + InferShape(primitive, input_args)->shape()); +} +REGISTER_PRIMITIVE_EVAL_IMPL(ReverseV2, prim::kPrimReverseV2, ReverseV2Infer); +REGISTER_PRIMITIVE_C(kNameReverseV2, ReverseV2); +} // namespace ops +} // namespace mindspore diff --git a/mindspore/core/ops/reverse_v2.h b/mindspore/core/ops/reverse_v2.h new file mode 100644 index 00000000000..6df7990ddb1 --- /dev/null +++ b/mindspore/core/ops/reverse_v2.h @@ -0,0 +1,46 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+#ifndef MINDSPORE_CORE_OPS_REVERSE_V2_H_
+#define MINDSPORE_CORE_OPS_REVERSE_V2_H_
+#include <map>
+#include <vector>
+#include <string>
+#include <memory>
+#include "ops/primitive_c.h"
+#include "abstract/abstract_value.h"
+#include "utils/check_convert_utils.h"
+
+namespace mindspore {
+namespace ops {
+constexpr auto kNameReverseV2 = "ReverseV2";
+class ReverseV2 : public PrimitiveC {
+ public:
+  ReverseV2() : PrimitiveC(kNameReverseV2) {}
+  ~ReverseV2() = default;
+  MS_DECLARE_PARENT(ReverseV2, PrimitiveC);
+  void Init(const std::vector<int64_t> &axis);
+  void set_axis(const std::vector<int64_t> &axis);
+  std::vector<int64_t> get_axis() const;
+};
+
+AbstractBasePtr ReverseV2Infer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive,
+                               const std::vector<AbstractBasePtr> &input_args);
+using PrimReverseV2Ptr = std::shared_ptr<ReverseV2>;
+}  // namespace ops
+}  // namespace mindspore
+
+#endif  // MINDSPORE_CORE_OPS_REVERSE_V2_H_
diff --git a/mindspore/core/ops/rfft.cc b/mindspore/core/ops/rfft.cc
new file mode 100644
index 00000000000..ced92a7a098
--- /dev/null
+++ b/mindspore/core/ops/rfft.cc
@@ -0,0 +1,63 @@
+/**
+ * Copyright 2020 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ops/rfft.h"
+#include "ops/op_utils.h"
+#include "utils/check_convert_utils.h"
+#include "abstract/primitive_infer_map.h"
+
+namespace mindspore {
+namespace ops {
+namespace {
+abstract::ShapePtr InferShape(const PrimitivePtr &primitive, const std::vector<AbstractBasePtr> &input_args) {
+  MS_EXCEPTION_IF_NULL(primitive);
+  auto rfft_prim = primitive->cast<PrimRfftPtr>();
+  MS_EXCEPTION_IF_NULL(rfft_prim);
+  auto prim_name = rfft_prim->name();
+  auto first_input_shape =
+    CheckAndConvertUtils::ConvertShapePtrToShape("first_input_shape", input_args[0]->BuildShape(), prim_name);
+  auto out_shape = first_input_shape;
+  out_shape[out_shape.size() - 1] = rfft_prim->get_fft_length() / 2 + 1;
+  out_shape.push_back(2);
+  return std::make_shared<abstract::Shape>(out_shape);
+}
+
+TypePtr InferType(const PrimitivePtr &prim, const std::vector<AbstractBasePtr> &input_args) {
+  for (const auto &item : input_args) {
+    MS_EXCEPTION_IF_NULL(item);
+  }
+  return TypeIdToType(kNumberTypeComplex64);
+}
+}  // namespace
+
+void Rfft::Init(const int64_t fft_length) { this->set_fft_length(fft_length); }
+
+void Rfft::set_fft_length(const int64_t fft_length) { this->AddAttr(kFftLength, MakeValue(fft_length)); }
+
+int64_t Rfft::get_fft_length() const {
+  auto value_ptr = this->GetAttr(kFftLength);
+  return GetValue<int64_t>(value_ptr);
+}
+
+AbstractBasePtr RfftInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive,
+                          const std::vector<AbstractBasePtr> &input_args) {
+  return std::make_shared<abstract::AbstractTensor>(InferType(primitive, input_args),
+                                                    InferShape(primitive, input_args)->shape());
+}
+REGISTER_PRIMITIVE_EVAL_IMPL(Rfft, prim::kPrimRfft, RfftInfer);
+REGISTER_PRIMITIVE_C(kNameRfft, Rfft);
+}  // namespace ops
+}  // namespace mindspore
diff --git a/mindspore/core/ops/rfft.h b/mindspore/core/ops/rfft.h
new file mode 100644
index 00000000000..1edf6b4fba1
--- /dev/null
+++ b/mindspore/core/ops/rfft.h
@@ -0,0 +1,43 @@
+/**
+ * 
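For intuition about the Rfft shape rule above, a small worked example (sizes invented for illustration):

// input shape = {1, 1, 400}, fft_length = 512
// last dimension becomes fft_length / 2 + 1 = 257, then a trailing 2 is appended
// inferred output shape = {1, 1, 257, 2}, dtype Complex64 (real/imaginary pairs)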
Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef MINDSPORE_CORE_OPS_RFFT_H_ +#define MINDSPORE_CORE_OPS_RFFT_H_ +#include +#include + +#include "ops/primitive_c.h" +#include "abstract/abstract_value.h" +#include "utils/check_convert_utils.h" + +namespace mindspore { +namespace ops { +constexpr auto kNameRfft = "Rfft"; +class Rfft : public PrimitiveC { + public: + Rfft() : PrimitiveC(kNameRfft) {} + ~Rfft() = default; + MS_DECLARE_PARENT(Rfft, PrimitiveC); + void Init(const int64_t fft_length); + void set_fft_length(const int64_t fft_length); + int64_t get_fft_length() const; +}; +AbstractBasePtr RfftInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args); +using PrimRfftPtr = std::shared_ptr; +} // namespace ops +} // namespace mindspore + +#endif // MINDSPORE_CORE_OPS_RFFT_H_ diff --git a/mindspore/core/ops/roi_pooling.cc b/mindspore/core/ops/roi_pooling.cc new file mode 100644 index 00000000000..d0977d09e03 --- /dev/null +++ b/mindspore/core/ops/roi_pooling.cc @@ -0,0 +1,84 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
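A quick worked example of the Rfft shape rule above (sizes and the helper function name are illustrative only): RfftInfer keeps the leading dimensions, replaces the last one with fft_length / 2 + 1, and appends a trailing 2 for the real and imaginary parts.

  #include "ops/rfft.h"
  // Sketch: input shape {1, 1, 400}, fft_length = 512.
  void RfftShapeExample() {
    mindspore::ops::Rfft rfft;
    rfft.Init(512);                                    // stores the kFftLength attribute
    int64_t last_dim = rfft.get_fft_length() / 2 + 1;  // 512 / 2 + 1 = 257
    // Inferred output shape: {1, 1, 257, 2}; the inferred dtype is Complex64 per InferType.
    (void)last_dim;
  }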
+ */ +#include "ops/roi_pooling.h" +#include +#include +#include +#include +#include +#include "ops/op_utils.h" +#include "utils/check_convert_utils.h" +#include "abstract/primitive_infer_map.h" + +namespace mindspore { +namespace ops { +void ROIPooling::set_pooled_h(const int64_t pooled_h) { this->AddAttr(kPooledH, MakeValue(pooled_h)); } + +int64_t ROIPooling::get_pooled_h() const { + auto value_ptr = GetAttr(kPooledH); + return GetValue(value_ptr); +} + +void ROIPooling::set_pooled_w(const int64_t pooled_w) { this->AddAttr(kPooledW, MakeValue(pooled_w)); } + +int64_t ROIPooling::get_pooled_w() const { + auto value_ptr = GetAttr(kPooledW); + return GetValue(value_ptr); +} + +void ROIPooling::set_scale(const float scale) { this->AddAttr(kScale, MakeValue(scale)); } + +float ROIPooling::get_scale() const { + auto value_ptr = GetAttr(kScale); + return GetValue(value_ptr); +} + +void ROIPooling::Init(const int64_t pooled_h, const int64_t pooled_w, const float scale) { + this->set_pooled_h(pooled_h); + this->set_pooled_w(pooled_w); + this->set_scale(scale); +} +AbstractBasePtr ROIPoolingInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args) { + MS_EXCEPTION_IF_NULL(primitive); + auto roi_prim = primitive->cast(); + MS_EXCEPTION_IF_NULL(roi_prim); + auto prim_name = roi_prim->name(); + CheckAndConvertUtils::CheckInteger("roi_pooling_infer", input_args.size(), kEqual, 2, prim_name); + MS_EXCEPTION_IF_NULL(input_args[0]); + MS_EXCEPTION_IF_NULL(input_args[1]); + + // Infer type + auto output_data_type = input_args[0]->BuildType()->cast()->element(); + + // Infer shape + auto new_h = roi_prim->get_pooled_h(); + auto new_w = roi_prim->get_pooled_w(); + auto input_shape = + CheckAndConvertUtils::ConvertShapePtrToShape("input_shape", input_args[0]->BuildShape(), prim_name); + auto roi_shape = CheckAndConvertUtils::ConvertShapePtrToShape("roi_shape", input_args[1]->BuildShape(), prim_name); + std::vector output_shape; + output_shape.push_back(roi_shape[0]); + output_shape.push_back(new_h); + output_shape.push_back(new_w); + output_shape.push_back(input_shape[1]); + + return std::make_shared(output_data_type, std::make_shared(output_shape)); +} +REGISTER_PRIMITIVE_EVAL_IMPL(ROIPooling, prim::kPrimROIPooling, ROIPoolingInfer); +REGISTER_PRIMITIVE_C(kNameROIPooling, ROIPooling); +} // namespace ops +} // namespace mindspore diff --git a/mindspore/core/ops/roi_pooling.h b/mindspore/core/ops/roi_pooling.h new file mode 100644 index 00000000000..3fe61323df1 --- /dev/null +++ b/mindspore/core/ops/roi_pooling.h @@ -0,0 +1,49 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef MINDSPORE_CORE_OPS_ROI_POOLING_H_ +#define MINDSPORE_CORE_OPS_ROI_POOLING_H_ +#include +#include +#include +#include +#include "ops/primitive_c.h" +#include "abstract/abstract_value.h" +#include "utils/check_convert_utils.h" + +namespace mindspore { +namespace ops { +constexpr auto kNameROIPooling = "ROIPooling"; +class ROIPooling : public PrimitiveC { + public: + ROIPooling() : PrimitiveC(kNameROIPooling) {} + ~ROIPooling() = default; + MS_DECLARE_PARENT(ROIPooling, PrimitiveC); + void Init(const int64_t pooled_h, const int64_t pooled_w, const float scale); + void set_pooled_h(const int64_t pooled_h); + void set_pooled_w(const int64_t pooled_w); + void set_scale(const float scale); + int64_t get_pooled_h() const; + int64_t get_pooled_w() const; + float get_scale() const; +}; +AbstractBasePtr ROIPoolingInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args); +using PrimROIPoolingPtr = std::shared_ptr; +} // namespace ops +} // namespace mindspore + +#endif // MINDSPORE_CORE_OPS_ROI_POOLING_H_ diff --git a/mindspore/core/ops/round.cc b/mindspore/core/ops/round.cc new file mode 100644 index 00000000000..43c0bdb2763 --- /dev/null +++ b/mindspore/core/ops/round.cc @@ -0,0 +1,50 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
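A shape sketch for the ROIPooling inference above (sizes and the function name are illustrative): the output is {roi_shape[0], pooled_h, pooled_w, input_shape[1]}.

  #include "ops/roi_pooling.h"
  void ROIPoolingShapeExample() {
    mindspore::ops::ROIPooling op;
    op.Init(7, 7, 0.0625f);  // pooled_h, pooled_w, scale (e.g. 1/16 of the original image)
    // For a feature map of shape {1, 256, 38, 50} and 128 ROIs of shape {128, 5},
    // ROIPoolingInfer yields {128, 7, 7, 256}, i.e.
    // {roi_shape[0], get_pooled_h(), get_pooled_w(), input_shape[1]}.
  }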
+ */
+
+#include "ops/round.h"
+#include "ops/op_utils.h"
+#include "utils/check_convert_utils.h"
+#include "abstract/primitive_infer_map.h"
+
+namespace mindspore {
+namespace ops {
+namespace {
+abstract::ShapePtr InferShape(const PrimitivePtr &primitive, const std::vector<AbstractBasePtr> &input_args) {
+  MS_EXCEPTION_IF_NULL(primitive);
+  // Round is element-wise, so the output shape is the input shape.
+  auto x_shape =
+    CheckAndConvertUtils::ConvertShapePtrToShape("x_shape", input_args[0]->BuildShape(), primitive->name());
+  return std::make_shared<abstract::Shape>(x_shape);
+}
+
+TypePtr InferType(const PrimitivePtr &prim, const std::vector<AbstractBasePtr> &input_args) {
+  auto infer_type = input_args[0]->BuildType();
+  CheckAndConvertUtils::CheckTensorTypeValid("x", infer_type, common_valid_types, prim->name());
+  MS_EXCEPTION_IF_NULL(infer_type);
+  auto tensor_type = infer_type->cast<TensorTypePtr>();
+  MS_EXCEPTION_IF_NULL(tensor_type);
+  auto data_type = tensor_type->element();
+  MS_EXCEPTION_IF_NULL(data_type);
+  return data_type;
+}
+}  // namespace
+
+AbstractBasePtr RoundInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive,
+                           const std::vector<AbstractBasePtr> &input_args) {
+  return std::make_shared<abstract::AbstractTensor>(InferType(primitive, input_args),
+                                                    InferShape(primitive, input_args)->shape());
+}
+REGISTER_PRIMITIVE_EVAL_IMPL(Round, prim::kPrimRound, RoundInfer);
+REGISTER_PRIMITIVE_C(kNameRound, Round);
+}  // namespace ops
+}  // namespace mindspore
diff --git a/mindspore/core/ops/round.h b/mindspore/core/ops/round.h
new file mode 100644
index 00000000000..bec87da0731
--- /dev/null
+++ b/mindspore/core/ops/round.h
@@ -0,0 +1,43 @@
+/**
+ * Copyright 2020 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef MINDSPORE_CORE_OPS_ROUND_H_
+#define MINDSPORE_CORE_OPS_ROUND_H_
+#include <memory>
+#include <vector>
+
+#include "ops/primitive_c.h"
+#include "abstract/abstract_value.h"
+#include "utils/check_convert_utils.h"
+
+namespace mindspore {
+namespace ops {
+constexpr auto kNameRound = "Round";
+class Round : public PrimitiveC {
+ public:
+  Round() : PrimitiveC(kNameRound) { InitIOName({"input_x"}, {"output"}); }
+  ~Round() = default;
+  MS_DECLARE_PARENT(Round, PrimitiveC);
+  void Init() {}
+};
+
+AbstractBasePtr RoundInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive,
+                           const std::vector<AbstractBasePtr> &input_args);
+using PrimRoundPtr = std::shared_ptr<Round>;
+}  // namespace ops
+}  // namespace mindspore
+
+#endif  // MINDSPORE_CORE_OPS_ROUND_H_
diff --git a/mindspore/core/ops/rsqrt.cc b/mindspore/core/ops/rsqrt.cc
new file mode 100644
index 00000000000..0dfb271ef02
--- /dev/null
+++ b/mindspore/core/ops/rsqrt.cc
@@ -0,0 +1,59 @@
+/**
+ * Copyright 2020 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "ops/rsqrt.h" +#include +#include +#include +#include +#include +#include "ops/op_utils.h" +#include "utils/check_convert_utils.h" +#include "abstract/primitive_infer_map.h" + +namespace mindspore { +namespace ops { +namespace { +abstract::ShapePtr InferShape(const PrimitivePtr &primitive, const std::vector &input_args) { + MS_EXCEPTION_IF_NULL(primitive); + auto rsqrt_prim = primitive->cast(); + MS_EXCEPTION_IF_NULL(rsqrt_prim); + auto prim_name = rsqrt_prim->name(); + auto in_shape = CheckAndConvertUtils::ConvertShapePtrToShape("in_shape", input_args[0]->GetShapeTrack(), prim_name); + CheckAndConvertUtils::CheckInteger("input shape", in_shape.size(), kEqual, 1, prim_name); + return std::make_shared(in_shape); +} + +TypePtr InferType(const PrimitivePtr &prim, const std::vector &input_args) { + if (std::any_of(input_args.begin(), input_args.end(), [](AbstractBasePtr a) { return a == nullptr; })) { + MS_LOG(EXCEPTION) << "nullptr"; + } + std::map types; + types.emplace("x", input_args[0]->BuildType()); + auto infer_type = CheckAndConvertUtils::CheckTensorTypeSame(types, common_valid_types, prim->name()); + return TypeIdToType(infer_type); +} +} // namespace + +AbstractBasePtr RsqrtInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args) { + return std::make_shared(InferType(primitive, input_args), + InferShape(primitive, input_args)->shape()); +} +REGISTER_PRIMITIVE_EVAL_IMPL(Rsqrt, prim::kPrimRsqrt, RsqrtInfer); +REGISTER_PRIMITIVE_C(kNameRsqrt, Rsqrt); +} // namespace ops +} // namespace mindspore diff --git a/mindspore/core/ops/rsqrt.h b/mindspore/core/ops/rsqrt.h new file mode 100644 index 00000000000..96b7d3c02be --- /dev/null +++ b/mindspore/core/ops/rsqrt.h @@ -0,0 +1,43 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef MINDSPORE_CORE_OPS_RSQRT_H_ +#define MINDSPORE_CORE_OPS_RSQRT_H_ +#include +#include +#include +#include +#include "ops/primitive_c.h" +#include "abstract/abstract_value.h" +#include "utils/check_convert_utils.h" + +namespace mindspore { +namespace ops { +constexpr auto kNameRsqrt = "Rsqrt"; +class Rsqrt : public PrimitiveC { + public: + Rsqrt() : PrimitiveC(kNameRsqrt) { InitIOName({"x"}, {"output"}); } + ~Rsqrt() = default; + MS_DECLARE_PARENT(Rsqrt, PrimitiveC); + void Init() {} +}; +AbstractBasePtr RsqrtInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args); +using PrimRsqrtPtr = std::shared_ptr; +} // namespace ops +} // namespace mindspore + +#endif // MINDSPORE_CORE_OPS_RSQRT_H_ diff --git a/mindspore/core/ops/scale.cc b/mindspore/core/ops/scale.cc new file mode 100644 index 00000000000..9ccbb7f4803 --- /dev/null +++ b/mindspore/core/ops/scale.cc @@ -0,0 +1,28 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "ops/scale.h" + +namespace mindspore { +namespace ops { +void Scale::Init(const int64_t axis) { set_axis(axis); } + +void Scale::set_axis(const int64_t axis) { this->AddAttr(kAxis, MakeValue(axis)); } +int64_t Scale::get_axis() const { return GetValue(GetAttr(kAxis)); } + +REGISTER_PRIMITIVE_C(kNameScale, Scale); +} // namespace ops +} // namespace mindspore diff --git a/mindspore/core/ops/scale.h b/mindspore/core/ops/scale.h new file mode 100644 index 00000000000..3b224f44dd9 --- /dev/null +++ b/mindspore/core/ops/scale.h @@ -0,0 +1,44 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef MINDSPORE_CORE_OPS_SCALE_H_ +#define MINDSPORE_CORE_OPS_SCALE_H_ +#include +#include +#include +#include +#include "ops/primitive_c.h" +#include "ops/op_utils.h" +#include "abstract/abstract_value.h" +#include "utils/check_convert_utils.h" + +namespace mindspore { +namespace ops { +constexpr auto kNameScale = "Scale"; +class Scale : public PrimitiveC { + public: + Scale() : PrimitiveC(kNameScale) {} + explicit Scale(const std::string k_name) : PrimitiveC(k_name) {} + ~Scale() = default; + MS_DECLARE_PARENT(Scale, PrimitiveC); + void Init(const int64_t axis = -1); + void set_axis(const int64_t axis); + int64_t get_axis() const; +}; +} // namespace ops +} // namespace mindspore + +#endif // MINDSPORE_CORE_OPS_SCALE_H_ diff --git a/mindspore/core/ops/scatter_nd.cc b/mindspore/core/ops/scatter_nd.cc new file mode 100644 index 00000000000..d5056143890 --- /dev/null +++ b/mindspore/core/ops/scatter_nd.cc @@ -0,0 +1,63 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include +#include "ops/scatter_nd.h" +#include "ops/op_utils.h" +#include "utils/check_convert_utils.h" +#include "abstract/primitive_infer_map.h" + +namespace mindspore { +namespace ops { +namespace { +abstract::ShapePtr InferShape(const PrimitivePtr &primitive, const std::vector &input_args) { + auto shape_value = input_args[2]->BuildValue(); + auto shape_value_element = GetValue>(shape_value); + for (const auto &shape : shape_value_element) { + CheckAndConvertUtils::CheckInteger("shape value", shape, kGreaterThan, 0, "ScatterNd"); + } + auto indices_shape = + CheckAndConvertUtils::ConvertShapePtrToShape("indices_shape", input_args[0]->BuildShape(), "ScatterNd"); + auto update_shape = + CheckAndConvertUtils::ConvertShapePtrToShape("update_shape", input_args[1]->BuildShape(), "ScatterNd"); + CheckAndConvertUtils::CheckInteger("indices_shape[0] and update_shape[0]", indices_shape[0], kEqual, update_shape[0], + "ScatterNd"); + return std::make_shared(shape_value_element); +} + +TypePtr InferType(const PrimitivePtr &prim, const std::vector &input_args) { + for (const auto &item : input_args) { + MS_EXCEPTION_IF_NULL(item); + } + const std::set indices_valid_types = {kNumberTypeInt32, kNumberTypeInt64}; + const std::set update_valid_types = {TypeIdToType(kObjectTypeTensorType)}; + auto indices_type = input_args[0]->BuildType(); + auto update_type = input_args[1]->BuildType(); + CheckAndConvertUtils::CheckSubClass("update type", update_type, update_valid_types, prim->name()); + CheckAndConvertUtils::CheckTensorTypeValid("indices type", indices_type, indices_valid_types, prim->name()); + return input_args[1]->BuildType(); +} +} // namespace + +AbstractBasePtr ScatterNdInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args) { + return std::make_shared(InferType(primitive, input_args), + InferShape(primitive, input_args)->shape()); +} +REGISTER_PRIMITIVE_EVAL_IMPL(ScatterNd, prim::kPrimScatterNd, 
ScatterNdInfer); +REGISTER_PRIMITIVE_C(kNameScatterNd, ScatterNd); +} // namespace ops +} // namespace mindspore diff --git a/mindspore/core/ops/scatter_nd.h b/mindspore/core/ops/scatter_nd.h new file mode 100644 index 00000000000..dfa678bfd56 --- /dev/null +++ b/mindspore/core/ops/scatter_nd.h @@ -0,0 +1,42 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_CORE_OPS_SCATTER_ND_H_ +#define MINDSPORE_CORE_OPS_SCATTER_ND_H_ +#include +#include + +#include "ops/primitive_c.h" +#include "abstract/abstract_value.h" +#include "utils/check_convert_utils.h" + +namespace mindspore { +namespace ops { +constexpr auto kNameScatterNd = "ScatterNd"; +class ScatterNd : public PrimitiveC { + public: + ScatterNd() : PrimitiveC(kNameScatterNd) { InitIOName({"indices", "update", "shape"}, {"output"}); } + ~ScatterNd() = default; + MS_DECLARE_PARENT(ScatterNd, PrimitiveC); + void Init() {} +}; +AbstractBasePtr ScatterNdInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args); +using PrimScatterNdPtr = std::shared_ptr; +} // namespace ops +} // namespace mindspore + +#endif // MINDSPORE_CORE_OPS_SCATTER_ND_H_ diff --git a/mindspore/core/c_ops/atan.cc b/mindspore/core/ops/select.cc similarity index 86% rename from mindspore/core/c_ops/atan.cc rename to mindspore/core/ops/select.cc index 97f56784d22..492a2301e41 100644 --- a/mindspore/core/c_ops/atan.cc +++ b/mindspore/core/ops/select.cc @@ -14,8 +14,10 @@ * limitations under the License. */ -#include "c_ops/atan.h" +#include "ops/select.h" namespace mindspore { -REGISTER_PRIMITIVE_C(kNameAtan, Atan); +namespace ops { +REGISTER_PRIMITIVE_C(kNameSelect, Select); +} // namespace ops } // namespace mindspore diff --git a/mindspore/core/ops/select.h b/mindspore/core/ops/select.h new file mode 100644 index 00000000000..79a3b1b38df --- /dev/null +++ b/mindspore/core/ops/select.h @@ -0,0 +1,44 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
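The ScatterNd checks above reduce to two constraints; a small worked example with made-up sizes:

  // indices shape {4, 2}, update shape {4}, shape input {3, 3}
  // 1) every element of the shape input must be > 0    -> 3 and 3 pass
  // 2) indices_shape[0] must equal update_shape[0]     -> 4 == 4 passes
  // The inferred output shape is exactly the shape input ({3, 3}),
  // and the output dtype is taken from the update tensor.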
+ */ + +#ifndef MINDSPORE_CORE_OPS_SELECT_H_ +#define MINDSPORE_CORE_OPS_SELECT_H_ + +#include +#include +#include +#include +#include +#include "ops/op_utils.h" +#include "ops/primitive_c.h" +#include "abstract/primitive_infer_map.h" +#include "abstract/abstract_value.h" +#include "utils/check_convert_utils.h" + +namespace mindspore { +namespace ops { +constexpr auto kNameSelect = "Select"; +class Select : public PrimitiveC { + public: + Select() : PrimitiveC(kNameSelect) { InitIOName({"condition", "x", "y"}, {"output"}); } + ~Select() = default; + MS_DECLARE_PARENT(Select, PrimitiveC); + void Init() {} +}; +} // namespace ops +} // namespace mindspore + +#endif // MINDSPORE_CORE_OPS_SELECT_H_ diff --git a/mindspore/core/ops/sgd.cc b/mindspore/core/ops/sgd.cc new file mode 100644 index 00000000000..6cc7ebfc38a --- /dev/null +++ b/mindspore/core/ops/sgd.cc @@ -0,0 +1,52 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "ops/sgd.h" + +namespace mindspore { +namespace ops { +void SGD::Init(const float dampening, const float weight_decay, const bool nesterov) { + set_nesterov(nesterov); + set_dampening(dampening); + set_weight_decay(weight_decay); +} + +void SGD::set_dampening(const float dampening) { + if (get_nesterov()) CheckAndConvertUtils::CheckValue(kDampening, dampening, kEqual, 0.0, name()); + AddAttr(kDampening, MakeValue(dampening)); +} + +void SGD::set_weight_decay(const float weight_decay) { AddAttr(kWeightDecay, MakeValue(weight_decay)); } + +void SGD::set_nesterov(const bool nesterov) { AddAttr(kNesterov, MakeValue(nesterov)); } + +float SGD::get_dampening() const { + auto value_ptr = GetAttr(kDampening); + return GetValue(value_ptr); +} + +float SGD::get_weight_decay() const { + auto value_ptr = GetAttr(kWeightDecay); + return GetValue(value_ptr); +} + +bool SGD::get_nesterov() const { + auto value_ptr = GetAttr(kNesterov); + return GetValue(value_ptr); +} +REGISTER_PRIMITIVE_C(kNameSGD, SGD); +} // namespace ops +} // namespace mindspore diff --git a/mindspore/core/ops/sgd.h b/mindspore/core/ops/sgd.h new file mode 100644 index 00000000000..20025232055 --- /dev/null +++ b/mindspore/core/ops/sgd.h @@ -0,0 +1,46 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef MINDSPORE_CORE_OPS_SGD_H_ +#define MINDSPORE_CORE_OPS_SGD_H_ +#include +#include +#include "ops/primitive_c.h" +#include "ops/op_utils.h" +#include "abstract/abstract_value.h" + +namespace mindspore { +namespace ops { +constexpr auto kNameSGD = "SGD"; +class SGD : public PrimitiveC { + public: + SGD() : PrimitiveC(kNameSGD) {} + ~SGD() = default; + MS_DECLARE_PARENT(SGD, PrimitiveC); + void Init(const float dampening = 0.0, const float weight_decay = 0.0, const bool nesterov = false); + void set_dampening(const float dampening); + void set_weight_decay(const float weight_decay); + void set_nesterov(const bool nesterov); + float get_dampening() const; + float get_weight_decay() const; + bool get_nesterov() const; +}; +AbstractBasePtr SGDInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args); +using PrimSGD = std::shared_ptr; +} // namespace ops +} // namespace mindspore +#endif // MINDSPORE_CORE_OPS_SGD_H_ diff --git a/mindspore/core/ops/shape.cc b/mindspore/core/ops/shape.cc new file mode 100644 index 00000000000..507c6e3179c --- /dev/null +++ b/mindspore/core/ops/shape.cc @@ -0,0 +1,44 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "ops/shape.h" +#include +#include +#include +#include +#include +#include "ops/op_utils.h" +#include "utils/check_convert_utils.h" +#include "abstract/primitive_infer_map.h" + +namespace mindspore { +namespace ops { +AbstractBasePtr ShapeInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args) { + // infer shape + MS_EXCEPTION_IF_NULL(primitive); + auto shape_prim = primitive->cast(); + MS_EXCEPTION_IF_NULL(shape_prim); + auto op_name = shape_prim->name(); + auto in_shape = CheckAndConvertUtils::ConvertShapePtrToShape("x_shape", input_args[0]->BuildShape(), op_name); + // infer type + auto x_type = input_args[0]->BuildType()->cast()->element(); + return std::make_shared(x_type, in_shape); +} +REGISTER_PRIMITIVE_EVAL_IMPL(Shape, prim::kPrimShape, ShapeInfer); +REGISTER_PRIMITIVE_C(kNameShape, Shape); +} // namespace ops +} // namespace mindspore diff --git a/mindspore/core/c_ops/relu6.h b/mindspore/core/ops/shape.h similarity index 67% rename from mindspore/core/c_ops/relu6.h rename to mindspore/core/ops/shape.h index 8034e76b473..8fdfea9d9a2 100644 --- a/mindspore/core/c_ops/relu6.h +++ b/mindspore/core/ops/shape.h @@ -13,28 +13,32 @@ * See the License for the specific language governing permissions and * limitations under the License. 
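A usage sketch for the SGD attribute setters above (values and the function name are illustrative). Init applies nesterov before dampening, because set_dampening requires dampening == 0.0 once nesterov is already true.

  #include "ops/sgd.h"
  void SgdAttrExample() {
    mindspore::ops::SGD sgd;
    sgd.Init(0.0f, 1e-4f, true);  // dampening, weight_decay, nesterov
    // With nesterov == true, any non-zero dampening would fail the CheckValue in set_dampening.
    bool nesterov = sgd.get_nesterov();           // true
    float weight_decay = sgd.get_weight_decay();  // 1e-4f
    (void)nesterov;
    (void)weight_decay;
  }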
*/ -#ifndef MINDSPORE_CORE_C_OPS_RELU6_H_ -#define MINDSPORE_CORE_C_OPS_RELU6_H_ + +#ifndef MINDSPORE_CORE_OPS_SHAPE_H_ +#define MINDSPORE_CORE_OPS_SHAPE_H_ #include #include #include #include -#include "c_ops/primitive_c.h" +#include "ops/primitive_c.h" #include "abstract/abstract_value.h" #include "utils/check_convert_utils.h" namespace mindspore { -constexpr auto kNameRelu6 = "Relu6"; -class Relu6 : public PrimitiveC { +namespace ops { +constexpr auto kNameShape = "Shape"; +class Shape : public PrimitiveC { public: - Relu6() : PrimitiveC(kNameRelu6) { InitIOName({"x"}, {"output"}); } - ~Relu6() = default; - MS_DECLARE_PARENT(Relu6, PrimitiveC); + Shape() : PrimitiveC(kNameShape) {} + ~Shape() = default; + MS_DECLARE_PARENT(Shape, PrimitiveC); void Init() {} }; -AbstractBasePtr Relu6Infer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, +AbstractBasePtr ShapeInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, const std::vector &input_args); -using PrimRelu6Ptr = std::shared_ptr; +using PrimShapePtr = std::shared_ptr; +} // namespace ops } // namespace mindspore -#endif // MINDSPORE_CORE_C_OPS_RELU6_H_ + +#endif // MINDSPORE_CORE_OPS_SHAPE_H_ diff --git a/mindspore/core/c_ops/flatten.cc b/mindspore/core/ops/sigmoid.cc similarity index 82% rename from mindspore/core/c_ops/flatten.cc rename to mindspore/core/ops/sigmoid.cc index 2556b9feaec..4ee2db94ddf 100644 --- a/mindspore/core/c_ops/flatten.cc +++ b/mindspore/core/ops/sigmoid.cc @@ -14,9 +14,12 @@ * limitations under the License. */ -#include "c_ops/flatten.h" +#include "ops/sigmoid.h" #include "utils/check_convert_utils.h" +#include "abstract/primitive_infer_map.h" namespace mindspore { -REGISTER_PRIMITIVE_C(kNameFlatten, Flatten); +namespace ops { +REGISTER_PRIMITIVE_C(kNameSigmoid, Sigmoid); +} // namespace ops } // namespace mindspore diff --git a/mindspore/core/ops/sigmoid.h b/mindspore/core/ops/sigmoid.h new file mode 100644 index 00000000000..b67a7dc3653 --- /dev/null +++ b/mindspore/core/ops/sigmoid.h @@ -0,0 +1,36 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+#ifndef MINDSPORE_CORE_OPS_SIGMOID_H_
+#define MINDSPORE_CORE_OPS_SIGMOID_H_
+#include "ops/primitive_c.h"
+#include "abstract/abstract_value.h"
+#include "utils/check_convert_utils.h"
+
+namespace mindspore {
+namespace ops {
+constexpr auto kNameSigmoid = "Sigmoid";
+class Sigmoid : public PrimitiveC {
+ public:
+  Sigmoid() : PrimitiveC(kNameSigmoid) { InitIOName({"x"}, {"output"}); }
+  ~Sigmoid() = default;
+  MS_DECLARE_PARENT(Sigmoid, PrimitiveC);
+  void Init() {}
+};
+}  // namespace ops
+}  // namespace mindspore
+
+#endif  // MINDSPORE_CORE_OPS_SIGMOID_H_
diff --git a/mindspore/core/ops/sigmoid_cross_entropy_with_logits.cc b/mindspore/core/ops/sigmoid_cross_entropy_with_logits.cc
new file mode 100644
index 00000000000..112b22b0463
--- /dev/null
+++ b/mindspore/core/ops/sigmoid_cross_entropy_with_logits.cc
@@ -0,0 +1,61 @@
+/**
+ * Copyright 2020 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ops/sigmoid_cross_entropy_with_logits.h"
+#include <map>
+#include <memory>
+#include <set>
+#include <string>
+#include "ops/op_utils.h"
+#include "utils/check_convert_utils.h"
+#include "abstract/primitive_infer_map.h"
+
+namespace mindspore {
+namespace ops {
+AbstractBasePtr SigmoidCrossEntropyWithLogitsInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive,
+                                                   const std::vector<AbstractBasePtr> &input_args) {
+  MS_EXCEPTION_IF_NULL(primitive);
+  auto sigmoid_prim = primitive->cast<PrimSigmoidCrossEntropyWithLogitsPtr>();
+  MS_EXCEPTION_IF_NULL(sigmoid_prim);
+  auto prim_name = sigmoid_prim->name();
+  CheckAndConvertUtils::CheckInteger("sigmoid_cross_entropy_with_logits_infer", input_args.size(), kEqual, 2,
+                                     prim_name);
+
+  // Infer shape: the loss is element-wise, so predict and target must have the same shape.
+  auto x_shape = CheckAndConvertUtils::ConvertShapePtrToShape("x_shape", input_args[0]->BuildShape(), prim_name);
+  auto y_shape = CheckAndConvertUtils::ConvertShapePtrToShape("y_shape", input_args[1]->BuildShape(), prim_name);
+  CheckAndConvertUtils::Check("x_shape", x_shape, kEqual, "y_shape", y_shape, prim_name, TypeError);
+
+  // Infer type
+  auto x_type = input_args[0]->BuildType()->cast<TensorTypePtr>()->element();
+  const std::set<TypeId> valid_types = {
+    kNumberTypeBool,    kNumberTypeInt,     kNumberTypeInt8,    kNumberTypeInt16,
+    kNumberTypeInt32,   kNumberTypeInt64,   kNumberTypeUInt,    kNumberTypeUInt8,
+    kNumberTypeUInt16,  kNumberTypeUInt32,  kNumberTypeUInt64,  kNumberTypeFloat,
+    kNumberTypeFloat16, kNumberTypeFloat32, kNumberTypeFloat64, kNumberTypeComplex64};
+  std::map<std::string, TypePtr> args;
+  args.emplace("x_type", input_args[0]->BuildType());
+  args.emplace("y_type", input_args[1]->BuildType());
+  CheckAndConvertUtils::CheckTensorTypeSame(args, valid_types, prim_name);
+
+  return std::make_shared<abstract::AbstractTensor>(x_type, x_shape);
+}
+REGISTER_PRIMITIVE_EVAL_IMPL(SigmoidCrossEntropyWithLogits, prim::kPrimSigmoidCrossEntropyWithLogits,
+                             SigmoidCrossEntropyWithLogitsInfer);
+REGISTER_PRIMITIVE_C(kNameSigmoidCrossEntropyWithLogits, SigmoidCrossEntropyWithLogits);
+}  // namespace ops
+}  // namespace mindspore
diff --git a/mindspore/core/ops/sigmoid_cross_entropy_with_logits.h
b/mindspore/core/ops/sigmoid_cross_entropy_with_logits.h new file mode 100644 index 00000000000..d34987cbe9b --- /dev/null +++ b/mindspore/core/ops/sigmoid_cross_entropy_with_logits.h @@ -0,0 +1,45 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_CORE_OPS_SIGMOID_CROSS_ENTROPY_WITH_LOGITS_H_ +#define MINDSPORE_CORE_OPS_SIGMOID_CROSS_ENTROPY_WITH_LOGITS_H_ +#include +#include +#include +#include +#include "ops/primitive_c.h" +#include "abstract/abstract_value.h" +#include "utils/check_convert_utils.h" + +namespace mindspore { +namespace ops { +constexpr auto kNameSigmoidCrossEntropyWithLogits = "SigmoidCrossEntropyWithLogits"; +class SigmoidCrossEntropyWithLogits : public PrimitiveC { + public: + SigmoidCrossEntropyWithLogits() : PrimitiveC(kNameSigmoidCrossEntropyWithLogits) { + InitIOName({"predict", "target"}, {"loss"}); + } + ~SigmoidCrossEntropyWithLogits() = default; + MS_DECLARE_PARENT(SigmoidCrossEntropyWithLogits, PrimitiveC); + void Init() {} +}; +AbstractBasePtr SigmoidCrossEntropyWithLogitsInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args); +using PrimSigmoidCrossEntropyWithLogitsPtr = std::shared_ptr; +} // namespace ops +} // namespace mindspore + +#endif // MINDSPORE_CORE_OPS_SIGMOID_CROSS_ENTROPY_WITH_LOGITS_H_ diff --git a/mindspore/core/ops/sin.cc b/mindspore/core/ops/sin.cc new file mode 100644 index 00000000000..ef4e48447b2 --- /dev/null +++ b/mindspore/core/ops/sin.cc @@ -0,0 +1,56 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "ops/sin.h" +#include +#include +#include +#include +#include +#include "ops/op_utils.h" +#include "utils/check_convert_utils.h" +#include "abstract/primitive_infer_map.h" + +namespace mindspore { +namespace ops { +namespace { +abstract::ShapePtr InferShape(const PrimitivePtr &primitive, const std::vector &input_args) { + for (const auto &item : input_args) { + MS_EXCEPTION_IF_NULL(item); + } + auto x_shape = CheckAndConvertUtils::ConvertShapePtrToShape("x_shape", input_args[0]->BuildShape(), "Sin"); + return std::make_shared(x_shape); +} + +TypePtr InferType(const PrimitivePtr &prim, const std::vector &input_args) { + for (const auto &item : input_args) { + MS_EXCEPTION_IF_NULL(item); + } + auto infer_type = input_args[0]->BuildType()->cast()->element(); + CheckAndConvertUtils::CheckTensorTypeValid("x type", input_args[0]->BuildType(), common_valid_types, prim->name()); + return infer_type; +} +} // namespace + +AbstractBasePtr SinInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args) { + return std::make_shared(InferType(primitive, input_args), + InferShape(primitive, input_args)->shape()); +} +REGISTER_PRIMITIVE_EVAL_IMPL(Sin, prim::kPrimSin, SinInfer); +REGISTER_PRIMITIVE_C(kNameSin, Sin); +} // namespace ops +} // namespace mindspore diff --git a/mindspore/core/ops/sin.h b/mindspore/core/ops/sin.h new file mode 100644 index 00000000000..a6e050ae5f4 --- /dev/null +++ b/mindspore/core/ops/sin.h @@ -0,0 +1,43 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_CORE_OPS_SIN_H_ +#define MINDSPORE_CORE_OPS_SIN_H_ +#include +#include +#include +#include +#include "ops/primitive_c.h" +#include "abstract/abstract_value.h" +#include "utils/check_convert_utils.h" + +namespace mindspore { +namespace ops { +constexpr auto kNameSin = "Sin"; +class Sin : public PrimitiveC { + public: + Sin() : PrimitiveC(kNameSin) {} + ~Sin() = default; + MS_DECLARE_PARENT(Sin, PrimitiveC); + void Init() {} +}; +AbstractBasePtr SinInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args); +using PrimSinPtr = std::shared_ptr; +} // namespace ops +} // namespace mindspore + +#endif // MINDSPORE_CORE_OPS_SIN_H_ diff --git a/mindspore/core/ops/skip_gram.cc b/mindspore/core/ops/skip_gram.cc new file mode 100644 index 00000000000..af2b17585a5 --- /dev/null +++ b/mindspore/core/ops/skip_gram.cc @@ -0,0 +1,77 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "ops/skip_gram.h" +#include "utils/check_convert_utils.h" +#include "abstract/primitive_infer_map.h" + +namespace mindspore { +namespace ops { +namespace { +abstract::ShapePtr InferShape(const PrimitivePtr &primitive, const std::vector &input_args) { + MS_EXCEPTION_IF_NULL(primitive); + auto SkipGram_prim = primitive->cast(); + MS_EXCEPTION_IF_NULL(SkipGram_prim); + auto prim_name = SkipGram_prim->name(); + if (input_args.size() != 1) { + MS_LOG(ERROR) << "Skip Gram should have one input"; + } + auto infer_value = input_args[0]->BuildValue(); + if (infer_value == nullptr) { + MS_LOG(INFO) << "Do infer shape in runtime."; + } + auto in_shape = CheckAndConvertUtils::ConvertShapePtrToShape("in_shape", input_args[0]->BuildShape(), prim_name); + return std::make_shared(in_shape); +} + +TypePtr InferType(const PrimitivePtr &prim, const std::vector &input_args) { + auto infer_type = input_args[0]->BuildType(); + return infer_type; +} +} // namespace + +void SkipGram::set_include_all_grams(const bool include_all_grams) { + AddAttr(kIncludeALLGrams, MakeValue(include_all_grams)); +} +bool SkipGram::get_include_all_grams() const { + auto value_ptr = this->GetAttr(kIncludeALLGrams); + return GetValue(value_ptr); +} +void SkipGram::set_max_skip_size(const int64_t max_skip_size) { AddAttr(kMaxSkipSize, MakeValue(max_skip_size)); } +int64_t SkipGram::get_max_skip_size() const { + auto value_ptr = this->GetAttr(kMaxSkipSize); + return GetValue(value_ptr); +} +void SkipGram::set_ngram_size(const int64_t ngram_size) { AddAttr(kNgramSize, MakeValue(ngram_size)); } +int64_t SkipGram::get_ngram_size() const { + auto value_ptr = this->GetAttr(kNgramSize); + return GetValue(value_ptr); +} +void SkipGram::Init(const bool include_all_grams, const int64_t max_skip_size, const int64_t ngram_size) { + this->set_include_all_grams(include_all_grams); + this->set_max_skip_size(max_skip_size); + this->set_ngram_size(ngram_size); +} + +AbstractBasePtr SkipGramInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args) { + return std::make_shared(InferType(primitive, input_args), + InferShape(primitive, input_args)->shape()); +} +REGISTER_PRIMITIVE_EVAL_IMPL(SkipGram, prim::kPrimSkipGram, SkipGramInfer); +REGISTER_PRIMITIVE_C(kNameSkipGram, SkipGram); +} // namespace ops +} // namespace mindspore diff --git a/mindspore/core/ops/skip_gram.h b/mindspore/core/ops/skip_gram.h new file mode 100644 index 00000000000..8fdbb64514b --- /dev/null +++ b/mindspore/core/ops/skip_gram.h @@ -0,0 +1,53 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef MINDSPORE_CORE_OPS_SKIP_GRAM_H_ +#define MINDSPORE_CORE_OPS_SKIP_GRAM_H_ + +#include +#include +#include +#include +#include +#include "ops/op_utils.h" +#include "ops/primitive_c.h" +#include "abstract/primitive_infer_map.h" +#include "abstract/abstract_value.h" +#include "utils/check_convert_utils.h" + +namespace mindspore { +namespace ops { +constexpr auto kNameSkipGram = "SkipGram"; +class SkipGram : public PrimitiveC { + public: + SkipGram() : PrimitiveC(kNameSkipGram) {} + ~SkipGram() = default; + MS_DECLARE_PARENT(SkipGram, PrimitiveC); + void Init(const bool include_all_grams, const int64_t max_skip_size, const int64_t ngram_size); + void set_include_all_grams(const bool include_all_grams); + bool get_include_all_grams() const; + void set_max_skip_size(const int64_t max_skip_size); + int64_t get_max_skip_size() const; + void set_ngram_size(const int64_t ngram_size); + int64_t get_ngram_size() const; +}; +AbstractBasePtr SkipGramInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args); +using PrimSkipGramPtr = std::shared_ptr; +} // namespace ops +} // namespace mindspore + +#endif // MINDSPORE_CORE_OPS_SKIP_GRAM_H_ diff --git a/mindspore/core/ops/slice.cc b/mindspore/core/ops/slice.cc new file mode 100644 index 00000000000..fd1efa584b3 --- /dev/null +++ b/mindspore/core/ops/slice.cc @@ -0,0 +1,31 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "ops/slice.h" +#include +#include +#include +#include +#include +#include "ops/op_utils.h" +#include "utils/check_convert_utils.h" +#include "abstract/primitive_infer_map.h" + +namespace mindspore { +namespace ops { +REGISTER_PRIMITIVE_C(kNameSlice, Slice); +} // namespace ops +} // namespace mindspore diff --git a/mindspore/core/ops/slice.h b/mindspore/core/ops/slice.h new file mode 100644 index 00000000000..da040f4c4e3 --- /dev/null +++ b/mindspore/core/ops/slice.h @@ -0,0 +1,40 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
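A short sketch of the SkipGram attribute interface above (values and the function name are illustrative):

  #include "ops/skip_gram.h"
  void SkipGramAttrExample() {
    mindspore::ops::SkipGram op;
    op.Init(true, 2, 3);  // include_all_grams, max_skip_size, ngram_size
    int64_t ngram = op.get_ngram_size();  // 3
    // SkipGramInfer forwards the single input's shape and dtype unchanged;
    // the input count (exactly one) is checked before inference.
    (void)ngram;
  }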
+ */
+
+#ifndef MINDSPORE_CORE_OPS_SLICE_H_
+#define MINDSPORE_CORE_OPS_SLICE_H_
+#include <memory>
+#include <set>
+#include <string>
+#include <vector>
+#include "ops/primitive_c.h"
+#include "abstract/abstract_value.h"
+#include "utils/check_convert_utils.h"
+
+namespace mindspore {
+namespace ops {
+constexpr auto kNameSlice = "Slice";
+class Slice : public PrimitiveC {
+ public:
+  Slice() : PrimitiveC(kNameSlice) { InitIOName({"x", "begin", "size"}, {"output"}); }
+  ~Slice() = default;
+  MS_DECLARE_PARENT(Slice, PrimitiveC);
+  void Init() {}
+};
+}  // namespace ops
+}  // namespace mindspore
+
+#endif  // MINDSPORE_CORE_OPS_SLICE_H_
diff --git a/mindspore/core/ops/smooth_l1_loss.cc b/mindspore/core/ops/smooth_l1_loss.cc
new file mode 100644
index 00000000000..be4b0db82f6
--- /dev/null
+++ b/mindspore/core/ops/smooth_l1_loss.cc
@@ -0,0 +1,62 @@
+/**
+ * Copyright 2020 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <map>
+#include <memory>
+#include <set>
+#include <string>
+
+#include "ops/smooth_l1_loss.h"
+#include "ops/op_utils.h"
+#include "utils/check_convert_utils.h"
+
+namespace mindspore {
+namespace ops {
+void SmoothL1Loss::Init(const float beta) { this->set_beta(beta); }
+void SmoothL1Loss::set_beta(const float beta) { this->AddAttr(kBeta, MakeValue(beta)); }
+
+float SmoothL1Loss::get_beta() const {
+  auto value_ptr = this->GetAttr(kBeta);
+  return GetValue<float>(value_ptr);
+}
+
+AbstractBasePtr SmoothL1LossInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive,
+                                  const std::vector<AbstractBasePtr> &input_args) {
+  MS_EXCEPTION_IF_NULL(primitive);
+  auto smooth_prim = primitive->cast<PrimSmoothL1LossPtr>();
+  MS_EXCEPTION_IF_NULL(smooth_prim);
+  auto prim_name = smooth_prim->name();
+  CheckAndConvertUtils::CheckInteger("smooth_l1_loss_infer", input_args.size(), kEqual, 2, prim_name);
+
+  // Infer shape: the loss is element-wise, so prediction and target must have the same shape.
+  auto prediction = CheckAndConvertUtils::ConvertShapePtrToShape("prediction", input_args[0]->BuildShape(), prim_name);
+  auto target = CheckAndConvertUtils::ConvertShapePtrToShape("target", input_args[1]->BuildShape(), prim_name);
+  CheckAndConvertUtils::Check("prediction shape", prediction, kEqual, "target shape", target, prim_name, TypeError);
+
+  // Infer type
+  auto prediction_type = input_args[0]->BuildType()->cast<TensorTypePtr>()->element();
+  const std::set<TypeId> valid_types = {kNumberTypeFloat16, kNumberTypeFloat32};
+  std::map<std::string, TypePtr> args;
+  args.emplace("prediction", input_args[0]->BuildType());
+  args.emplace("target", input_args[1]->BuildType());
+  CheckAndConvertUtils::CheckTensorTypeSame(args, valid_types, prim_name);
+
+  return std::make_shared<abstract::AbstractTensor>(prediction_type, prediction);
+}
+REGISTER_PRIMITIVE_EVAL_IMPL(SmoothL1Loss, prim::kPrimSmoothL1Loss, SmoothL1LossInfer);
+REGISTER_PRIMITIVE_C(kNameSmoothL1Loss, SmoothL1Loss);
+}  // namespace ops
+}  // namespace mindspore
diff --git a/mindspore/core/ops/smooth_l1_loss.h b/mindspore/core/ops/smooth_l1_loss.h
new file mode 100644
index 00000000000..972a27aae14
--- /dev/null
+++ b/mindspore/core/ops/smooth_l1_loss.h
@@ -0,0 +1,43 @@
+/**
+ * Copyright 2020 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_CORE_OPS_SMOOTH_L1_LOSS_H_ +#define MINDSPORE_CORE_OPS_SMOOTH_L1_LOSS_H_ +#include +#include +#include "ops/primitive_c.h" +#include "abstract/abstract_value.h" +#include "utils/check_convert_utils.h" + +namespace mindspore { +namespace ops { +constexpr auto kNameSmoothL1Loss = "SmoothL1Loss"; +class SmoothL1Loss : public PrimitiveC { + public: + SmoothL1Loss() : PrimitiveC(kNameSmoothL1Loss) { InitIOName({"prediction", "target"}, {"output"}); } + ~SmoothL1Loss() = default; + MS_DECLARE_PARENT(SmoothL1Loss, PrimitiveC); + void Init(const float beta); + void set_beta(const float beta); + float get_beta() const; +}; +AbstractBasePtr SmoothL1LossInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args); +using PrimSmoothL1LossPtr = std::shared_ptr; +} // namespace ops +} // namespace mindspore + +#endif // MINDSPORE_CORE_OPS_SMOOTH_L1_LOSS_H_ diff --git a/mindspore/core/c_ops/softmax.cc b/mindspore/core/ops/softmax.cc similarity index 73% rename from mindspore/core/c_ops/softmax.cc rename to mindspore/core/ops/softmax.cc index 159d9e1d5af..f48fa11337c 100644 --- a/mindspore/core/c_ops/softmax.cc +++ b/mindspore/core/ops/softmax.cc @@ -14,45 +14,46 @@ * limitations under the License. 
*/ -#include "c_ops/softmax.h" +#include "ops/softmax.h" #include #include #include #include #include -#include "c_ops/op_utils.h" +#include "ops/op_utils.h" #include "utils/check_convert_utils.h" #include "abstract/primitive_infer_map.h" namespace mindspore { -void SoftMax::set_axis(const std::vector &axis) { this->AddAttr(kAxis, MakeValue(axis)); } +namespace ops { +void Softmax::set_axis(const std::vector &axis) { this->AddAttr(kAxis, MakeValue(axis)); } -std::vector SoftMax::get_axis() const { +std::vector Softmax::get_axis() const { auto value_ptr = GetAttr(kAxis); return GetValue>(value_ptr); } -void SoftMax::Init(int64_t axis) { +void Softmax::Init(const int64_t axis) { auto op_name = this->name(); std::vector axis_vec = {axis}; CheckAndConvertUtils::CheckInteger("axis_len", axis_vec.size(), kEqual, 1, op_name); - auto rank = axis_vec.size(); + auto rank = SizeToLong(axis_vec.size()); for (auto &item : axis_vec) { - CheckAndConvertUtils::CheckInRange("axis", item, kIncludeLeft, {-rank, rank}, op_name); + CheckAndConvertUtils::CheckInRange("axis", item, kIncludeLeft, {-rank, rank}, op_name); } this->set_axis(axis_vec); } abstract::ShapePtr SoftMaxInferShape(const PrimitivePtr &primitive, const std::vector &input_args) { MS_EXCEPTION_IF_NULL(primitive); - auto softmax_prim = primitive->cast(); - MS_EXCEPTION_IF_NULL(softmax_prim); - auto op_name = softmax_prim->name(); - auto axis = softmax_prim->get_axis(); + auto Softmax_prim = primitive->cast(); + MS_EXCEPTION_IF_NULL(Softmax_prim); + auto op_name = Softmax_prim->name(); + auto axis = Softmax_prim->get_axis(); auto in_shape = CheckAndConvertUtils::ConvertShapePtrToShape("input_shape", input_args[0]->GetShapeTrack(), op_name); - auto rank = in_shape.size(); + auto rank = SizeToLong(in_shape.size()); for (auto &item : axis) { - CheckAndConvertUtils::CheckInRange("axis", item, kIncludeLeft, {-rank, rank}, op_name); + CheckAndConvertUtils::CheckInRange("axis", item, kIncludeLeft, {-rank, rank}, op_name); } return std::make_shared(in_shape); } @@ -68,12 +69,13 @@ TypePtr SoftMaxInferType(const PrimitivePtr &prim, const std::vector &input_args) { return std::make_shared(SoftMaxInferType(primitive, input_args), SoftMaxInferShape(primitive, input_args)->shape()); } -REGISTER_PRIMITIVE_EVAL_IMPL(SoftMax, prim::kPrimSoftMax, SoftMaxInfer); -REGISTER_PRIMITIVE_C(kNameSoftMax, SoftMax); +REGISTER_PRIMITIVE_EVAL_IMPL(Softmax, prim::kPrimSoftmax, SoftmaxInfer); +REGISTER_PRIMITIVE_C(kNameSoftmax, Softmax); +} // namespace ops } // namespace mindspore diff --git a/mindspore/core/c_ops/softmax.h b/mindspore/core/ops/softmax.h similarity index 65% rename from mindspore/core/c_ops/softmax.h rename to mindspore/core/ops/softmax.h index f881e6b22c3..464ed7c572e 100644 --- a/mindspore/core/c_ops/softmax.h +++ b/mindspore/core/ops/softmax.h @@ -14,32 +14,34 @@ * limitations under the License. 
*/ -#ifndef MINDSPORE_CORE_C_OPS_SOFTMAX_H_ -#define MINDSPORE_CORE_C_OPS_SOFTMAX_H_ +#ifndef MINDSPORE_CORE_OPS_SOFTMAX_H_ +#define MINDSPORE_CORE_OPS_SOFTMAX_H_ #include #include #include #include -#include "c_ops/primitive_c.h" +#include "ops/primitive_c.h" #include "abstract/abstract_value.h" #include "utils/check_convert_utils.h" namespace mindspore { -constexpr auto kNameSoftMax = "SoftMax"; -class SoftMax : public PrimitiveC { +namespace ops { +constexpr auto kNameSoftmax = "Softmax"; +class Softmax : public PrimitiveC { public: - SoftMax() : PrimitiveC(kNameSoftMax) { InitIOName({"x"}, {"output"}); } - ~SoftMax() = default; - MS_DECLARE_PARENT(SoftMax, PrimitiveC); - void Init(int64_t axis = -1); + Softmax() : PrimitiveC(kNameSoftmax) { InitIOName({"x"}, {"output"}); } + ~Softmax() = default; + MS_DECLARE_PARENT(Softmax, PrimitiveC); + void Init(const int64_t axis = -1); void set_axis(const std::vector &axis); std::vector get_axis() const; }; -AbstractBasePtr SoftMaxInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, +AbstractBasePtr SoftmaxInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, const std::vector &input_args); -using PrimSoftMaxPtr = std::shared_ptr; +using PrimSoftmaxPtr = std::shared_ptr; +} // namespace ops } // namespace mindspore -#endif // MINDSPORE_CORE_C_OPS_SOFTMAX_H_ +#endif // MINDSPORE_CORE_OPS_SOFTMAX_H_ diff --git a/mindspore/core/ops/softmax_cross_entropy_with_logits.cc b/mindspore/core/ops/softmax_cross_entropy_with_logits.cc new file mode 100644 index 00000000000..8597b1f137e --- /dev/null +++ b/mindspore/core/ops/softmax_cross_entropy_with_logits.cc @@ -0,0 +1,66 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+#include "ops/softmax_cross_entropy_with_logits.h"
+#include
+#include
+#include
+#include
+#include
+#include "ops/op_utils.h"
+#include "utils/check_convert_utils.h"
+#include "abstract/primitive_infer_map.h"
+
+namespace mindspore {
+namespace ops {
+
+AbstractBasePtr SoftmaxCrossEntropyWithLogitsInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive,
+                                                   const std::vector &input_args) {
+  MS_EXCEPTION_IF_NULL(primitive);
+  auto softmax_prim = primitive->cast();
+  MS_EXCEPTION_IF_NULL(softmax_prim);
+  auto prim_name = softmax_prim->name();
+  CheckAndConvertUtils::CheckInteger("softmax_cross_entropy_with_logits_infer", input_args.size(), kEqual, 2,
+                                     prim_name);
+
+  // Infer shape
+  auto logits_shape =
+    CheckAndConvertUtils::ConvertShapePtrToShape("logits_shape", input_args[0]->BuildShape(), prim_name);
+  auto labels_shape =
+    CheckAndConvertUtils::ConvertShapePtrToShape("labels_shape", input_args[1]->BuildShape(), prim_name);
+  CheckAndConvertUtils::Check("logits shape", logits_shape, kEqual, "labels shape", labels_shape, prim_name, TypeError);
+  std::vector loss_shape = {logits_shape[0]};
+  auto dlogits_shape = logits_shape;
+
+  // Infer type
+  const std::set valid_types = {kNumberTypeFloat16, kNumberTypeFloat32};
+  std::map args;
+  args.emplace("logits_type", input_args[0]->BuildType());
+  args.emplace("labels_type", input_args[1]->BuildType());
+  CheckAndConvertUtils::CheckTensorTypeSame(args, valid_types, prim_name);
+  auto logits_type = input_args[0]->BuildType()->cast()->element();
+
+  auto output0 = std::make_shared(logits_type, loss_shape);
+  auto output1 = std::make_shared(logits_type, dlogits_shape);
+  AbstractBasePtrList output = {output0, output1};
+  return std::make_shared(output);
+}
+
+REGISTER_PRIMITIVE_EVAL_IMPL(SoftmaxCrossEntropyWithLogits, prim::kPrimSoftmaxCrossEntropyWithLogits,
+                             SoftmaxCrossEntropyWithLogitsInfer);
+REGISTER_PRIMITIVE_C(kNameSoftmaxCrossEntropyWithLogits, SoftmaxCrossEntropyWithLogits);
+} // namespace ops
+} // namespace mindspore
diff --git a/mindspore/core/ops/softmax_cross_entropy_with_logits.h b/mindspore/core/ops/softmax_cross_entropy_with_logits.h
new file mode 100644
index 00000000000..61d10de048b
--- /dev/null
+++ b/mindspore/core/ops/softmax_cross_entropy_with_logits.h
@@ -0,0 +1,43 @@
+/**
+ * Copyright 2020 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +#ifndef MINDSPORE_CORE_OPS_SOFTMAX_CROSS_ENTROPY_WITH_LOGITS_H_ +#define MINDSPORE_CORE_OPS_SOFTMAX_CROSS_ENTROPY_WITH_LOGITS_H_ +#include +#include +#include +#include +#include "ops/primitive_c.h" +#include "abstract/abstract_value.h" +#include "utils/check_convert_utils.h" + +namespace mindspore { +namespace ops { +constexpr auto kNameSoftmaxCrossEntropyWithLogits = "SoftmaxCrossEntropyWithLogits"; +class SoftmaxCrossEntropyWithLogits : public PrimitiveC { + public: + SoftmaxCrossEntropyWithLogits() : PrimitiveC(kNameSoftmaxCrossEntropyWithLogits) {} + ~SoftmaxCrossEntropyWithLogits() = default; + MS_DECLARE_PARENT(SoftmaxCrossEntropyWithLogits, PrimitiveC); + void Init() {} +}; +AbstractBasePtr SoftmaxCrossEntropyWithLogitsInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args); +using PrimSoftmaxCrossEntropyWithLogitsPtr = std::shared_ptr; +} // namespace ops +} // namespace mindspore + +#endif // MINDSPORE_CORE_OPS_SOFTMAX_CROSS_ENTROPY_WITH_LOGITS_H_ diff --git a/mindspore/core/ops/space_to_batch.cc b/mindspore/core/ops/space_to_batch.cc new file mode 100644 index 00000000000..6d648cdc7a5 --- /dev/null +++ b/mindspore/core/ops/space_to_batch.cc @@ -0,0 +1,97 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#include "ops/space_to_batch.h" +#include +#include +#include +#include +#include +#include "ops/op_utils.h" +#include "utils/check_convert_utils.h" +#include "abstract/primitive_infer_map.h" + +namespace mindspore { +namespace ops { +namespace { +abstract::ShapePtr InferShape(const PrimitivePtr &primitive, const std::vector &input_args) { + MS_EXCEPTION_IF_NULL(primitive); + auto spacetobatch_prim = primitive->cast(); + MS_EXCEPTION_IF_NULL(spacetobatch_prim); + auto prim_name = spacetobatch_prim->name(); + auto input_shape = + CheckAndConvertUtils::ConvertShapePtrToShape("input_shape", input_args[0]->BuildShape(), prim_name); + CheckAndConvertUtils::CheckInteger("input shape", input_shape.size(), kEqual, 4, prim_name); + std::vector output_shape(input_shape.size()); + auto block_shape_vector = spacetobatch_prim->get_block_size(); + auto paddings = spacetobatch_prim->get_paddings(); + for (size_t i = 0; i < 2; i++) { + auto padded = output_shape[i + 2] + paddings[i][0] + paddings[i][1]; + CheckAndConvertUtils::CheckInteger("padded shape", padded % block_shape_vector.size(), kEqual, 0, prim_name); + output_shape[i + 2] = padded / block_shape_vector.size(); + } + output_shape[0] *= block_shape_vector.size() * block_shape_vector.size(); + return std::make_shared(output_shape); +} + +TypePtr InferType(const PrimitivePtr &prim, const std::vector &input_args) { + if (std::any_of(input_args.begin(), input_args.end(), [](AbstractBasePtr a) { return a == nullptr; })) { + MS_LOG(EXCEPTION) << "nullptr"; + } + std::map types; + types.emplace("x", input_args[0]->BuildType()); + auto infer_type = CheckAndConvertUtils::CheckTensorTypeSame(types, common_valid_types, prim->name()); + return TypeIdToType(infer_type); +} +} // namespace +void SpaceToBatch::set_paddings(const std::vector> &paddings) { + this->AddAttr(kPaddings, MakeValue(paddings)); + int64_t h = paddings.size(); + int64_t w = paddings[0].size(); + std::vector temp_w = {2, 2}; + CheckAndConvertUtils::Check(kPaddings, {h, w}, kEqual, "paddings_shape(2,2)", temp_w, this->name()); + for (int64_t i = 0; i < h; i++) { + for (int64_t j = 0; j < w; j++) { + CheckAndConvertUtils::CheckInteger(kPadding, paddings[i][j], kGreaterEqual, 0, this->name()); + } + } +} + +std::vector> SpaceToBatch::get_paddings() const { + auto value_ptr = GetAttr(kPaddings); + return GetValue>>(value_ptr); +} +void SpaceToBatch::set_block_size(const std::vector block_size) { + this->AddAttr(kBlockSize, MakeValue(block_size)); +} + +std::vector SpaceToBatch::get_block_size() const { + auto value_ptr = GetAttr(kBlockSize); + return GetValue>(value_ptr); +} + +void SpaceToBatch::Init(const std::vector block_size, const std::vector> &paddings) { + this->set_paddings(paddings); + this->set_block_size(block_size); +} +AbstractBasePtr SpaceToBatchInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args) { + return std::make_shared(InferType(primitive, input_args), + InferShape(primitive, input_args)->shape()); +} +REGISTER_PRIMITIVE_EVAL_IMPL(SpaceToBatch, prim::kPrimSpaceToBatch, SpaceToBatchInfer); +REGISTER_PRIMITIVE_C(kNameSpaceToBatch, SpaceToBatch); +} // namespace ops +} // namespace mindspore diff --git a/mindspore/core/ops/space_to_batch.h b/mindspore/core/ops/space_to_batch.h new file mode 100644 index 00000000000..0e2197af522 --- /dev/null +++ b/mindspore/core/ops/space_to_batch.h @@ -0,0 +1,48 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the 
"License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_CORE_OPS_SPACE_TO_BATCH_H_ +#define MINDSPORE_CORE_OPS_SPACE_TO_BATCH_H_ + +#include +#include +#include +#include +#include "ops/primitive_c.h" +#include "abstract/abstract_value.h" +#include "utils/check_convert_utils.h" + +namespace mindspore { +namespace ops { +constexpr auto kNameSpaceToBatch = "SpaceToBatch"; +class SpaceToBatch : public PrimitiveC { + public: + SpaceToBatch() : PrimitiveC(kNameSpaceToBatch) {} + ~SpaceToBatch() = default; + MS_DECLARE_PARENT(SpaceToBatch, PrimitiveC); + void Init(const std::vector block_size, const std::vector> &paddings); + void set_paddings(const std::vector> &paddings); + void set_block_size(const std::vector block_size); + std::vector get_block_size() const; + std::vector> get_paddings() const; +}; +AbstractBasePtr SpaceToBatchInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args); +using PrimSpaceToBatchPtr = std::shared_ptr; +} // namespace ops +} // namespace mindspore + +#endif // MINDSPORE_CORE_OPS_SPACE_TO_BATCH_H_ diff --git a/mindspore/core/ops/space_to_batch_nd.cc b/mindspore/core/ops/space_to_batch_nd.cc new file mode 100644 index 00000000000..44f8d5a1926 --- /dev/null +++ b/mindspore/core/ops/space_to_batch_nd.cc @@ -0,0 +1,108 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#include "ops/space_to_batch_nd.h" +#include +#include +#include +#include +#include +#include "ops/op_utils.h" +#include "utils/check_convert_utils.h" +#include "abstract/primitive_infer_map.h" + +namespace mindspore { +namespace ops { +namespace { +abstract::ShapePtr InferShape(const PrimitivePtr &primitive, const std::vector &input_args) { + MS_EXCEPTION_IF_NULL(primitive); + auto space_prim = primitive->cast(); + MS_EXCEPTION_IF_NULL(space_prim); + auto prim_name = space_prim->name(); + auto x_shape = CheckAndConvertUtils::ConvertShapePtrToShape("x_shape", input_args[0]->BuildShape(), prim_name); + CheckAndConvertUtils::CheckInteger("input_x rank", x_shape.size(), kEqual, 4, prim_name); + auto out_shape = x_shape; + int64_t block_shape_prod = 1; + int64_t offset = 2; + auto block_shape = space_prim->get_block_shape(); + auto padding = space_prim->get_paddings(); + int64_t size = block_shape.size(); + for (int64_t i = 0; i < size; i++) { + int64_t padded = out_shape[i + offset] + padding[i][0] + padding[i][1]; + if (padded % block_shape[i] != 0) { + MS_EXCEPTION(ValueError) << prim_name << " padded[" << i << "]" << padded << "should be divisible by block_shape[" + << i << "]" << block_shape[i]; + } + out_shape[i + offset] = int64_t(floor(padded / block_shape[i])); + block_shape_prod = block_shape_prod * block_shape[i]; + } + out_shape[0] = out_shape[0] * block_shape_prod; + return std::make_shared(out_shape); +} + +TypePtr InferType(const PrimitivePtr &prim, const std::vector &input_args) { + for (const auto &item : input_args) { + MS_EXCEPTION_IF_NULL(item); + } + auto infer_type = input_args[0]->BuildType()->cast()->element(); + return infer_type; +} +} // namespace + +void SpaceToBatchND::set_paddings(std::vector> paddings) { + CheckAndConvertUtils::CheckInteger(kPaddings, paddings.size(), kEqual, 2, this->name()); + int64_t h = paddings.size(); + int64_t w = paddings[0].size(); + std::vector temp_w = {2, 2}; + CheckAndConvertUtils::Check(kPaddings, {h, w}, kEqual, "paddings_shape(2,2)", temp_w, this->name()); + for (int64_t i = 0; i < h; i++) { + for (int64_t j = 0; j < w; j++) { + CheckAndConvertUtils::CheckInteger(kPaddings, paddings[i][j], kGreaterEqual, 0, this->name()); + } + } + this->AddAttr(kPaddings, MakeValue(paddings)); +} + +std::vector> SpaceToBatchND::get_paddings() const { + auto value_ptr = GetAttr(kPaddings); + return GetValue>>(value_ptr); +} +void SpaceToBatchND::set_block_shape(std::vector block_shape) { + CheckAndConvertUtils::CheckInteger(kBlockShape, block_shape.size(), kEqual, 2, this->name()); + for (int64_t i = 0; i < (int64_t)block_shape.size(); i++) { + CheckAndConvertUtils::CheckInteger(kBlockShape, block_shape[i], kGreaterEqual, 1, this->name()); + } + this->AddAttr(kBlockShape, MakeValue(block_shape)); +} + +std::vector SpaceToBatchND::get_block_shape() const { + auto value_ptr = GetAttr(kBlockShape); + return GetValue>(value_ptr); +} + +void SpaceToBatchND::Init(std::vector block_shape, std::vector> paddings) { + this->set_paddings(paddings); + this->set_block_shape(block_shape); +} + +AbstractBasePtr SpaceToBatchNDInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args) { + return std::make_shared(InferType(primitive, input_args), + InferShape(primitive, input_args)->shape()); +} +REGISTER_PRIMITIVE_EVAL_IMPL(SpaceToBatchND, prim::kPrimSpaceToBatchND, SpaceToBatchNDInfer); +REGISTER_PRIMITIVE_C(kNameSpaceToBatchND, SpaceToBatchND); +} // namespace ops +} // namespace mindspore diff --git 
a/mindspore/core/ops/space_to_batch_nd.h b/mindspore/core/ops/space_to_batch_nd.h new file mode 100644 index 00000000000..8ca02e35fe4 --- /dev/null +++ b/mindspore/core/ops/space_to_batch_nd.h @@ -0,0 +1,48 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_CORE_OPS_SPACE_TO_BATCH_ND_H_ +#define MINDSPORE_CORE_OPS_SPACE_TO_BATCH_ND_H_ + +#include +#include +#include +#include +#include "ops/primitive_c.h" +#include "abstract/abstract_value.h" +#include "utils/check_convert_utils.h" + +namespace mindspore { +namespace ops { +constexpr auto kNameSpaceToBatchND = "SpaceToBatchND"; +class SpaceToBatchND : public PrimitiveC { + public: + SpaceToBatchND() : PrimitiveC(kNameSpaceToBatchND) {} + ~SpaceToBatchND() = default; + MS_DECLARE_PARENT(SpaceToBatchND, PrimitiveC); + void Init(std::vector block_shape, const std::vector> paddings); + void set_paddings(const std::vector> paddings); + void set_block_shape(std::vector block_shape); + std::vector get_block_shape() const; + std::vector> get_paddings() const; +}; +AbstractBasePtr SpaceToBatchNDInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args); +using PrimSpaceToBatchNDPtr = std::shared_ptr; +} // namespace ops +} // namespace mindspore + +#endif // MINDSPORE_CORE_OPS_SPACE_TO_BATCH_ND_H_ diff --git a/mindspore/core/c_ops/depth_to_space.cc b/mindspore/core/ops/space_to_depth.cc similarity index 62% rename from mindspore/core/c_ops/depth_to_space.cc rename to mindspore/core/ops/space_to_depth.cc index 1c20a6c951e..d433982e1f6 100644 --- a/mindspore/core/c_ops/depth_to_space.cc +++ b/mindspore/core/ops/space_to_depth.cc @@ -13,39 +13,35 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ -#include "c_ops/depth_to_space.h" -#include -#include -#include -#include -#include -#include "c_ops/op_utils.h" -#include "utils/check_convert_utils.h" -#include "abstract/primitive_infer_map.h" + +#include "ops/space_to_depth.h" namespace mindspore { -void DepthToSpace::set_block_size(const int64_t &block_size) { - CheckAndConvertUtils::Check(kBlockSize, block_size, kGreaterEqual, "", 2, this->name()); - this->AddAttr(kBlockSize, MakeValue(block_size)); +namespace ops { +void SpaceToDepth::Init(const int64_t block_size, const Format &format) { + this->set_block_size(block_size); + this->set_format(format); } -int64_t DepthToSpace::get_block_size() const { +void SpaceToDepth::set_block_size(const int64_t block_size) { + CheckAndConvertUtils::Check(kBlockSize, block_size, kGreaterEqual, "", 2, this->name()); + AddAttr(kBlockSize, MakeValue(block_size)); +} + +int64_t SpaceToDepth::get_block_size() const { auto value_ptr = GetAttr(kBlockSize); return GetValue(value_ptr); } -void DepthToSpace::set_format(const Format &format) { + +void SpaceToDepth::set_format(const Format &format) { int64_t f = format; this->AddAttr(kFormat, MakeValue(f)); } -Format DepthToSpace::get_format() const { +Format SpaceToDepth::get_format() const { auto value_ptr = GetAttr(kFormat); return Format(GetValue(value_ptr)); } - -void DepthToSpace::Init(const int64_t &block_size, const Format &format) { - this->set_block_size(block_size); - this->set_format(format); -} -REGISTER_PRIMITIVE_C(kNameDepthToSpace, DepthToSpace); +REGISTER_PRIMITIVE_C(kNameSpaceToDepth, SpaceToDepth); +} // namespace ops } // namespace mindspore diff --git a/mindspore/core/ops/space_to_depth.h b/mindspore/core/ops/space_to_depth.h new file mode 100644 index 00000000000..edc50abfbe6 --- /dev/null +++ b/mindspore/core/ops/space_to_depth.h @@ -0,0 +1,46 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef MINDSPORE_CORE_OPS_SPACE_TO_DEPTH_H_ +#define MINDSPORE_CORE_OPS_SPACE_TO_DEPTH_H_ +#include +#include + +#include "ops/primitive_c.h" +#include "ops/op_utils.h" +#include "abstract/abstract_value.h" +#include "utils/check_convert_utils.h" + +namespace mindspore { +namespace ops { +constexpr auto kNameSpaceToDepth = "SpaceToDepth"; +class SpaceToDepth : public PrimitiveC { + public: + SpaceToDepth() : PrimitiveC(kNameSpaceToDepth) { InitIOName({"x"}, {"y"}); } + ~SpaceToDepth() = default; + MS_DECLARE_PARENT(SpaceToDepth, PrimitiveC); + void Init(const int64_t block_size, const Format &format = NCHW); + void set_block_size(const int64_t block_size); + int64_t get_block_size() const; + void set_format(const Format &format); + Format get_format() const; +}; +AbstractBasePtr SpaceToDepthInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args); +using PrimSpaceToDepthPtr = std::shared_ptr; +} // namespace ops +} // namespace mindspore +#endif // MINDSPORE_CORE_OPS_SpaceToDepth_H_ diff --git a/mindspore/core/ops/sparse_softmax_cross_entropy.cc b/mindspore/core/ops/sparse_softmax_cross_entropy.cc new file mode 100644 index 00000000000..6595808d9b2 --- /dev/null +++ b/mindspore/core/ops/sparse_softmax_cross_entropy.cc @@ -0,0 +1,64 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include +#include +#include +#include + +#include "ops/sparse_softmax_cross_entropy.h" +#include "ops/op_utils.h" +#include "utils/check_convert_utils.h" + +namespace mindspore { +namespace ops { +void SparseSoftmaxCrossEntropy::Init(const bool grad) { this->set_grad(grad); } + +void SparseSoftmaxCrossEntropy::set_grad(const bool grad) { this->AddAttr(kGrad, MakeValue(grad)); } + +bool SparseSoftmaxCrossEntropy::get_grad() const { + auto value_ptr = GetAttr(kGrad); + return GetValue(value_ptr); +} + +AbstractBasePtr SparseSoftmaxCrossEntropyInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args) { + MS_EXCEPTION_IF_NULL(primitive); + auto sparse_softmax_cross_entropy_prim = primitive->cast(); + MS_EXCEPTION_IF_NULL(sparse_softmax_cross_entropy_prim); + auto prim_name = sparse_softmax_cross_entropy_prim->name(); + CheckAndConvertUtils::CheckInteger("input numbers", input_args.size(), kEqual, 2, prim_name); + for (const auto &item : input_args) { + MS_EXCEPTION_IF_NULL(item); + } + // infer shape + auto input_shape = + CheckAndConvertUtils::ConvertShapePtrToShape("input_shape", input_args[0]->BuildShape(), prim_name); + std::vector output_shape; + if (sparse_softmax_cross_entropy_prim->get_grad() != 0) { + output_shape = input_shape; + } else { + output_shape.push_back(1); + } + // infer type + auto output_type = input_args[0]->BuildType()->cast()->element(); + return std::make_shared(output_type, output_shape); +} +REGISTER_PRIMITIVE_EVAL_IMPL(SparseSoftmaxCrossEntropy, prim::kPrimSparseSoftmaxCrossEntropy, + SparseSoftmaxCrossEntropyInfer); +REGISTER_PRIMITIVE_C(kNameSparseSoftmaxCrossEntropy, SparseSoftmaxCrossEntropy); +} // namespace ops +} // namespace mindspore diff --git a/mindspore/core/ops/sparse_softmax_cross_entropy.h b/mindspore/core/ops/sparse_softmax_cross_entropy.h new file mode 100644 index 00000000000..fa4b94c9f99 --- /dev/null +++ b/mindspore/core/ops/sparse_softmax_cross_entropy.h @@ -0,0 +1,43 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef MINDSPORE_CORE_OPS_SPARSE_SOFTMAX_CROSS_ENTROPY_H_ +#define MINDSPORE_CORE_OPS_SPARSE_SOFTMAX_CROSS_ENTROPY_H_ +#include +#include +#include "ops/primitive_c.h" +#include "abstract/abstract_value.h" +#include "utils/check_convert_utils.h" + +namespace mindspore { +namespace ops { +constexpr auto kNameSparseSoftmaxCrossEntropy = "SparseSoftmaxCrossEntropy"; +class SparseSoftmaxCrossEntropy : public PrimitiveC { + public: + SparseSoftmaxCrossEntropy() : PrimitiveC(kNameSparseSoftmaxCrossEntropy) {} + ~SparseSoftmaxCrossEntropy() = default; + MS_DECLARE_PARENT(SparseSoftmaxCrossEntropy, PrimitiveC); + void Init(const bool is_grad = false); + void set_grad(const bool is_grad); + bool get_grad() const; +}; +AbstractBasePtr SparseSoftmaxCrossEntropyInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args); +using PrimSparseSoftmaxCrossEntropyPtr = std::shared_ptr; +} // namespace ops +} // namespace mindspore + +#endif // MINDSPORE_CORE_OPS_SPARSE_SOFTMAX_CROSS_ENTROPY_H_ diff --git a/mindspore/core/ops/sparse_to_dense.cc b/mindspore/core/ops/sparse_to_dense.cc new file mode 100644 index 00000000000..cd8086fc904 --- /dev/null +++ b/mindspore/core/ops/sparse_to_dense.cc @@ -0,0 +1,51 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+#include
+#include
+#include
+#include "ops/sparse_to_dense.h"
+#include "ops/op_utils.h"
+#include "utils/check_convert_utils.h"
+#include "abstract/primitive_infer_map.h"
+
+namespace mindspore {
+namespace ops {
+AbstractBasePtr SparseToDenseInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive,
+                                   const std::vector &input_args) {
+  MS_EXCEPTION_IF_NULL(primitive);
+  auto sparse_to_dense_prim = primitive->cast();
+  MS_EXCEPTION_IF_NULL(sparse_to_dense_prim);
+  auto prim_name = sparse_to_dense_prim->name();
+  CheckAndConvertUtils::CheckInteger("input numbers", input_args.size(), kEqual, 3, prim_name);
+  for (const auto &item : input_args) {
+    MS_EXCEPTION_IF_NULL(item);
+  }
+  // infer shape
+  auto dense_shape =
+    CheckAndConvertUtils::ConvertShapePtrToShape("dense_shape", input_args[2]->BuildShape(), prim_name);
+  // infer type
+  auto indices_type = input_args[0]->BuildType()->cast()->element();
+  auto values_type = input_args[1]->BuildType()->cast()->element();
+  std::set valid_type = {TypeIdToType(kObjectTypeTensorType)};
+  CheckAndConvertUtils::CheckSubClass("indices_type", indices_type, valid_type, prim_name);
+  CheckAndConvertUtils::CheckSubClass("values_type", values_type, valid_type, prim_name);
+  return std::make_shared(values_type, dense_shape);
+}
+REGISTER_PRIMITIVE_EVAL_IMPL(SparseToDense, prim::kPrimSparseToDense, SparseToDenseInfer);
+REGISTER_PRIMITIVE_C(kNameSparseToDense, SparseToDense);
+} // namespace ops
+} // namespace mindspore
diff --git a/mindspore/core/ops/sparse_to_dense.h b/mindspore/core/ops/sparse_to_dense.h
new file mode 100644
index 00000000000..27820a60ef7
--- /dev/null
+++ b/mindspore/core/ops/sparse_to_dense.h
@@ -0,0 +1,41 @@
+/**
+ * Copyright 2020 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +#ifndef MINDSPORE_CORE_OPS_SPARSE_TO_DENSE_H_ +#define MINDSPORE_CORE_OPS_SPARSE_TO_DENSE_H_ +#include +#include +#include "ops/primitive_c.h" +#include "abstract/abstract_value.h" +#include "utils/check_convert_utils.h" + +namespace mindspore { +namespace ops { +constexpr auto kNameSparseToDense = "SparseToDense"; +class SparseToDense : public PrimitiveC { + public: + SparseToDense() : PrimitiveC(kNameSparseToDense) { InitIOName({"indices", "values", "dense_shape"}, {"output"}); } + ~SparseToDense() = default; + MS_DECLARE_PARENT(SparseToDense, PrimitiveC); + void Init() {} +}; +AbstractBasePtr SparseToDenseInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args); +using PrimSparseToDensePtr = std::shared_ptr; +} // namespace ops +} // namespace mindspore + +#endif // MINDSPORE_CORE_OPS_SPARSE_TO_DENSE_H_ diff --git a/mindspore/core/ops/split.cc b/mindspore/core/ops/split.cc new file mode 100644 index 00000000000..cd8d21775df --- /dev/null +++ b/mindspore/core/ops/split.cc @@ -0,0 +1,52 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "ops/split.h" +#include "ops/op_utils.h" +#include "utils/check_convert_utils.h" +#include "abstract/primitive_infer_map.h" + +namespace mindspore { +namespace ops { +void Split::Init(const std::vector &size_splits, const int64_t axis, const int64_t output_num) { + this->set_axis(axis); + this->set_output_num(output_num); +} + +void Split::set_size_splits(const std::vector &size_splits) { + this->AddAttr(kSizeSplits, MakeValue(size_splits)); +} +void Split::set_axis(const int64_t axis) { this->AddAttr(kAxis, MakeValue(axis)); } +void Split::set_output_num(const int64_t output_num) { this->AddAttr(kOutputNum, MakeValue(output_num)); } + +std::vector Split::get_size_splits() const { + auto value_ptr = GetAttr(kSizeSplits); + return GetValue>(value_ptr); +} + +int64_t Split::get_axis() const { + auto value_ptr = GetAttr(kAxis); + return GetValue(value_ptr); +} + +int64_t Split::get_output_num() const { + auto value_ptr = GetAttr(kOutputNum); + return GetValue(value_ptr); +} + +REGISTER_PRIMITIVE_C(kNameSplit, Split); +} // namespace ops +} // namespace mindspore diff --git a/mindspore/core/ops/split.h b/mindspore/core/ops/split.h new file mode 100644 index 00000000000..2a745180d8e --- /dev/null +++ b/mindspore/core/ops/split.h @@ -0,0 +1,48 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_CORE_OPS_SPLIT_H_ +#define MINDSPORE_CORE_OPS_SPLIT_H_ +#include +#include + +#include "ops/primitive_c.h" +#include "abstract/abstract_value.h" +#include "utils/check_convert_utils.h" + +namespace mindspore { +namespace ops { +constexpr auto kNameSplit = "Split"; +class Split : public PrimitiveC { + public: + Split() : PrimitiveC(kNameSplit) {} + ~Split() = default; + MS_DECLARE_PARENT(Split, PrimitiveC); + void Init(const std::vector &size_splits, const int64_t axis, const int64_t output_num); + void set_size_splits(const std::vector &size_splits); + void set_axis(const int64_t axis); + void set_output_num(const int64_t output_num); + std::vector get_size_splits() const; + int64_t get_axis() const; + int64_t get_output_num() const; +}; +AbstractBasePtr SplitInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args); +using PrimSplit = std::shared_ptr; +} // namespace ops +} // namespace mindspore + +#endif // MINDSPORE_CORE_OPS_SPLIT_H_ diff --git a/mindspore/core/ops/sqrt.cc b/mindspore/core/ops/sqrt.cc new file mode 100644 index 00000000000..609f11f6c59 --- /dev/null +++ b/mindspore/core/ops/sqrt.cc @@ -0,0 +1,26 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "ops/sqrt.h" +#include "ops/op_utils.h" +#include "utils/check_convert_utils.h" +#include "abstract/primitive_infer_map.h" + +namespace mindspore { +namespace ops { +REGISTER_PRIMITIVE_C(kNameSqrt, Sqrt); +} // namespace ops +} // namespace mindspore diff --git a/mindspore/core/ops/sqrt.h b/mindspore/core/ops/sqrt.h new file mode 100644 index 00000000000..5de82345508 --- /dev/null +++ b/mindspore/core/ops/sqrt.h @@ -0,0 +1,36 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef MINDSPORE_CORE_OPS_SQRT_H_ +#define MINDSPORE_CORE_OPS_SQRT_H_ +#include "ops/primitive_c.h" +#include "abstract/abstract_value.h" +#include "utils/check_convert_utils.h" + +namespace mindspore { +namespace ops { +constexpr auto kNameSqrt = "Sqrt"; +class Sqrt : public PrimitiveC { + public: + Sqrt() : PrimitiveC(kNameSqrt) { InitIOName({"x"}, {"output"}); } + ~Sqrt() = default; + MS_DECLARE_PARENT(Sqrt, PrimitiveC); + void Init() {} +}; +} // namespace ops +} // namespace mindspore + +#endif // MINDSPORE_CORE_OPS_SQRT_H_ diff --git a/mindspore/core/ops/square.cc b/mindspore/core/ops/square.cc new file mode 100644 index 00000000000..0fda0dd5012 --- /dev/null +++ b/mindspore/core/ops/square.cc @@ -0,0 +1,26 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "ops/square.h" +#include "ops/op_utils.h" +#include "utils/check_convert_utils.h" +#include "abstract/primitive_infer_map.h" + +namespace mindspore { +namespace ops { +REGISTER_PRIMITIVE_C(kNameSquare, Square); +} // namespace ops +} // namespace mindspore diff --git a/mindspore/core/ops/square.h b/mindspore/core/ops/square.h new file mode 100644 index 00000000000..4c39c1dccc1 --- /dev/null +++ b/mindspore/core/ops/square.h @@ -0,0 +1,36 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_CORE_OPS_SQUARE_H_ +#define MINDSPORE_CORE_OPS_SQUARE_H_ +#include "ops/primitive_c.h" +#include "abstract/abstract_value.h" +#include "utils/check_convert_utils.h" + +namespace mindspore { +namespace ops { +constexpr auto kNameSquare = "Square"; +class Square : public PrimitiveC { + public: + Square() : PrimitiveC(kNameSquare) { InitIOName({"input_x"}, {"y"}); } + ~Square() = default; + MS_DECLARE_PARENT(Square, PrimitiveC); + void Init() {} +}; +} // namespace ops +} // namespace mindspore + +#endif // MINDSPORE_CORE_OPS_SQUARE_H_ diff --git a/mindspore/core/ops/squared_difference.cc b/mindspore/core/ops/squared_difference.cc new file mode 100644 index 00000000000..9a638d472e1 --- /dev/null +++ b/mindspore/core/ops/squared_difference.cc @@ -0,0 +1,57 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include +#include +#include +#include "ops/squared_difference.h" +#include "ops/op_utils.h" +#include "utils/check_convert_utils.h" +#include "abstract/primitive_infer_map.h" + +namespace mindspore { +namespace ops { +namespace { +abstract::ShapePtr InferShape(const PrimitivePtr &primitive, const std::vector &input_args) { + MS_EXCEPTION_IF_NULL(primitive); + auto squared_prim = primitive->cast(); + MS_EXCEPTION_IF_NULL(squared_prim); + auto op_name = squared_prim->name(); + return BroadCastInferShape(op_name, input_args); +} + +TypePtr InferType(const PrimitivePtr &prim, const std::vector &input_args) { + for (const auto &item : input_args) { + MS_EXCEPTION_IF_NULL(item); + } + const std::set valid_types = {kNumberTypeInt32, kNumberTypeFloat16, kNumberTypeFloat32}; + std::map types; + types.emplace("x", input_args[0]->BuildType()); + types.emplace("y", input_args[1]->BuildType()); + auto infer_type = CheckAndConvertUtils::CheckTensorTypeSame(types, valid_types, prim->name()); + return TypeIdToType(infer_type); +} +} // namespace + +AbstractBasePtr SquaredDifferenceInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args) { + return std::make_shared(InferType(primitive, input_args), + InferShape(primitive, input_args)->shape()); +} +REGISTER_PRIMITIVE_EVAL_IMPL(SquaredDifference, prim::kPrimSquaredDifference, SquaredDifferenceInfer); +REGISTER_PRIMITIVE_C(kNameSquaredDifference, SquaredDifference); +} // namespace ops +} // namespace mindspore diff --git a/mindspore/core/ops/squared_difference.h b/mindspore/core/ops/squared_difference.h new file mode 100644 index 00000000000..c5362e09699 --- /dev/null +++ b/mindspore/core/ops/squared_difference.h @@ -0,0 +1,41 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef MINDSPORE_CORE_OPS_SQUARED_DIFFERENCE_H_ +#define MINDSPORE_CORE_OPS_SQUARED_DIFFERENCE_H_ +#include +#include +#include "ops/primitive_c.h" +#include "abstract/abstract_value.h" +#include "utils/check_convert_utils.h" + +namespace mindspore { +namespace ops { +constexpr auto kNameSquaredDifference = "SquaredDifference"; +class SquaredDifference : public PrimitiveC { + public: + SquaredDifference() : PrimitiveC(kNameSquaredDifference) { InitIOName({"x", "y"}, {"output"}); } + ~SquaredDifference() = default; + MS_DECLARE_PARENT(SquaredDifference, PrimitiveC); + void Init() {} +}; +AbstractBasePtr SquaredDifferenceInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args); +using PrimSquaredDifferencePtr = std::shared_ptr; +} // namespace ops +} // namespace mindspore + +#endif // MINDSPORE_CORE_OPS_SQUARED_DIFFERENCE_H_ diff --git a/mindspore/core/c_ops/squeeze.cc b/mindspore/core/ops/squeeze.cc similarity index 91% rename from mindspore/core/c_ops/squeeze.cc rename to mindspore/core/ops/squeeze.cc index c579cd3f86a..b8a816a5bf6 100644 --- a/mindspore/core/c_ops/squeeze.cc +++ b/mindspore/core/ops/squeeze.cc @@ -14,9 +14,10 @@ * limitations under the License. */ -#include "c_ops/squeeze.h" +#include "ops/squeeze.h" namespace mindspore { +namespace ops { void Squeeze::Init(const std::vector &axis) { set_axis(axis); } void Squeeze::set_axis(const std::vector &axis) { AddAttr(kAxis, MakeValue(axis)); } std::vector Squeeze::get_axis() const { @@ -34,19 +35,19 @@ abstract::ShapePtr InferShape(const PrimitivePtr &primitive, const std::vector infer_shape; auto in_shape = CheckAndConvertUtils::ConvertShapePtrToShape("input_shape", input_args[0]->GetShapeTrack(), op_name); - auto len = in_shape.size(); + auto len = SizeToLong(in_shape.size()); if (axis.empty()) { std::copy_if(in_shape.begin(), in_shape.end(), std::back_inserter(infer_shape), [](int64_t value) { return value != 1; }); } else { for (auto &item : axis) { - CheckAndConvertUtils::CheckInRange("axis_or_elememt", item, kIncludeBoth, {-len, len + 1}, op_name); + CheckAndConvertUtils::CheckInRange("axis_or_elememt", item, kIncludeBoth, {-len, len + 1}, op_name); auto idx = item >= 0 ? item : len + item; if (in_shape[idx] != 1) { MS_EXCEPTION(ValueError) << "Cannot select an axis to squeeze out which has size not equal to one."; } } - for (size_t i = 0; i < len; i++) { + for (int64_t i = 0; i < len; i++) { auto it = std::find(axis.begin(), axis.end(), i); auto it2 = std::find(axis.begin(), axis.end(), i - len); if (!(it != axis.end() || it2 != axis.end())) { @@ -72,4 +73,5 @@ AbstractBasePtr SqueezeInfer(const abstract::AnalysisEnginePtr &, const Primitiv REGISTER_PRIMITIVE_EVAL_IMPL(Squeeze, prim::kPrimSqueeze, SqueezeInfer); REGISTER_PRIMITIVE_C(kNameSqueeze, Squeeze); +} // namespace ops } // namespace mindspore diff --git a/mindspore/core/c_ops/squeeze.h b/mindspore/core/ops/squeeze.h similarity index 87% rename from mindspore/core/c_ops/squeeze.h rename to mindspore/core/ops/squeeze.h index 5a35b17924c..df5b30ff85b 100644 --- a/mindspore/core/c_ops/squeeze.h +++ b/mindspore/core/ops/squeeze.h @@ -14,21 +14,22 @@ * limitations under the License. 
*/ -#ifndef MINDSPORE_CORE_C_OPS_SQUEEZE_H_ -#define MINDSPORE_CORE_C_OPS_SQUEEZE_H_ +#ifndef MINDSPORE_CORE_OPS_SQUEEZE_H_ +#define MINDSPORE_CORE_OPS_SQUEEZE_H_ #include #include #include #include #include -#include "c_ops/op_utils.h" -#include "c_ops/primitive_c.h" +#include "ops/op_utils.h" +#include "ops/primitive_c.h" #include "abstract/primitive_infer_map.h" #include "abstract/abstract_value.h" #include "utils/check_convert_utils.h" namespace mindspore { +namespace ops { constexpr auto kNameSqueeze = "Squeeze"; class Squeeze : public PrimitiveC { public: @@ -44,6 +45,7 @@ AbstractBasePtr SqueezeInfer(const abstract::AnalysisEnginePtr &, const Primitiv const std::vector &input_args); using PrimSqueezePtr = std::shared_ptr; +} // namespace ops } // namespace mindspore -#endif // MINDSPORE_CORE_C_OPS_SQUEEZE_H_ +#endif // MINDSPORE_CORE_OPS_SQUEEZE_H_ diff --git a/mindspore/core/ops/stack.cc b/mindspore/core/ops/stack.cc new file mode 100644 index 00000000000..dec640d7147 --- /dev/null +++ b/mindspore/core/ops/stack.cc @@ -0,0 +1,82 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "ops/stack.h" + +namespace mindspore { +namespace ops { +namespace { +abstract::AbstractBasePtr StackInfer(const PrimitivePtr &primitive, const std::vector &input_args) { + MS_EXCEPTION_IF_NULL(primitive); + auto stack_prim = primitive->cast(); + MS_EXCEPTION_IF_NULL(stack_prim); + auto prim_name = stack_prim->name(); + + if (input_args.size() != 1) { + MS_LOG(ERROR) << "Invalid output size:" << input_args.size(); + } + if (input_args.size() < 1) { + MS_LOG(ERROR) << "Invalid input size " << input_args.size(); + } + auto input_shape = + CheckAndConvertUtils::ConvertShapePtrToShape("input_shape", input_args[0]->BuildShape(), prim_name); + for (int64_t i = 1; i < (int64_t)input_args.size(); ++i) { + auto input_shape_tmp = + CheckAndConvertUtils::ConvertShapePtrToShape("input_shape", input_args[i]->BuildShape(), prim_name); + if (input_shape_tmp.size() != input_shape.size()) { + MS_LOG(ERROR) << "All input shape size should be the same!"; + } + for (int64_t j = 0; j < (int64_t)input_shape.size(); ++j) { + if (input_shape_tmp.at(j) != input_shape.at(j)) { + MS_LOG(ERROR) << "All input shape should be the same!"; + } + } + } + std::vector infer_shape = input_shape; + infer_shape.insert(infer_shape.begin() + stack_prim->get_axis(), input_args.size()); + + auto infer_type0 = input_args[0]->BuildType()->cast()->element(); + for (int64_t i = 1; i < (int64_t)input_args.size(); i++) { + if (input_args[i]->BuildType()->cast()->element() == infer_type0) { + MS_LOG(ERROR) << "All input should have the same data type!input[" << i + << "] data type = " << input_args[i]->BuildType()->cast()->element(); + } + } + auto infer_type = input_args[0]->BuildType()->cast()->element(); + auto output0 = std::make_shared(infer_type, infer_shape); + AbstractBasePtrList output1 = {output0}; + return std::make_shared(output1); +} +} // namespace + +void 
Stack::set_axis(const int64_t axis) { AddAttr(kAxis, MakeValue(axis)); } + +int64_t Stack::get_axis() const { + auto value_ptr = this->GetAttr(kAxis); + return GetValue(value_ptr); +} + +void Stack::Init(const int64_t axis) { this->set_axis(axis); } + +AbstractBasePtr StackInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args) { + return std::make_shared(StackInfer(primitive, input_args)); +} + +REGISTER_PRIMITIVE_EVAL_IMPL(Stack, prim::kPrimStack, StackInfer); +REGISTER_PRIMITIVE_C(kNameStack, Stack); +} // namespace ops +} // namespace mindspore diff --git a/mindspore/core/ops/stack.h b/mindspore/core/ops/stack.h new file mode 100644 index 00000000000..09a1b21e478 --- /dev/null +++ b/mindspore/core/ops/stack.h @@ -0,0 +1,48 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_CORE_OPS_STACK_H_ +#define MINDSPORE_CORE_OPS_STACK_H_ + +#include +#include +#include +#include +#include +#include "ops/op_utils.h" +#include "ops/primitive_c.h" +#include "abstract/primitive_infer_map.h" +#include "abstract/abstract_value.h" +#include "utils/check_convert_utils.h" + +namespace mindspore { +namespace ops { +constexpr auto kNameStack = "Stack"; +class Stack : public PrimitiveC { + public: + Stack() : PrimitiveC(kNameStack) {} + ~Stack() = default; + MS_DECLARE_PARENT(Stack, PrimitiveC); + void Init(const int64_t axis); + void set_axis(const int64_t axis); + int64_t get_axis() const; +}; +AbstractBasePtr StackInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args); +using PrimStackPtr = std::shared_ptr; +} // namespace ops +} // namespace mindspore +#endif // MINDSPORE_CORE_OPS_STACK_H_ diff --git a/mindspore/core/ops/strided_slice.cc b/mindspore/core/ops/strided_slice.cc new file mode 100644 index 00000000000..0abd0252a75 --- /dev/null +++ b/mindspore/core/ops/strided_slice.cc @@ -0,0 +1,288 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "ops/strided_slice.h" +#include +#include +#include +#include +#include +#include +#include "ops/op_utils.h" +#include "utils/check_convert_utils.h" +#include "abstract/primitive_infer_map.h" + +namespace mindspore { +namespace ops { +namespace { +abstract::ShapePtr StridedSliceInferShape(const PrimitivePtr &primitive, + const std::vector &input_args) { + MS_EXCEPTION_IF_NULL(primitive); + auto strided_slice_prim = primitive->cast(); + MS_EXCEPTION_IF_NULL(strided_slice_prim); + auto prim_name = strided_slice_prim->name(); + auto temp_begin_v = input_args[1]->cast()->BuildValue(); + auto begin_v = GetValue>(temp_begin_v); + auto temp_end_v = input_args[2]->cast()->BuildValue(); + auto end_v = GetValue>(temp_end_v); + auto temp_strides_v = input_args[3]->cast()->BuildValue(); + auto strides_v = GetValue>(temp_strides_v); + + auto x_shape = CheckAndConvertUtils::ConvertShapePtrToShape("x_shape", input_args[0]->BuildShape(), prim_name); + int64_t x_rank = x_shape.size(); + int64_t slice_len = begin_v.size(); + std::vector begin_pos = strided_slice_prim->TenToTwo(strided_slice_prim->get_begin_mask()); + std::vector end_pos = strided_slice_prim->TenToTwo(strided_slice_prim->get_end_mask()); + std::vector ellipsis_pos = strided_slice_prim->TenToTwo(strided_slice_prim->get_ellipsis_mask()); + std::vector new_axis_pos = strided_slice_prim->TenToTwo(strided_slice_prim->get_new_axis_mask()); + std::vector shrink_axis_pos = strided_slice_prim->TenToTwo(strided_slice_prim->get_shrink_axis_mask()); + + int64_t i = 0; + int64_t j = 0; + int64_t start = 0; + int64_t finish = 0; + int64_t strides = 0; + int64_t slicing_length = 0; + bool has_ellipsis = false; + std::vector infer_shape; + while (i < x_rank || j < slice_len) { + if (j < slice_len) { + start = begin_v[j]; + finish = end_v[j]; + strides = strides_v[j]; + if (j < (int64_t)ellipsis_pos.size() && ellipsis_pos[j] == 1) { + has_ellipsis = true; + break; + } + if (j < (int64_t)begin_pos.size() && begin_pos[j] == 1) { + start = strides_v[j] < 0 ? -1 : 0; + } + if (j < (int64_t)end_pos.size() && end_pos[j] == 1) { + finish = strides_v[j] < 0 ? -(x_shape[i] + 1) : x_shape[i]; + } + if (j < (int64_t)new_axis_pos.size() && new_axis_pos[j] == 1) { + infer_shape.push_back(1); + j += 1; + continue; + } + if (j < (int64_t)shrink_axis_pos.size() && shrink_axis_pos[j] == 1) { + if (((-x_shape[i] <= start && start < x_shape[i]) == false) || strides < 0) { + MS_EXCEPTION(ValueError) << "when shrink axis, the stride cannot be negative number"; + } + j += 1; + i += 1; + continue; + } + } else { + start = 0; + finish = x_shape[0]; + strides = 1; + } + slicing_length = strided_slice_prim->compute_slicing_length(start, finish, strides, x_shape, i); + infer_shape.push_back(slicing_length); + i += 1; + j += 1; + } + + int64_t num = 0; + for (int64_t n = j + 1; n < slice_len; n++) { + if (new_axis_pos[n] == 1) { + num++; + } + } + if (has_ellipsis) { + int64_t ellipsis_occupied_dims = x_rank - i - (slice_len - (j + 1)) + num; + infer_shape.insert(infer_shape.end(), x_shape.begin() + i, x_shape.begin() + i + ellipsis_occupied_dims); + j += 1; + i += ellipsis_occupied_dims; + + while (i < x_rank || j < slice_len) { + start = begin_v[j]; + finish = end_v[j]; + strides = strides_v[j]; + if (j < (int64_t)begin_pos.size() || j < slice_len) { + start = strides_v[j] < 0 ? -1 : 0; + } + if (j < (int64_t)end_pos.size() && end_pos[j] == 1) { + finish = strides_v[j] < 0 ? 
-(x_shape[i] + 1) : x_shape[i]; + } + if (j < (int64_t)new_axis_pos.size() && new_axis_pos[j] == 1) { + infer_shape.push_back(1); + j += 1; + continue; + } + if (j < (int64_t)shrink_axis_pos.size() && shrink_axis_pos[j] == 1) { + if (((-x_shape[i] <= start && start < x_shape[i]) == false) || strides < 0) { + MS_EXCEPTION(ValueError) << "when shrink axis, the stride cannot be negative number"; + } + j += 1; + i += 1; + continue; + } + slicing_length = strided_slice_prim->compute_slicing_length(start, finish, strides, x_shape, i); + infer_shape.push_back(slicing_length); + i += 1; + j += 1; + } + } + return std::make_shared(infer_shape); +} + +TypePtr StridedSliceInferType(const PrimitivePtr &prim, const std::vector &input_args) { + for (const auto &item : input_args) { + MS_EXCEPTION_IF_NULL(item); + } + auto infer_type = input_args[0]->BuildType()->cast()->element(); + return infer_type; +} +} // namespace + +void StridedSlice::set_begin_mask(const int64_t begin_mask) { + CheckAndConvertUtils::CheckInteger(kBeginMask, begin_mask, kGreaterEqual, 0, this->name()); + this->AddAttr(kBeginMask, MakeValue(begin_mask)); +} +int64_t StridedSlice::get_begin_mask() const { + auto value_ptr = GetAttr(kBeginMask); + return GetValue(value_ptr); +} +void StridedSlice::set_end_mask(const int64_t end_mask) { + CheckAndConvertUtils::CheckInteger(kEndMask, end_mask, kGreaterEqual, 0, this->name()); + this->AddAttr(kEndMask, MakeValue(end_mask)); +} +int64_t StridedSlice::get_end_mask() const { + auto value_ptr = GetAttr(kEndMask); + return GetValue(value_ptr); +} +void StridedSlice::set_ellipsis_mask(const int64_t ellipsis_mask) { + CheckAndConvertUtils::CheckInteger(kEllipsisMask, ellipsis_mask, kGreaterEqual, 0, this->name()); + std::bitset bs(ellipsis_mask); + std::ostringstream buffer; + if (bs.count() > 1) { + buffer << "For" << this->name() << ", only support one ellipsis in the index, but got " << this->get_end_mask(); + MS_EXCEPTION(ValueError) << buffer.str(); + } + this->AddAttr(kEllipsisMask, MakeValue(ellipsis_mask)); +} +int64_t StridedSlice::get_ellipsis_mask() const { + auto value_ptr = GetAttr(kEllipsisMask); + return GetValue(value_ptr); +} +void StridedSlice::set_new_axis_mask(const int64_t new_axis_mask) { + CheckAndConvertUtils::CheckInteger(kNewAxisMask, new_axis_mask, kGreaterEqual, 0, this->name()); + this->AddAttr(kNewAxisMask, MakeValue(new_axis_mask)); +} +int64_t StridedSlice::get_new_axis_mask() const { + auto value_ptr = GetAttr(kNewAxisMask); + return GetValue(value_ptr); +} +void StridedSlice::set_shrink_axis_mask(const int64_t shrink_axis_mask) { + CheckAndConvertUtils::CheckInteger(kShrinkAxisMask, shrink_axis_mask, kGreaterEqual, 0, this->name()); + this->AddAttr(kShrinkAxisMask, MakeValue(shrink_axis_mask)); +} +int64_t StridedSlice::get_shrink_axis_mask() const { + auto value_ptr = GetAttr(kShrinkAxisMask); + return GetValue(value_ptr); +} +void StridedSlice::Init(const int64_t begin_mask, const int64_t end_mask, const int64_t ellipsis_mask, + const int64_t new_axis_mask, const int64_t shrink_axis_mask) { + this->set_begin_mask(begin_mask); + this->set_end_mask(end_mask); + this->set_ellipsis_mask(ellipsis_mask); + this->set_new_axis_mask(new_axis_mask); + this->set_shrink_axis_mask(shrink_axis_mask); +} + +std::vector StridedSlice::TenToTwo(int64_t num) { + std::vector output; + if (num == 0) { + output.push_back(0); + return output; + } + while (num) { + output.push_back(num % 2); + num /= 2; + } + + return output; +} + +int64_t 
StridedSlice::compute_slicing_length(int64_t start_pos, int64_t end_pos, int64_t strides, + std::vector x_shape, int64_t i) { + if (i > (int64_t)x_shape.size()) { + MS_EXCEPTION(ValueError) << "For 'StridedSlice', When their is no new axis, " + "the index length must be less or equal than the dim of x."; + } + int64_t x_dim = x_shape[i]; + int64_t slicing_length = 0; + if (strides > 0) { + if ((start_pos >= x_dim) || end_pos < -x_dim) { + slicing_length = 0; + } else { + if (-x_dim <= start_pos && start_pos < 0) { + start_pos += x_dim; + } + if (start_pos < -x_dim) { + start_pos = 0; + } + if (-x_dim <= end_pos && end_pos < 0) { + end_pos += x_dim; + } + if (end_pos > x_dim) { + end_pos = x_dim; + } + if (start_pos > end_pos) { + slicing_length = 0; + } else { + slicing_length = 1 + (end_pos - 1 - start_pos) / strides; + } + } + } else { + if (start_pos < -x_dim || end_pos >= x_dim) { + slicing_length = 0; + } else { + if (0 < start_pos && start_pos < x_dim) { + start_pos += -x_dim; + } + if (start_pos >= x_dim) { + start_pos = -1; + } + if (0 <= end_pos && end_pos < x_dim) { + end_pos += -x_dim; + } + if (end_pos < -x_dim - 1) { + end_pos = -x_dim - 1; + } + if (start_pos <= end_pos) { + slicing_length = 0; + } else { + slicing_length = 1 + (end_pos + 1 - start_pos) / strides; + } + } + } + return slicing_length; +} + +AbstractBasePtr StridedSliceInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args) { + return std::make_shared(StridedSliceInferType(primitive, input_args), + StridedSliceInferShape(primitive, input_args)->shape()); +} + +REGISTER_PRIMITIVE_EVAL_IMPL(StridedSlice, prim::kPrimStridedSlice, StridedSliceInfer); +REGISTER_PRIMITIVE_C(kNameStridedSlice, StridedSlice); + +} // namespace ops +} // namespace mindspore diff --git a/mindspore/core/ops/strided_slice.h b/mindspore/core/ops/strided_slice.h new file mode 100644 index 00000000000..8f6739713e3 --- /dev/null +++ b/mindspore/core/ops/strided_slice.h @@ -0,0 +1,57 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef MINDSPORE_CORE_OPS_STRIDED_SLICE_H_ +#define MINDSPORE_CORE_OPS_STRIDED_SLICE_H_ +#include +#include +#include +#include +#include "ops/primitive_c.h" +#include "abstract/abstract_value.h" +#include "utils/check_convert_utils.h" + +namespace mindspore { +namespace ops { +constexpr auto kNameStridedSlice = "StridedSlice"; +class StridedSlice : public PrimitiveC { + public: + StridedSlice() : PrimitiveC(kNameStridedSlice) { InitIOName({"x", "begin", "end", "strides"}, {"output"}); } + ~StridedSlice() = default; + MS_DECLARE_PARENT(StridedSlice, PrimitiveC); + void Init(const int64_t begin_mask = 0, const int64_t end_mask = 0, const int64_t ellipsis_mask = 0, + const int64_t new_axis_mask = 0, const int64_t shrink_axis_mask = 0); + void set_begin_mask(const int64_t begin_mask); + void set_end_mask(const int64_t end_mask); + void set_ellipsis_mask(const int64_t ellipsis_mask); + void set_new_axis_mask(const int64_t new_axis_mask); + void set_shrink_axis_mask(const int64_t shrink_axis_mask); + int64_t get_begin_mask() const; + int64_t get_end_mask() const; + int64_t get_ellipsis_mask() const; + int64_t get_new_axis_mask() const; + int64_t get_shrink_axis_mask() const; + std::vector TenToTwo(int64_t num); + int64_t compute_slicing_length(int64_t start_pos, int64_t end_pos, int64_t strides, std::vector x_shape, + int64_t i); +}; +AbstractBasePtr StridedSliceInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args); +using PrimStridedSlicePtr = std::shared_ptr; +} // namespace ops +} // namespace mindspore + +#endif // MINDSPORE_CORE_OPS_STRIDED_SLICE_H_ diff --git a/mindspore/core/ops/sub.cc b/mindspore/core/ops/sub.cc new file mode 100644 index 00000000000..ba395c9c0cc --- /dev/null +++ b/mindspore/core/ops/sub.cc @@ -0,0 +1,58 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
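Editor's note on the StridedSlice shape inference added above: compute_slicing_length counts, for one dimension, how many indices the (begin, end, stride) triple selects, which is the same count Python's len(range(start, stop, stride)) gives once negative indices are normalized and out-of-range bounds are clamped. Below is a minimal standalone sketch of the positive-stride case; SlicingLength and the sample values are illustrative names for this note, not part of the patch.

// Sketch only: positive strides, with the normalization and clamping described above.
#include <algorithm>
#include <cstdint>
#include <iostream>

int64_t SlicingLength(int64_t start, int64_t stop, int64_t stride, int64_t dim) {
  if (start < 0) start += dim;                          // normalize negative indices
  if (stop < 0) stop += dim;
  start = std::max<int64_t>(0, std::min(start, dim));   // clamp into [0, dim]
  stop = std::max<int64_t>(0, std::min(stop, dim));
  if (start >= stop) return 0;                          // empty slice
  return 1 + (stop - 1 - start) / stride;               // == len(range(start, stop, stride))
}

int main() {
  std::cout << SlicingLength(0, 10, 2, 10) << "\n";    // 5
  std::cout << SlicingLength(-8, 100, 3, 10) << "\n";  // 3 (indices 2, 5, 8)
  return 0;
}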
+ */
+
+#include <map>
+#include <string>
+#include <vector>
+#include <memory>
+
+#include "ops/sub.h"
+#include "ops/op_utils.h"
+#include "utils/check_convert_utils.h"
+#include "abstract/primitive_infer_map.h"
+
+namespace mindspore {
+namespace ops {
+namespace {
+abstract::ShapePtr InferShape(const PrimitivePtr &primitive, const std::vector<AbstractBasePtr> &input_args) {
+  MS_EXCEPTION_IF_NULL(primitive);
+  auto sub_prim = primitive->cast<PrimSubPtr>();
+  MS_EXCEPTION_IF_NULL(sub_prim);
+  auto prim_name = sub_prim->name();
+  return BroadCastInferShape(prim_name, input_args);
+}
+
+TypePtr InferType(const PrimitivePtr &prim, const std::vector<AbstractBasePtr> &input_args) {
+  for (const auto &item : input_args) {
+    MS_EXCEPTION_IF_NULL(item);
+  }
+  std::map<std::string, TypePtr> types;
+  types.emplace("x", input_args[0]->BuildType());
+  types.emplace("y", input_args[1]->BuildType());
+  auto infer_type = CheckAndConvertUtils::CheckTensorTypeSame(types, common_valid_types, prim->name());
+  return TypeIdToType(infer_type);
+}
+} // namespace
+
+AbstractBasePtr SubInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive,
+                         const std::vector<AbstractBasePtr> &input_args) {
+  return std::make_shared<abstract::AbstractTensor>(InferType(primitive, input_args),
+                                                    InferShape(primitive, input_args)->shape());
+}
+REGISTER_PRIMITIVE_EVAL_IMPL(Sub, prim::kPrimSub, SubInfer);
+REGISTER_PRIMITIVE_C(kNameSub, Sub);
+} // namespace ops
+} // namespace mindspore
diff --git a/mindspore/core/ops/sub.h b/mindspore/core/ops/sub.h
new file mode 100644
index 00000000000..12cd95d6a35
--- /dev/null
+++ b/mindspore/core/ops/sub.h
@@ -0,0 +1,45 @@
+/**
+ * Copyright 2020 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef MINDSPORE_CORE_OPS_SUB_H_
+#define MINDSPORE_CORE_OPS_SUB_H_
+#include <memory>
+#include <vector>
+#include <string>
+
+#include "ops/primitive_c.h"
+#include "abstract/abstract_value.h"
+#include "utils/check_convert_utils.h"
+
+namespace mindspore {
+namespace ops {
+constexpr auto kNameSub = "Sub";
+class Sub : public PrimitiveC {
+ public:
+  Sub() : PrimitiveC(kNameSub) { InitIOName({"x", "y"}, {"output"}); }
+  explicit Sub(const std::string k_name) : PrimitiveC(k_name) { InitIOName({"x", "y"}, {"output"}); }
+  ~Sub() = default;
+  MS_DECLARE_PARENT(Sub, PrimitiveC);
+  void Init() {}
+};
+
+AbstractBasePtr SubInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive,
+                         const std::vector<AbstractBasePtr> &input_args);
+using PrimSubPtr = std::shared_ptr<Sub>;
+} // namespace ops
+} // namespace mindspore
+
+#endif // MINDSPORE_CORE_OPS_SUB_H_
diff --git a/mindspore/core/ops/switch.cc b/mindspore/core/ops/switch.cc
new file mode 100644
index 00000000000..8537728148d
--- /dev/null
+++ b/mindspore/core/ops/switch.cc
@@ -0,0 +1,26 @@
+/**
+ * Copyright 2020 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "ops/switch.h" +#include "ops/op_utils.h" +#include "utils/check_convert_utils.h" +#include "abstract/primitive_infer_map.h" + +namespace mindspore { +namespace ops { +REGISTER_PRIMITIVE_C(kNameSwitch, Switch); +} // namespace ops +} // namespace mindspore diff --git a/mindspore/core/ops/switch.h b/mindspore/core/ops/switch.h new file mode 100644 index 00000000000..5782b30f9d2 --- /dev/null +++ b/mindspore/core/ops/switch.h @@ -0,0 +1,36 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_CORE_OPS_SWITCH_H_ +#define MINDSPORE_CORE_OPS_SWITCH_H_ +#include "ops/primitive_c.h" +#include "abstract/abstract_value.h" +#include "utils/check_convert_utils.h" + +namespace mindspore { +namespace ops { +constexpr auto kNameSwitch = "Switch"; +class Switch : public PrimitiveC { + public: + Switch() : PrimitiveC(kNameSwitch) {} + ~Switch() = default; + MS_DECLARE_PARENT(Switch, PrimitiveC); + void Init() {} +}; +} // namespace ops +} // namespace mindspore + +#endif // MINDSPORE_CORE_OPS_SWITCH_H_ diff --git a/mindspore/core/ops/tan.cc b/mindspore/core/ops/tan.cc new file mode 100644 index 00000000000..ce3a67273d7 --- /dev/null +++ b/mindspore/core/ops/tan.cc @@ -0,0 +1,56 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "ops/tan.h" +#include +#include +#include +#include +#include +#include "ops/op_utils.h" +#include "utils/check_convert_utils.h" +#include "abstract/primitive_infer_map.h" + +namespace mindspore { +namespace ops { +AbstractBasePtr TanInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args) { + MS_EXCEPTION_IF_NULL(primitive); + auto tan_prim = primitive->cast(); + MS_EXCEPTION_IF_NULL(tan_prim); + auto prim_name = tan_prim->name(); + CheckAndConvertUtils::CheckInteger("tan_infer", input_args.size(), kEqual, 1, prim_name); + + // Infer Shape + auto x_shape = CheckAndConvertUtils::ConvertShapePtrToShape("x_shape", input_args[0]->BuildShape(), prim_name); + auto infer_shape = std::make_shared(x_shape); + + // Infer Type + auto dtype = input_args[0]->BuildType(); + const std::set valid_types = {kNumberTypeFloat16, kNumberTypeFloat32, kNumberTypeInt32}; + CheckAndConvertUtils::CheckTensorTypeValid("x_dtype", dtype, valid_types, prim_name); + auto tensor_type = dtype->cast(); + MS_EXCEPTION_IF_NULL(tensor_type); + auto element = tensor_type->element(); + MS_EXCEPTION_IF_NULL(element); + auto infer_type = std::make_shared(TypeIdToType(element->type_id())); + + return std::make_shared(infer_type, infer_shape->shape()); +} +REGISTER_PRIMITIVE_EVAL_IMPL(Tan, prim::kPrimTan, TanInfer); +REGISTER_PRIMITIVE_C(kNameTan, Tan); +} // namespace ops +} // namespace mindspore diff --git a/mindspore/core/ops/tan.h b/mindspore/core/ops/tan.h new file mode 100644 index 00000000000..85d914867e1 --- /dev/null +++ b/mindspore/core/ops/tan.h @@ -0,0 +1,43 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_CORE_OPS_TAN_H_ +#define MINDSPORE_CORE_OPS_TAN_H_ +#include +#include +#include +#include +#include "ops/primitive_c.h" +#include "abstract/abstract_value.h" +#include "utils/check_convert_utils.h" + +namespace mindspore { +namespace ops { +constexpr auto kNameTan = "Tan"; +class Tan : public PrimitiveC { + public: + Tan() : PrimitiveC(kNameTan) {} + ~Tan() = default; + MS_DECLARE_PARENT(Tan, PrimitiveC); + void Init() {} +}; +AbstractBasePtr TanInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args); +using PrimTanPtr = std::shared_ptr; +} // namespace ops +} // namespace mindspore + +#endif // MINDSPORE_CORE_OPS_TAN_H_ diff --git a/mindspore/core/c_ops/expand_dims.cc b/mindspore/core/ops/tanh.cc similarity index 83% rename from mindspore/core/c_ops/expand_dims.cc rename to mindspore/core/ops/tanh.cc index 66cc5addf3f..0308ce560a6 100644 --- a/mindspore/core/c_ops/expand_dims.cc +++ b/mindspore/core/ops/tanh.cc @@ -14,9 +14,12 @@ * limitations under the License. 
*/ -#include "c_ops/expand_dims.h" +#include "ops/tanh.h" #include "utils/check_convert_utils.h" +#include "abstract/primitive_infer_map.h" namespace mindspore { -REGISTER_PRIMITIVE_C(kNameExpandDims, ExpandDims); +namespace ops { +REGISTER_PRIMITIVE_C(kNameTanh, Tanh); +} // namespace ops } // namespace mindspore diff --git a/mindspore/core/c_ops/asin.h b/mindspore/core/ops/tanh.h similarity index 69% rename from mindspore/core/c_ops/asin.h rename to mindspore/core/ops/tanh.h index 53bb610c887..e4a15a1dad2 100644 --- a/mindspore/core/c_ops/asin.h +++ b/mindspore/core/ops/tanh.h @@ -14,21 +14,23 @@ * limitations under the License. */ -#ifndef MINDSPORE_CORE_C_OPS_ASIN_H_ -#define MINDSPORE_CORE_C_OPS_ASIN_H_ -#include "c_ops/primitive_c.h" +#ifndef MINDSPORE_CORE_OPS_TANH_H_ +#define MINDSPORE_CORE_OPS_TANH_H_ +#include "ops/primitive_c.h" #include "abstract/abstract_value.h" #include "utils/check_convert_utils.h" namespace mindspore { -constexpr auto kNameAsin = "Asin"; -class Asin : public PrimitiveC { +namespace ops { +constexpr auto kNameTanh = "Tanh"; +class Tanh : public PrimitiveC { public: - Asin() : PrimitiveC(kNameAsin) {} - ~Asin() = default; - MS_DECLARE_PARENT(Asin, PrimitiveC); + Tanh() : PrimitiveC(kNameTanh) {} + ~Tanh() = default; + MS_DECLARE_PARENT(Tanh, PrimitiveC); void Init() {} }; +} // namespace ops } // namespace mindspore -#endif // MINDSPORE_CORE_C_OPS_ASIN_H_ +#endif // MINDSPORE_CORE_OPS_TANH_H_ diff --git a/mindspore/core/ops/tensor_list_from_tensor.cc b/mindspore/core/ops/tensor_list_from_tensor.cc new file mode 100644 index 00000000000..65e3e7d8cb3 --- /dev/null +++ b/mindspore/core/ops/tensor_list_from_tensor.cc @@ -0,0 +1,91 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "ops/tensor_list_from_tensor.h" +#include "ops/op_utils.h" +#include "utils/check_convert_utils.h" + +namespace mindspore { +namespace ops { +namespace { +abstract::ShapePtr TensorListFromTensorInferShape(const PrimitivePtr &primitive, + const std::vector &input_args) { + MS_EXCEPTION_IF_NULL(primitive); + auto tensor_list_from_tensor_prim = primitive->cast(); + MS_EXCEPTION_IF_NULL(tensor_list_from_tensor_prim); + auto prim_name = tensor_list_from_tensor_prim->name(); + auto input0_shape = + CheckAndConvertUtils::ConvertShapePtrToShape("input0 shape", input_args[0]->BuildShape(), prim_name); + auto input1_shape = + CheckAndConvertUtils::ConvertShapePtrToShape("input1 shape", input_args[1]->BuildShape(), prim_name); + if (input0_shape.size() < 1) { + MS_LOG(ERROR) << "input0_shape.size():" << input0_shape.size() << " must be greater than 0!"; + } + int64_t dim0 = input0_shape[0]; + if (dim0 < 0) { + MS_LOG(ERROR) << "input[0] dim0:" << dim0 << " must be greater than or equal to 0!"; + } + auto input1 = &input1_shape[0]; + MS_ASSERT(input1 != nullptr); + if (input1 == nullptr) { + MS_LOG(ERROR) << "input1 is nullptr"; + } + std::vector infer_shape = {1, dim0}; + return std::make_shared(infer_shape); +} + +TypePtr TensorListFromTensorInferType(const PrimitivePtr &prim, const std::vector &input_args) { + for (const auto &item : input_args) { + MS_EXCEPTION_IF_NULL(item); + } + TypeId infer_type = kObjectTypeTensorType; + return TypeIdToType(infer_type); +} +} // namespace + +void TensorListFromTensor::Init(const int64_t element_dtype, const int64_t shape_type) { + this->set_element_dtype(element_dtype); + this->set_shape_type(shape_type); +} + +int64_t TensorListFromTensor::get_element_dtype() const { + auto value_ptr = GetAttr(kElement_dtype); + return GetValue(value_ptr); +} + +int64_t TensorListFromTensor::get_shape_type() const { + auto value_ptr = GetAttr(kShapeType); + return GetValue(value_ptr); +} + +void TensorListFromTensor::set_element_dtype(const int64_t element_dtype) { + this->AddAttr(kElement_dtype, MakeValue(element_dtype)); +} + +void TensorListFromTensor::set_shape_type(const int64_t shape_type) { + this->AddAttr(kShapeType, MakeValue(shape_type)); +} + +AbstractBasePtr TensorListFromTensorInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args) { + return std::make_shared(TensorListFromTensorInferType(primitive, input_args), + TensorListFromTensorInferShape(primitive, input_args)->shape()); +} + +REGISTER_PRIMITIVE_EVAL_IMPL(TensorListFromTensor, prim::kPrimTensorListFromTensor, TensorListFromTensorInfer); +REGISTER_PRIMITIVE_C(kNameTensorListFromTensor, TensorListFromTensor); +} // namespace ops +} // namespace mindspore diff --git a/mindspore/core/ops/tensor_list_from_tensor.h b/mindspore/core/ops/tensor_list_from_tensor.h new file mode 100644 index 00000000000..62ba4a63a1f --- /dev/null +++ b/mindspore/core/ops/tensor_list_from_tensor.h @@ -0,0 +1,45 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_CORE_OPS_TENSOR_LIST_FROM_TENSOR_H_ +#define MINDSPORE_CORE_OPS_TENSOR_LIST_FROM_TENSOR_H_ +#include +#include +#include "ops/primitive_c.h" +#include "abstract/abstract_value.h" +#include "utils/check_convert_utils.h" + +namespace mindspore { +namespace ops { +constexpr auto kNameTensorListFromTensor = "TensorListFromTensor"; +class TensorListFromTensor : public PrimitiveC { + public: + TensorListFromTensor() : PrimitiveC(kNameTensorListFromTensor) {} + ~TensorListFromTensor() = default; + MS_DECLARE_PARENT(TensorListFromTensor, PrimitiveC); + void Init(const int64_t element_dtype, const int64_t shape_type); + void set_element_dtype(const int64_t element_dtype); + void set_shape_type(const int64_t shape_type); + int64_t get_element_dtype() const; + int64_t get_shape_type() const; +}; +AbstractBasePtr TensorListFromTensorInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args); +using PrimTensorListFromTensorPtr = std::shared_ptr; +} // namespace ops +} // namespace mindspore + +#endif // MINDSPORE_CORE_OPS_TENSOR_LIST_FROM_TENSOR_H_ diff --git a/mindspore/core/ops/tensor_list_get_item.cc b/mindspore/core/ops/tensor_list_get_item.cc new file mode 100644 index 00000000000..2398aef88dd --- /dev/null +++ b/mindspore/core/ops/tensor_list_get_item.cc @@ -0,0 +1,36 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "ops/tensor_list_get_item.h" +#include "ops/op_utils.h" +#include "utils/check_convert_utils.h" + +namespace mindspore { +namespace ops { +void TensorListGetItem::Init(const int64_t element_dtype) { this->set_element_dtype(element_dtype); } + +void TensorListGetItem::set_element_dtype(const int64_t element_dtype) { + this->AddAttr(kElement_dtype, MakeValue(element_dtype)); +} + +int64_t TensorListGetItem::get_element_dtype() const { + auto value_ptr = GetAttr(kElement_dtype); + return GetValue(value_ptr); +} + +REGISTER_PRIMITIVE_C(kNameTensorListGetItem, TensorListGetItem); +} // namespace ops +} // namespace mindspore diff --git a/mindspore/core/ops/tensor_list_get_item.h b/mindspore/core/ops/tensor_list_get_item.h new file mode 100644 index 00000000000..a86cf0b3451 --- /dev/null +++ b/mindspore/core/ops/tensor_list_get_item.h @@ -0,0 +1,39 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_CORE_OPS_TENSOR_LIST_GET_ITEM_H_ +#define MINDSPORE_CORE_OPS_TENSOR_LIST_GET_ITEM_H_ +#include +#include "ops/primitive_c.h" +#include "abstract/abstract_value.h" +#include "utils/check_convert_utils.h" + +namespace mindspore { +namespace ops { +constexpr auto kNameTensorListGetItem = "TensorListGetItem"; +class TensorListGetItem : public PrimitiveC { + public: + TensorListGetItem() : PrimitiveC(kNameTensorListGetItem) {} + ~TensorListGetItem() = default; + MS_DECLARE_PARENT(TensorListGetItem, PrimitiveC); + void Init(const int64_t element_dtype); + void set_element_dtype(const int64_t element_dtype); + int64_t get_element_dtype() const; +}; +} // namespace ops +} // namespace mindspore + +#endif // MINDSPORE_CORE_OPS_TENSOR_LIST_GET_ITEM_H_ diff --git a/mindspore/core/ops/tensor_list_reserve.cc b/mindspore/core/ops/tensor_list_reserve.cc new file mode 100644 index 00000000000..f2c87d010f8 --- /dev/null +++ b/mindspore/core/ops/tensor_list_reserve.cc @@ -0,0 +1,46 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "ops/tensor_list_reserve.h" +#include "ops/op_utils.h" +#include "utils/check_convert_utils.h" + +namespace mindspore { +namespace ops { +void TensorListReserve::Init(const int64_t element_dtype, const int64_t shape_type) { + this->set_element_dtype(element_dtype); + this->set_shape_type(shape_type); +} + +void TensorListReserve::set_element_dtype(const int64_t element_dtype) { + this->AddAttr(kElement_dtype, MakeValue(element_dtype)); +} + +void TensorListReserve::set_shape_type(const int64_t shape_type) { this->AddAttr(kShapeType, MakeValue(shape_type)); } + +int64_t TensorListReserve::get_element_dtype() const { + auto value_ptr = GetAttr(kElement_dtype); + return GetValue(value_ptr); +} + +int64_t TensorListReserve::get_shape_type() const { + auto value_ptr = GetAttr(kShapeType); + return GetValue(value_ptr); +} + +REGISTER_PRIMITIVE_C(kNameTensorListReserve, TensorListReserve); +} // namespace ops +} // namespace mindspore diff --git a/mindspore/core/ops/tensor_list_reserve.h b/mindspore/core/ops/tensor_list_reserve.h new file mode 100644 index 00000000000..adcf1ebe463 --- /dev/null +++ b/mindspore/core/ops/tensor_list_reserve.h @@ -0,0 +1,41 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef MINDSPORE_CORE_OPS_TENSOR_LIST_RESERVE_H_ +#define MINDSPORE_CORE_OPS_TENSOR_LIST_RESERVE_H_ +#include +#include "ops/primitive_c.h" +#include "abstract/abstract_value.h" +#include "utils/check_convert_utils.h" + +namespace mindspore { +namespace ops { +constexpr auto kNameTensorListReserve = "TensorListReserve"; +class TensorListReserve : public PrimitiveC { + public: + TensorListReserve() : PrimitiveC(kNameTensorListReserve) {} + ~TensorListReserve() = default; + MS_DECLARE_PARENT(TensorListReserve, PrimitiveC); + void Init(const int64_t element_dtype, const int64_t shape_type); + void set_element_dtype(const int64_t element_dtype); + void set_shape_type(const int64_t shape_type); + int64_t get_element_dtype() const; + int64_t get_shape_type() const; +}; +} // namespace ops +} // namespace mindspore + +#endif // MINDSPORE_CORE_OPS_TENSOR_LIST_RESERVE_H_ diff --git a/mindspore/core/ops/tensor_list_set_item.cc b/mindspore/core/ops/tensor_list_set_item.cc new file mode 100644 index 00000000000..abd28c9238a --- /dev/null +++ b/mindspore/core/ops/tensor_list_set_item.cc @@ -0,0 +1,36 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "ops/tensor_list_set_item.h" +#include "ops/op_utils.h" +#include "utils/check_convert_utils.h" + +namespace mindspore { +namespace ops { +void TensorListSetItem::Init(const int64_t element_dtype) { this->set_element_dtype(element_dtype); } + +void TensorListSetItem::set_element_dtype(const int64_t element_dtype) { + this->AddAttr(kElement_dtype, MakeValue(element_dtype)); +} + +int64_t TensorListSetItem::get_element_dtype() const { + auto value_ptr = GetAttr(kElement_dtype); + return GetValue(value_ptr); +} + +REGISTER_PRIMITIVE_C(kNameTensorListSetItem, TensorListSetItem); +} // namespace ops +} // namespace mindspore diff --git a/mindspore/core/ops/tensor_list_set_item.h b/mindspore/core/ops/tensor_list_set_item.h new file mode 100644 index 00000000000..045824449ed --- /dev/null +++ b/mindspore/core/ops/tensor_list_set_item.h @@ -0,0 +1,39 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef MINDSPORE_CORE_OPS_TENSOR_LIST_SET_ITEM_H_ +#define MINDSPORE_CORE_OPS_TENSOR_LIST_SET_ITEM_H_ +#include +#include "ops/primitive_c.h" +#include "abstract/abstract_value.h" +#include "utils/check_convert_utils.h" + +namespace mindspore { +namespace ops { +constexpr auto kNameTensorListSetItem = "TensorListSetItem"; +class TensorListSetItem : public PrimitiveC { + public: + TensorListSetItem() : PrimitiveC(kNameTensorListSetItem) {} + ~TensorListSetItem() = default; + MS_DECLARE_PARENT(TensorListSetItem, PrimitiveC); + void Init(const int64_t element_dtype); + void set_element_dtype(const int64_t element_dtype); + int64_t get_element_dtype() const; +}; +} // namespace ops +} // namespace mindspore + +#endif // MINDSPORE_CORE_OPS_TENSOR_LIST_SET_ITEM_H_ diff --git a/mindspore/core/ops/tensor_list_stack.cc b/mindspore/core/ops/tensor_list_stack.cc new file mode 100644 index 00000000000..9985420e805 --- /dev/null +++ b/mindspore/core/ops/tensor_list_stack.cc @@ -0,0 +1,76 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include +#include + +#include "ops/tensor_list_stack.h" +#include "ops/op_utils.h" +#include "utils/check_convert_utils.h" + +namespace mindspore { +namespace ops { +void TensorListStack::Init(const int64_t num_elements, const int64_t element_dtype) { + this->set_num_elements(num_elements); + this->set_element_dtype(element_dtype); +} + +void TensorListStack::set_num_elements(const int64_t num_elements) { + this->AddAttr(kNumElements, MakeValue(num_elements)); +} + +void TensorListStack::set_element_dtype(const int64_t element_dtype) { + this->AddAttr(kElement_dtype, MakeValue(element_dtype)); +} + +int64_t TensorListStack::get_num_elements() const { + auto value_ptr = GetAttr(kNumElements); + return GetValue(value_ptr); +} + +int64_t TensorListStack::get_element_dtype() const { + auto value_ptr = GetAttr(kElement_dtype); + return GetValue(value_ptr); +} + +AbstractBasePtr TensorListStackInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args) { + MS_EXCEPTION_IF_NULL(primitive); + auto TensorListStack_prim = primitive->cast(); + MS_EXCEPTION_IF_NULL(TensorListStack_prim); + for (auto input : input_args) { + MS_EXCEPTION_IF_NULL(input); + } + auto op_name = TensorListStack_prim->name(); + auto input0_shape = + CheckAndConvertUtils::ConvertShapePtrToShape("input0_shape", input_args[0]->BuildShape(), op_name); + int64_t num = std::accumulate(input0_shape.begin(), input0_shape.end(), 1LL, std::multiplies()); + if (num == 0) { + MS_LOG(ERROR) << "Try to stack a empty tensorlist!"; + } + if (input_args[1]->BuildShape() == nullptr) { + MS_LOG(ERROR) << "ele_shape->data_c() is nullptr"; + } + auto input1_shape = + CheckAndConvertUtils::ConvertShapePtrToShape("input1_shape", input_args[1]->BuildShape(), op_name); + input1_shape.insert(input1_shape.begin(), 1); + return std::make_shared(input_args[0]->BuildType(), input1_shape); +} + 
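Editor's note on TensorListStackInfer above: it flags an empty tensor list by taking the product of the first input's dimensions, so a zero product means there are no elements to stack. The check boils down to the one-liner sketched below; ElementCount is a hypothetical helper used only for this illustration, not part of the patch.

#include <cstdint>
#include <functional>
#include <iostream>
#include <numeric>
#include <vector>

// Product of all dimensions; zero means the tensor list holds no elements.
int64_t ElementCount(const std::vector<int64_t> &shape) {
  return std::accumulate(shape.begin(), shape.end(), int64_t{1}, std::multiplies<int64_t>());
}

int main() {
  std::cout << ElementCount({2, 3, 4}) << "\n";  // 24 -> fine to stack
  std::cout << ElementCount({0, 3, 4}) << "\n";  // 0  -> the "stack an empty tensorlist" error path
  return 0;
}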
+REGISTER_PRIMITIVE_EVAL_IMPL(TensorListStack, prim::kPrimTensorListStack, TensorListStackInfer); +REGISTER_PRIMITIVE_C(kNameTensorListStack, TensorListStack); +} // namespace ops +} // namespace mindspore diff --git a/mindspore/core/ops/tensor_list_stack.h b/mindspore/core/ops/tensor_list_stack.h new file mode 100644 index 00000000000..ad51e3ec791 --- /dev/null +++ b/mindspore/core/ops/tensor_list_stack.h @@ -0,0 +1,46 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_CORE_OPS_TENSOR_LIST_STACK_H_ +#define MINDSPORE_CORE_OPS_TENSOR_LIST_STACK_H_ +#include +#include +#include "ops/primitive_c.h" +#include "abstract/abstract_value.h" +#include "utils/check_convert_utils.h" + +namespace mindspore { +namespace ops { +constexpr auto kNameTensorListStack = "TensorListStack"; +class TensorListStack : public PrimitiveC { + public: + TensorListStack() : PrimitiveC(kNameTensorListStack) {} + ~TensorListStack() = default; + MS_DECLARE_PARENT(TensorListStack, PrimitiveC); + void Init(const int64_t num_elements, const int64_t element_dtype); + void set_num_elements(const int64_t num_elements); + void set_element_dtype(const int64_t element_dtype); + int64_t get_num_elements() const; + int64_t get_element_dtype() const; +}; + +AbstractBasePtr TensorListStackInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args); +using PrimTensorListStackPtr = std::shared_ptr; +} // namespace ops +} // namespace mindspore + +#endif // MINDSPORE_CORE_OPS_TENSOR_LIST_STACK_H_ diff --git a/mindspore/core/ops/tile.cc b/mindspore/core/ops/tile.cc new file mode 100644 index 00000000000..c6c7a7f385d --- /dev/null +++ b/mindspore/core/ops/tile.cc @@ -0,0 +1,77 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include +#include "ops/tile.h" +#include "ops/op_utils.h" +#include "utils/check_convert_utils.h" +#include "abstract/primitive_infer_map.h" + +namespace mindspore { +namespace ops { +namespace { +abstract::ShapePtr TileInferShape(const PrimitivePtr &primitive, const std::vector &input_args) { + MS_EXCEPTION_IF_NULL(primitive); + auto tile_prim = primitive->cast(); + MS_EXCEPTION_IF_NULL(tile_prim); + auto prim_name = tile_prim->name(); + auto input_shape = CheckAndConvertUtils::ConvertShapePtrToShape("x shape", input_args[0]->BuildShape(), prim_name); + auto multiples_v = GetValue>(input_args[1]->cast()->BuildValue()); + int len_sub = input_shape.size() - multiples_v.size(); + std::vector infer_shape = input_shape; + std::vector multiples_w; + if (len_sub == 0) { + multiples_w = multiples_v; + } + if (len_sub > 0) { + for (int64_t i = 0; i < len_sub; i++) { + infer_shape.insert(infer_shape.begin(), 1); + } + multiples_w = multiples_v; + } + if (len_sub < 0) { + MS_EXCEPTION(ValueError) << "the length of multiples can not be smaller than the" + "length of dimension in input_x"; + } + for (size_t i = 0; i < multiples_w.size(); i++) { + infer_shape[i] *= multiples_w[i]; + } + return std::make_shared(infer_shape); +} + +TypePtr TileInferType(const PrimitivePtr &prim, const std::vector &input_args) { + CheckAndConvertUtils::CheckInteger("tile_prim_infer", input_args.size(), kEqual, 2, prim->name()); + for (const auto &item : input_args) { + MS_EXCEPTION_IF_NULL(item); + } + auto x_dtype = input_args[0]->BuildType()->cast(); + std::set template_types = {TypeIdToType(kObjectTypeTensorType)}; + CheckAndConvertUtils::CheckSubClass("x_dtype", x_dtype, template_types, prim->name()); + auto infer_dtype = x_dtype->element()->type_id(); + return TypeIdToType(infer_dtype); +} +} // namespace + +AbstractBasePtr TileInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args) { + return std::make_shared(TileInferType(primitive, input_args), + TileInferShape(primitive, input_args)->shape()); +} + +REGISTER_PRIMITIVE_EVAL_IMPL(Tile, prim::kPrimTile, TileInfer); +REGISTER_PRIMITIVE_C(kNameTile, Tile); +} // namespace ops +} // namespace mindspore diff --git a/mindspore/core/ops/tile.h b/mindspore/core/ops/tile.h new file mode 100644 index 00000000000..afd8640ece1 --- /dev/null +++ b/mindspore/core/ops/tile.h @@ -0,0 +1,45 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
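Editor's note on TileInferShape above: it derives the output shape by lining the input shape up with the multiples vector and multiplying dimension by dimension. The sketch below shows the general Tile shape rule (align the ranks by left-padding with 1s, then multiply element-wise); padding whichever vector is shorter is a simplification for this note and does not reproduce the patch's exact branch structure, and TileOutputShape is an illustrative name.

#include <cstdint>
#include <iostream>
#include <vector>

// General Tile rule: out[i] = shape[i] * multiples[i] after both vectors are
// left-padded with 1s to the same rank.
std::vector<int64_t> TileOutputShape(std::vector<int64_t> shape, std::vector<int64_t> multiples) {
  while (shape.size() < multiples.size()) shape.insert(shape.begin(), 1);
  while (multiples.size() < shape.size()) multiples.insert(multiples.begin(), 1);
  std::vector<int64_t> out(shape.size());
  for (std::vector<int64_t>::size_type i = 0; i < shape.size(); ++i) out[i] = shape[i] * multiples[i];
  return out;
}

int main() {
  for (auto d : TileOutputShape({2, 3}, {3, 1, 2})) std::cout << d << " ";  // 3 2 6
  std::cout << "\n";
  return 0;
}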
+ */ + +#ifndef MINDSPORE_CORE_OPS_TILE_H_ +#define MINDSPORE_CORE_OPS_TILE_H_ +#include +#include +#include +#include + +#include "ops/primitive_c.h" +#include "abstract/abstract_value.h" +#include "utils/check_convert_utils.h" + +namespace mindspore { +namespace ops { +constexpr auto kNameTile = "Tile"; +class Tile : public PrimitiveC { + public: + Tile() : PrimitiveC(kNameTile) { InitIOName({"x", "multiples"}, {"output"}); } + explicit Tile(const std::string k_name) : PrimitiveC(k_name) { InitIOName({"x", "multiples"}, {"output"}); } + ~Tile() = default; + MS_DECLARE_PARENT(Tile, PrimitiveC); + void Init() {} +}; +AbstractBasePtr TileInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args); +using PrimTilePtr = std::shared_ptr; +} // namespace ops +} // namespace mindspore + +#endif // MINDSPORE_CORE_OPS_TILE_H_ diff --git a/mindspore/core/c_ops/binary_cross_entropy.cc b/mindspore/core/ops/to_format.cc similarity index 55% rename from mindspore/core/c_ops/binary_cross_entropy.cc rename to mindspore/core/ops/to_format.cc index fd28d92d820..39628ccf1a9 100644 --- a/mindspore/core/c_ops/binary_cross_entropy.cc +++ b/mindspore/core/ops/to_format.cc @@ -14,27 +14,35 @@ * limitations under the License. */ -#include "c_ops/binary_cross_entropy.h" +#include "ops/to_format.h" #include #include #include #include #include -#include "c_ops/op_utils.h" +#include "ops/op_utils.h" #include "utils/check_convert_utils.h" #include "abstract/primitive_infer_map.h" namespace mindspore { +namespace ops { -void BinaryCrossEntropy::set_reduction(const std::string &reduction) { - CheckAndConvertUtils::CheckString(kReduction, reduction, {"none", "mean", "sum"}, this->name()); - this->AddAttr(kReduction, MakeValue(reduction)); -} -std::string BinaryCrossEntropy::get_reduction() const { - auto value_ptr = GetAttr(kReduction); - return GetValue(value_ptr); +void ToFormat::set_src_t(const int64_t src_t) { this->AddAttr(kSrcT, MakeValue(src_t)); } +int64_t ToFormat::get_src_t() const { + auto value_ptr = GetAttr(kSrcT); + return GetValue(value_ptr); } -void BinaryCrossEntropy::Init(const std::string &reduction) { this->set_reduction(reduction); } -REGISTER_PRIMITIVE_C(kNameBinaryCrossEntropy, BinaryCrossEntropy); +void ToFormat::set_dst_t(const int64_t dst_t) { this->AddAttr(kDstT, MakeValue(dst_t)); } +int64_t ToFormat::get_dst_t() const { + auto value_ptr = GetAttr(kDstT); + return GetValue(value_ptr); +} + +void ToFormat::Init(const int64_t src_t, const int64_t dst_t) { + this->set_src_t(src_t); + this->set_dst_t(dst_t); +} +REGISTER_PRIMITIVE_C(kNameToFormat, ToFormat); +} // namespace ops } // namespace mindspore diff --git a/mindspore/core/ops/to_format.h b/mindspore/core/ops/to_format.h new file mode 100644 index 00000000000..3e438c168cd --- /dev/null +++ b/mindspore/core/ops/to_format.h @@ -0,0 +1,44 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef MINDSPORE_CORE_OPS_TOFORMAT_H_ +#define MINDSPORE_CORE_OPS_TOFORMAT_H_ +#include +#include +#include +#include +#include "ops/primitive_c.h" +#include "abstract/abstract_value.h" +#include "utils/check_convert_utils.h" + +namespace mindspore { +namespace ops { +constexpr auto kNameToFormat = "ToFormat"; +class ToFormat : public PrimitiveC { + public: + ToFormat() : PrimitiveC(kNameToFormat) {} + ~ToFormat() = default; + MS_DECLARE_PARENT(ToFormat, PrimitiveC); + void Init(const int64_t src_t, const int64_t dst_t); + void set_src_t(const int64_t src_t); + void set_dst_t(const int64_t dst_t); + int64_t get_src_t() const; + int64_t get_dst_t() const; +}; +} // namespace ops +} // namespace mindspore + +#endif // MINDSPORE_CORE_OPS_TOFORMAT_H_ diff --git a/mindspore/core/ops/topk.cc b/mindspore/core/ops/topk.cc new file mode 100644 index 00000000000..d7ec6df277e --- /dev/null +++ b/mindspore/core/ops/topk.cc @@ -0,0 +1,59 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include +#include "ops/topk.h" +#include "ops/op_utils.h" +#include "utils/check_convert_utils.h" + +namespace mindspore { +namespace ops { +void TopK::Init(const bool sorted) { this->set_sorted(sorted); } +void TopK::set_sorted(const bool sorted) { this->AddAttr(kSorted, MakeValue(sorted)); } + +bool TopK::get_sorted() const { + auto value_ptr = this->GetAttr(kSorted); + return GetValue(value_ptr); +} +AbstractBasePtr TopKInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args) { + MS_EXCEPTION_IF_NULL(primitive); + auto top_prim = primitive->cast(); + MS_EXCEPTION_IF_NULL(top_prim); + auto prim_name = top_prim->name(); + CheckAndConvertUtils::CheckInteger("top_k_infer", input_args.size(), kEqual, 2, prim_name); + + // Infer dtype + auto output0_type = input_args[0]->BuildType()->cast()->element(); + auto output1_type = TypeIdToType(kNumberTypeInt32); + const std::set valid_types = {kNumberTypeFloat16, kNumberTypeFloat32}; + CheckAndConvertUtils::CheckTensorTypeValid("input_x", input_args[0]->BuildType(), valid_types, prim_name); + + // Infer shape + auto x_shape = CheckAndConvertUtils::ConvertShapePtrToShape("x_shape", input_args[0]->BuildShape(), prim_name); + auto k_v = GetValue(input_args[1]->BuildValue()); + auto ndims = x_shape.size() - 1; + x_shape[ndims] = k_v; + + auto output0 = std::make_shared(output0_type, x_shape); + auto output1 = std::make_shared(output1_type, x_shape); + AbstractBasePtrList output = {output0, output1}; + return std::make_shared(output); +} +REGISTER_PRIMITIVE_EVAL_IMPL(TopK, prim::kPrimTopK, TopKInfer); +REGISTER_PRIMITIVE_C(kNameTopK, TopK); +} // namespace ops +} // namespace mindspore diff --git a/mindspore/core/ops/topk.h b/mindspore/core/ops/topk.h new file mode 100644 index 00000000000..94a5cf8c126 --- /dev/null +++ b/mindspore/core/ops/topk.h @@ -0,0 +1,46 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache 
License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_CORE_OPS_TOPK_H_ +#define MINDSPORE_CORE_OPS_TOPK_H_ +#include +#include +#include +#include "ops/primitive_c.h" +#include "abstract/abstract_value.h" +#include "utils/check_convert_utils.h" + +namespace mindspore { +namespace ops { +constexpr auto kNameTopK = "TopK"; +class TopK : public PrimitiveC { + public: + explicit TopK(const std::string &k_name = kNameTopK) : PrimitiveC(k_name) { + InitIOName({"input", "k"}, {"values", "indices"}); + } + ~TopK() = default; + MS_DECLARE_PARENT(TopK, PrimitiveC); + void Init(const bool sorted = false); + void set_sorted(const bool sorted); + bool get_sorted() const; +}; +AbstractBasePtr TopKInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args); +using PrimTopKPtr = std::shared_ptr; +} // namespace ops +} // namespace mindspore + +#endif // MINDSPORE_CORE_OPS_TOPK_H_ diff --git a/mindspore/core/ops/transpose.cc b/mindspore/core/ops/transpose.cc new file mode 100644 index 00000000000..0ddc5ecae11 --- /dev/null +++ b/mindspore/core/ops/transpose.cc @@ -0,0 +1,26 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "ops/transpose.h" +#include "ops/op_utils.h" +#include "utils/check_convert_utils.h" +#include "abstract/primitive_infer_map.h" + +namespace mindspore { +namespace ops { +REGISTER_PRIMITIVE_C(kNameTranspose, Transpose); +} // namespace ops +} // namespace mindspore diff --git a/mindspore/core/ops/transpose.h b/mindspore/core/ops/transpose.h new file mode 100644 index 00000000000..da58b5d5161 --- /dev/null +++ b/mindspore/core/ops/transpose.h @@ -0,0 +1,36 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
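Editor's note on TopKInfer above: both outputs reuse the input shape with only the last dimension replaced by k (the values keep the input dtype, the indices are int32). As a shape rule it reduces to the sketch below; TopKOutputShape is an illustrative name for this note, not part of the patch.

#include <cstdint>
#include <iostream>
#include <vector>

// Values and indices share this shape; only the last axis changes.
std::vector<int64_t> TopKOutputShape(std::vector<int64_t> x_shape, int64_t k) {
  x_shape.back() = k;  // assumes a non-empty shape, as the op requires a tensor input
  return x_shape;
}

int main() {
  for (auto d : TopKOutputShape({8, 16, 100}, 5)) std::cout << d << " ";  // 8 16 5
  std::cout << "\n";
  return 0;
}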
+ */ + +#ifndef MINDSPORE_CORE_OPS_TRANSPOSE_H_ +#define MINDSPORE_CORE_OPS_TRANSPOSE_H_ +#include "ops/primitive_c.h" +#include "abstract/abstract_value.h" +#include "utils/check_convert_utils.h" + +namespace mindspore { +namespace ops { +constexpr auto kNameTranspose = "Transpose"; +class Transpose : public PrimitiveC { + public: + Transpose() : PrimitiveC(kNameTranspose) { InitIOName({"x", "perm"}, {"output"}); } + ~Transpose() = default; + MS_DECLARE_PARENT(Transpose, PrimitiveC); + void Init() {} +}; +} // namespace ops +} // namespace mindspore + +#endif // MINDSPORE_CORE_OPS_TRANSPOSE_H_ diff --git a/mindspore/core/ops/tuple_get_item.cc b/mindspore/core/ops/tuple_get_item.cc new file mode 100644 index 00000000000..069dd6b675a --- /dev/null +++ b/mindspore/core/ops/tuple_get_item.cc @@ -0,0 +1,23 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "ops/tuple_get_item.h" + +namespace mindspore { +namespace ops { +REGISTER_PRIMITIVE_C(kNameTupleGetItem, TupleGetItem); +} // namespace ops +} // namespace mindspore diff --git a/mindspore/core/ops/tuple_get_item.h b/mindspore/core/ops/tuple_get_item.h new file mode 100644 index 00000000000..33f9c3125ca --- /dev/null +++ b/mindspore/core/ops/tuple_get_item.h @@ -0,0 +1,40 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_CORE_OPS_TUPLE_GET_ITEM_H_ +#define MINDSPORE_CORE_OPS_TUPLE_GET_ITEM_H_ +#include +#include +#include +#include +#include "ops/primitive_c.h" +#include "abstract/abstract_value.h" +#include "utils/check_convert_utils.h" + +namespace mindspore { +namespace ops { +constexpr auto kNameTupleGetItem = "TupleGetItem"; +class TupleGetItem : public PrimitiveC { + public: + TupleGetItem() : PrimitiveC(kNameTupleGetItem) {} + ~TupleGetItem() = default; + MS_DECLARE_PARENT(TupleGetItem, PrimitiveC); + void Init() {} +}; +} // namespace ops +} // namespace mindspore + +#endif // MINDSPORE_CORE_OPS_TUPLE_GET_ITEM_H_ diff --git a/mindspore/core/c_ops/assign_add.cc b/mindspore/core/ops/unique.cc similarity index 86% rename from mindspore/core/c_ops/assign_add.cc rename to mindspore/core/ops/unique.cc index 0810b2cc855..d51a1cdeba6 100644 --- a/mindspore/core/c_ops/assign_add.cc +++ b/mindspore/core/ops/unique.cc @@ -14,8 +14,10 @@ * limitations under the License. 
*/ -#include "c_ops/assign_add.h" +#include "ops/unique.h" namespace mindspore { -REGISTER_PRIMITIVE_C(kNameAssignAdd, AssignAdd); +namespace ops { +REGISTER_PRIMITIVE_C(kNameUnique, Unique); +} // namespace ops } // namespace mindspore diff --git a/mindspore/core/ops/unique.h b/mindspore/core/ops/unique.h new file mode 100644 index 00000000000..d0d797c0611 --- /dev/null +++ b/mindspore/core/ops/unique.h @@ -0,0 +1,36 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_CORE_OPS_UNIQUE_H_ +#define MINDSPORE_CORE_OPS_UNIQUE_H_ +#include "ops/primitive_c.h" +#include "abstract/abstract_value.h" +#include "utils/check_convert_utils.h" + +namespace mindspore { +namespace ops { +constexpr auto kNameUnique = "Unique"; +class Unique : public PrimitiveC { + public: + Unique() : PrimitiveC(kNameUnique) { InitIOName({"x", "y"}, {"output"}); } + ~Unique() = default; + MS_DECLARE_PARENT(Unique, PrimitiveC); + void Init() {} +}; +} // namespace ops +} // namespace mindspore + +#endif // MINDSPORE_CORE_OPS_UNIQUE_H_ diff --git a/mindspore/core/ops/unpack.cc b/mindspore/core/ops/unpack.cc new file mode 100644 index 00000000000..d1bf4f1e798 --- /dev/null +++ b/mindspore/core/ops/unpack.cc @@ -0,0 +1,64 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "ops/unpack.h" + +namespace mindspore { +namespace ops { + +void Unpack::Init(const int64_t axis) { this->set_axis(axis); } +void Unpack::set_axis(const int64_t axis) { AddAttr(kAxis, MakeValue(axis)); } +int64_t Unpack::get_axis() const { + auto value_ptr = this->GetAttr(kAxis); + return GetValue(value_ptr); +} + +AbstractBasePtr UnpackInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args) { + MS_EXCEPTION_IF_NULL(primitive); + auto unpack_prim = primitive->cast(); + MS_EXCEPTION_IF_NULL(unpack_prim); + auto prim_name = unpack_prim->name(); + CheckAndConvertUtils::CheckSubClass("x", input_args[0]->BuildType(), {TypeIdToType(kObjectTypeTensorType)}, + prim_name); + auto x_shape = CheckAndConvertUtils::ConvertShapePtrToShape("x_shape", input_args[0]->BuildShape(), prim_name); + int64_t dim = x_shape.size(); + int64_t axis = unpack_prim->get_axis(); + // CheckAndConvertUtils::CheckInRange("axis value", axis, kIncludeLeft, {-dim, dim}, prim_name); + if (axis < 0) { + axis = axis + dim; + } + auto output_num = x_shape[axis]; + CheckAndConvertUtils::CheckInteger("output_num", output_num, kGreaterThan, 0, prim_name); + auto output_valid_check = x_shape[axis] - output_num; + CheckAndConvertUtils::CheckInteger("The dimension which to unpack divides output_num", output_valid_check, kEqual, 0, + prim_name); + std::vector infer_shape(x_shape.begin(), x_shape.begin() + axis); + infer_shape.insert(infer_shape.end(), x_shape.begin() + axis + 1, x_shape.end()); + AbstractBasePtrList output; + auto tensor_type = input_args[0]->BuildType()->cast(); + MS_EXCEPTION_IF_NULL(tensor_type); + auto element = tensor_type->element(); + for (int64_t i = 0; i != output_num; i++) { + output.push_back(std::make_shared(element, infer_shape)); + } + return std::make_shared(output); +} + +REGISTER_PRIMITIVE_EVAL_IMPL(Unpack, prim::kPrimUnpack, UnpackInfer); +REGISTER_PRIMITIVE_C(kNameUnpack, Unpack); +} // namespace ops +} // namespace mindspore diff --git a/mindspore/core/ops/unpack.h b/mindspore/core/ops/unpack.h new file mode 100644 index 00000000000..ee53d711e60 --- /dev/null +++ b/mindspore/core/ops/unpack.h @@ -0,0 +1,49 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
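Editor's note on UnpackInfer above: a negative axis is normalized by adding the input rank, and the op then emits x_shape[axis] outputs whose shape is the input shape with that axis removed. A standalone sketch of the per-output shape follows; UnpackOutputShape is an illustrative name, not part of the patch.

#include <cstdint>
#include <iostream>
#include <vector>

// Shape of each unpacked output: the input shape with the chosen axis dropped.
// The op produces x_shape[axis] outputs of this shape.
std::vector<int64_t> UnpackOutputShape(std::vector<int64_t> x_shape, int64_t axis) {
  const int64_t rank = static_cast<int64_t>(x_shape.size());
  if (axis < 0) axis += rank;  // same normalization as UnpackInfer above
  x_shape.erase(x_shape.begin() + axis);
  return x_shape;
}

int main() {
  for (auto d : UnpackOutputShape({4, 3, 2}, -2)) std::cout << d << " ";  // 4 2, repeated 3 times
  std::cout << "\n";
  return 0;
}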
+ */ + +#ifndef MINDSPORE_CORE_OPS_UNPACK_H_ +#define MINDSPORE_CORE_OPS_UNPACK_H_ + +#include +#include +#include +#include +#include +#include "ops/op_utils.h" +#include "ops/primitive_c.h" +#include "abstract/primitive_infer_map.h" +#include "abstract/abstract_value.h" +#include "utils/check_convert_utils.h" + +namespace mindspore { +namespace ops { +constexpr auto kNameUnpack = "Unpack"; +class Unpack : public PrimitiveC { + public: + Unpack() : PrimitiveC(kNameUnpack) {} + ~Unpack() = default; + MS_DECLARE_PARENT(Unpack, PrimitiveC); + void Init(const int64_t axis = 0); + void set_axis(const int64_t axis); + int64_t get_axis() const; +}; +AbstractBasePtr UnpackInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args); +using PrimUnpackPtr = std::shared_ptr; +} // namespace ops +} // namespace mindspore + +#endif // MINDSPORE_CORE_OPS_UNPACK_H_ diff --git a/mindspore/core/ops/unsorted_segment_sum.cc b/mindspore/core/ops/unsorted_segment_sum.cc new file mode 100644 index 00000000000..d0c5d63be2b --- /dev/null +++ b/mindspore/core/ops/unsorted_segment_sum.cc @@ -0,0 +1,86 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#include "ops/unsorted_segment_sum.h" +#include +#include +#include +#include +#include +#include "ops/op_utils.h" +#include "utils/check_convert_utils.h" +#include "abstract/primitive_infer_map.h" + +namespace mindspore { +namespace ops { +AbstractBasePtr UnsortedSegmentSumInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args) { + MS_EXCEPTION_IF_NULL(primitive); + auto unsortedsegmentsum_prim = primitive->cast(); + MS_EXCEPTION_IF_NULL(unsortedsegmentsum_prim); + auto prim_name = unsortedsegmentsum_prim->name(); + + // Infer type + auto x_type = input_args[0]->BuildType()->cast()->element(); + auto num_segments_type = input_args[2]->BuildType(); + auto num_segments_v = 4; + std::set valid_x_type = {TypeIdToType(kObjectTypeTensorType)}; + CheckAndConvertUtils::CheckSubClass("input_x", input_args[0]->BuildType(), valid_x_type, prim_name); + std::set valid_segment_ids_type = {TypeIdToType(kObjectTypeTensorType)}; + CheckAndConvertUtils::CheckSubClass("segment_ids", input_args[1]->BuildType(), valid_segment_ids_type, prim_name); + + // Infer shape + auto x_shape = CheckAndConvertUtils::ConvertShapePtrToShape("x_shape", input_args[0]->BuildShape(), prim_name); + CheckAndConvertUtils::CheckInteger("x_shape", x_shape.size(), kGreaterThan, 0, prim_name); + auto shp = x_shape; + auto segment_ids_shape = + CheckAndConvertUtils::ConvertShapePtrToShape("x_shape", input_args[1]->BuildShape(), prim_name); + CheckAndConvertUtils::CheckInteger("segment_ids_shape", segment_ids_shape.size(), kGreaterThan, 0, prim_name); + CheckAndConvertUtils::Check("input_x", x_shape.size(), kGreaterEqual, "segment_ids_shape", segment_ids_shape.size(), + prim_name); + + if ((x_shape.end() != find(x_shape.begin(), x_shape.end(), -1)) && 
+ (segment_ids_shape.end() != find(segment_ids_shape.begin(), segment_ids_shape.end(), -1))) { + int64_t size = segment_ids_shape.size(); + for (int64_t i = 0; i < size; ++i) { + CheckAndConvertUtils::Check("segment_ids_shp", segment_ids_shape[i], kEqual, "x_shape", x_shape[i], prim_name); + } + } + + const std::set valid_segments_types = {TypeIdToType(kObjectTypeTensorType)}; + for (const auto &valid_segments_type : valid_segments_types) { + if (IsIdentidityOrSubclass(num_segments_type, valid_segments_type)) { + const std::set valid_num_segments_types = {kNumberTypeInt32, kNumberTypeInt64}; + CheckAndConvertUtils::CheckTensorTypeValid("num_segments", input_args[2]->BuildType(), valid_num_segments_types, + prim_name); + shp = {-1}; + } else { + CheckAndConvertUtils::CheckInteger("num_segments", num_segments_v, kGreaterThan, 0, prim_name); + shp = {num_segments_v}; + } + } + + int64_t size_segment_ids_shp = segment_ids_shape.size(); + int64_t size_x_shpe = x_shape.size(); + for (int64_t i = size_segment_ids_shp; i < size_x_shpe; ++i) { + shp.emplace_back(x_shape[i]); + } + + return std::make_shared(x_type, shp); +} +REGISTER_PRIMITIVE_EVAL_IMPL(UnsortedSegmentSum, prim::kPrimUnsortedSegmentSum, UnsortedSegmentSumInfer); +REGISTER_PRIMITIVE_C(kNameUnsortedSegmentSum, UnsortedSegmentSum); +} // namespace ops +} // namespace mindspore diff --git a/mindspore/core/ops/unsorted_segment_sum.h b/mindspore/core/ops/unsorted_segment_sum.h new file mode 100644 index 00000000000..986c3b730cb --- /dev/null +++ b/mindspore/core/ops/unsorted_segment_sum.h @@ -0,0 +1,47 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_CORE_OPS_UNSORTED_SEGMENT_SUM_H_ +#define MINDSPORE_CORE_OPS_UNSORTED_SEGMENT_SUM_H_ + +#include +#include +#include +#include +#include "ops/primitive_c.h" +#include "abstract/abstract_value.h" +#include "utils/check_convert_utils.h" + +namespace mindspore { +namespace ops { +constexpr auto kNameUnsortedSegmentSum = "UnsortedSegmentSum"; +class UnsortedSegmentSum : public PrimitiveC { + public: + UnsortedSegmentSum() : PrimitiveC(kNameUnsortedSegmentSum) { + InitIOName({"x", "segment_ids", "num_segments"}, {"y"}); + } + ~UnsortedSegmentSum() = default; + MS_DECLARE_PARENT(UnsortedSegmentSum, PrimitiveC); + void Init() {} +}; + +AbstractBasePtr UnsortedSegmentSumInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args); +using PrimUnsortedSegmentSumPtr = std::shared_ptr; +} // namespace ops +} // namespace mindspore + +#endif // MINDSPORE_CORE_OPS_UNSORTED_SEGMENT_SUM_H_ diff --git a/mindspore/core/ops/unsqueeze.cc b/mindspore/core/ops/unsqueeze.cc new file mode 100644 index 00000000000..3168651ac0c --- /dev/null +++ b/mindspore/core/ops/unsqueeze.cc @@ -0,0 +1,75 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "ops/unsqueeze.h" +#include "utils/check_convert_utils.h" +#include "abstract/primitive_infer_map.h" +#include "ops/op_utils.h" + +namespace mindspore { +namespace ops { +void Unsqueeze::Init(const std::vector axis) { this->set_axis(axis); } + +void Unsqueeze::set_axis(std::vector axis) { this->AddAttr(kAxis, MakeValue(axis)); } + +std::vector Unsqueeze::get_axis() const { + auto value_ptr = this->GetAttr(kAxis); + return GetValue>(value_ptr); +} +AbstractBasePtr UnsqueezeInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args) { + MS_EXCEPTION_IF_NULL(primitive); + auto unsqueeze_prim = primitive->cast(); + MS_EXCEPTION_IF_NULL(unsqueeze_prim); + auto prim_name = unsqueeze_prim->name(); + CheckAndConvertUtils::CheckInteger("unsqueeze_infer", input_args.size(), kEqual, 1, prim_name); + MS_EXCEPTION_IF_NULL(input_args[0]); + auto input = input_args[0]; + + // Infer type + auto input_type = input->BuildType()->cast()->element(); + + // Infer shape + auto dims = unsqueeze_prim->get_axis(); + auto input_shape = CheckAndConvertUtils::ConvertShapePtrToShape("input", input->BuildShape(), prim_name); + auto input_rank = input_shape.size(); + auto dim_rank = dims.size(); + std::vector out_shape; + if (dim_rank == 0) { + std::copy_if(input_shape.begin(), input_shape.end(), out_shape.begin(), [](const auto item) { return item == 1; }); + } else { + auto sz = input_rank + dim_rank; + size_t in_itr = 0; + size_t ax_itr = 0; + for (size_t i = 0; i < sz; i++) { + if (ax_itr < dim_rank && dims[ax_itr] == (int64_t)i) { + out_shape.emplace_back(1); + ax_itr++; + } else if (ax_itr < dim_rank && dims[ax_itr] + sz == i) { + out_shape.emplace_back(1); + ax_itr++; + } else { + out_shape.emplace_back(input_shape[in_itr]); + in_itr++; + } + } + } + return std::make_shared(input_type, out_shape); +} +REGISTER_PRIMITIVE_EVAL_IMPL(Unsqueeze, prim::kPrimUnsqueeze, UnsqueezeInfer); +REGISTER_PRIMITIVE_C(kNameUnsqueeze, Unsqueeze); +} // namespace ops +} // namespace mindspore diff --git a/mindspore/core/ops/unsqueeze.h b/mindspore/core/ops/unsqueeze.h new file mode 100644 index 00000000000..a207d51db2b --- /dev/null +++ b/mindspore/core/ops/unsqueeze.h @@ -0,0 +1,45 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef MINDSPORE_CORE_OPS_UNSQUEEZE_H_ +#define MINDSPORE_CORE_OPS_UNSQUEEZE_H_ + +#include +#include + +#include "ops/primitive_c.h" +#include "abstract/abstract_value.h" +#include "utils/check_convert_utils.h" + +namespace mindspore { +namespace ops { +constexpr auto kNameUnsqueeze = "Unsqueeze"; +class Unsqueeze : public PrimitiveC { + public: + Unsqueeze() : PrimitiveC(kNameUnsqueeze) {} + ~Unsqueeze() = default; + MS_DECLARE_PARENT(Unsqueeze, PrimitiveC); + void Init(const std::vector axis); + void set_axis(const std::vector axis); + std::vector get_axis() const; +}; +AbstractBasePtr UnsqueezeInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args); +using PrimUnsqueezePtr = std::shared_ptr; +} // namespace ops +} // namespace mindspore + +#endif // MINDSPORE_CORE_OPS_UNSQUEEZE_H_ diff --git a/mindspore/core/ops/unstack.cc b/mindspore/core/ops/unstack.cc new file mode 100644 index 00000000000..050174f0beb --- /dev/null +++ b/mindspore/core/ops/unstack.cc @@ -0,0 +1,63 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "ops/unstack.h" + +namespace mindspore { +namespace ops { + +void Unstack::Init(const int64_t axis) { this->set_axis(axis); } +void Unstack::set_axis(const int64_t axis) { AddAttr(kAxis, MakeValue(axis)); } +int64_t Unstack::get_axis() const { + auto value_ptr = this->GetAttr(kAxis); + return GetValue(value_ptr); +} +AbstractBasePtr UnstackInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args) { + MS_EXCEPTION_IF_NULL(primitive); + auto unstack_prim = primitive->cast(); + MS_EXCEPTION_IF_NULL(unstack_prim); + auto prim_name = unstack_prim->name(); + CheckAndConvertUtils::CheckSubClass("x", input_args[0]->BuildType(), {TypeIdToType(kObjectTypeTensorType)}, + prim_name); + auto x_shape = CheckAndConvertUtils::ConvertShapePtrToShape("x_shape", input_args[0]->BuildShape(), prim_name); + int64_t dim = x_shape.size(); + int64_t axis = unstack_prim->get_axis(); + // CheckAndConvertUtils::CheckInRange("axis value", axis, kIncludeLeft, {-dim, dim}, prim_name); + if (axis < 0) { + axis = axis + dim; + } + auto output_num = x_shape[axis]; + CheckAndConvertUtils::CheckInteger("output_num", output_num, kGreaterThan, 0, prim_name); + auto output_valid_check = x_shape[axis] - output_num; + CheckAndConvertUtils::CheckInteger("The dimension which to unstack divides output_num", output_valid_check, kEqual, 0, + prim_name); + std::vector infer_shape(x_shape.begin(), x_shape.begin() + axis); + infer_shape.insert(infer_shape.end(), x_shape.begin() + axis + 1, x_shape.end()); + AbstractBasePtrList output; + auto tensor_type = input_args[0]->BuildType()->cast(); + MS_EXCEPTION_IF_NULL(tensor_type); + auto element = tensor_type->element(); + for (int64_t i = 0; i != output_num; i++) { + output.push_back(std::make_shared(element, infer_shape)); + } + return std::make_shared(output); +} + 
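Both Unpack and Unstack above share the same shape-inference pattern: normalize a possibly negative axis, then produce x_shape[axis] outputs whose shapes drop that axis, wrapped together in an AbstractTuple. A minimal standalone sketch of that shape computation follows; it is illustrative only, and the helper name and plain std::vector types are assumptions, not part of this patch.

#include <cstdint>
#include <utility>
#include <vector>

// Hypothetical helper mirroring the logic in UnpackInfer/UnstackInfer:
// returns the per-output shape and how many outputs are produced along `axis`.
std::pair<std::vector<int64_t>, int64_t> UnstackOutputShape(const std::vector<int64_t> &x_shape, int64_t axis) {
  const int64_t dim = static_cast<int64_t>(x_shape.size());
  if (axis < 0) {
    axis += dim;  // normalize a negative axis, as the infer functions do
  }
  const int64_t output_num = x_shape[axis];  // one output per slice along `axis`
  std::vector<int64_t> out_shape(x_shape.begin(), x_shape.begin() + axis);
  out_shape.insert(out_shape.end(), x_shape.begin() + axis + 1, x_shape.end());
  return {out_shape, output_num};
}

// Example: a {2, 3, 4} input unstacked along axis 1 yields 3 outputs of shape {2, 4}.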
+REGISTER_PRIMITIVE_EVAL_IMPL(Unstack, prim::kPrimUnstack, UnstackInfer); +REGISTER_PRIMITIVE_C(kNameUnstack, Unstack); +} // namespace ops +} // namespace mindspore diff --git a/mindspore/core/ops/unstack.h b/mindspore/core/ops/unstack.h new file mode 100644 index 00000000000..3657f5dc7dd --- /dev/null +++ b/mindspore/core/ops/unstack.h @@ -0,0 +1,50 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_CORE_OPS_UNSTACK_H_ +#define MINDSPORE_CORE_OPS_UNSTACK_H_ + +#include +#include +#include +#include +#include +#include "ops/op_utils.h" +#include "ops/primitive_c.h" +#include "abstract/primitive_infer_map.h" +#include "abstract/abstract_value.h" +#include "utils/check_convert_utils.h" + +namespace mindspore { +namespace ops { +constexpr auto kNameUnstack = "Unstack"; +class Unstack : public PrimitiveC { + public: + Unstack() : PrimitiveC(kNameUnstack) {} + ~Unstack() = default; + MS_DECLARE_PARENT(Unstack, PrimitiveC); + void Init(const int64_t axis = 0); + void set_axis(const int64_t axis); + int64_t get_axis() const; +}; + +AbstractBasePtr UnstackInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args); +using PrimUnstackPtr = std::shared_ptr; +} // namespace ops +} // namespace mindspore + +#endif // MINDSPORE_CORE_OPS_UNSTACK_H_ diff --git a/mindspore/core/ops/where.cc b/mindspore/core/ops/where.cc new file mode 100644 index 00000000000..57b19d430e6 --- /dev/null +++ b/mindspore/core/ops/where.cc @@ -0,0 +1,80 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include +#include "ops/where.h" +#include "utils/check_convert_utils.h" +#include "abstract/primitive_infer_map.h" +#include "ops/op_utils.h" + +namespace mindspore { +namespace ops { + +AbstractBasePtr WhereInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args) { + MS_EXCEPTION_IF_NULL(primitive); + auto Where_prim = primitive->cast(); + MS_EXCEPTION_IF_NULL(Where_prim); + for (auto input : input_args) { + MS_EXCEPTION_IF_NULL(input); + } + + if (input_args.size() < 3) { + MS_LOG(ERROR) << "Input shape tensors should b"; + } + auto op_name = Where_prim->name(); + CheckAndConvertUtils::CheckInteger("input numbers", input_args.size(), kGreaterEqual, 3, op_name); + auto input0_type_ = input_args[0]->BuildType()->cast(); + MS_EXCEPTION_IF_NULL(input0_type_); + auto input0_type = input0_type_->element(); + auto input0_shape = + CheckAndConvertUtils::ConvertShapePtrToShape("input0_shape", input_args[0]->BuildShape(), op_name); + auto num = input_args[0]->BuildValue()->cast()->ElementsNum(); + auto input1_shape = + CheckAndConvertUtils::ConvertShapePtrToShape("input1_shape", input_args[1]->BuildShape(), op_name); + auto num1 = input_args[1]->BuildValue()->cast()->ElementsNum(); + auto input2_shape = + CheckAndConvertUtils::ConvertShapePtrToShape("input2_shape", input_args[2]->BuildShape(), op_name); + auto num2 = input_args[2]->BuildValue()->cast()->ElementsNum(); + int64_t nummax = num > num1 ? num : (num1 > num2 ? num1 : num2); + int64_t axisout = 0; + int64_t temp = 0; + for (int64_t j = 0; j < (int64_t)input0_shape.size(); j++) { + if (input0_shape[j] == input1_shape[j] && input0_shape[j] != input2_shape[j]) { + axisout = j; + break; + } + if (input0_shape[j] == input2_shape[j] && input0_shape[j] != input1_shape[j]) { + axisout = j; + break; + } + if (input1_shape[j] != input2_shape[j] && input0_shape[j] == input1_shape[j]) { + axisout = j; + break; + } + temp += 1; + if (temp == (int64_t)input0_shape.size()) { + return std::make_shared(input0_type, input0_shape); + } + } + input0_shape[axisout] = nummax; + return std::make_shared(input0_type, input0_shape); +} + +REGISTER_PRIMITIVE_EVAL_IMPL(Where, prim::kPrimWhere, WhereInfer); +REGISTER_PRIMITIVE_C(kNameWhere, Where); +} // namespace ops +} // namespace mindspore diff --git a/mindspore/core/ops/where.h b/mindspore/core/ops/where.h new file mode 100644 index 00000000000..5a0b8c37a88 --- /dev/null +++ b/mindspore/core/ops/where.h @@ -0,0 +1,43 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef MINDSPORE_CORE_OPS_WHERE_H_ +#define MINDSPORE_CORE_OPS_WHERE_H_ +#include +#include + +#include "ops/primitive_c.h" +#include "abstract/abstract_value.h" +#include "utils/check_convert_utils.h" + +namespace mindspore { +namespace ops { +constexpr auto kNameWhere = "Where"; +class Where : public PrimitiveC { + public: + Where() : PrimitiveC(kNameWhere) { InitIOName({"condition"}, {"output"}); } + ~Where() = default; + MS_DECLARE_PARENT(Where, PrimitiveC); + void Init() {} +}; + +AbstractBasePtr WhereInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args); +using PrimWherePtr = std::shared_ptr; +} // namespace ops +} // namespace mindspore + +#endif // MINDSPORE_CORE_OPS_WHERE_H_ diff --git a/mindspore/core/ops/while.cc b/mindspore/core/ops/while.cc new file mode 100644 index 00000000000..91f308d3b14 --- /dev/null +++ b/mindspore/core/ops/while.cc @@ -0,0 +1,65 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include +#include "ops/while.h" +#include "ops/op_utils.h" +#include "utils/check_convert_utils.h" +#include "abstract/primitive_infer_map.h" + +namespace mindspore { +namespace ops { +void While::Init(const int64_t cond_subgraph_index, const int64_t body_subgraph_index) { + this->set_cond_subgraph_index(cond_subgraph_index); + this->set_body_subgraph_index(body_subgraph_index); +} + +void While::set_cond_subgraph_index(const int64_t cond_subgraph_index) { + this->AddAttr(kCondSubgraphIndex, MakeValue(cond_subgraph_index)); +} + +int64_t While::get_cond_subgraph_index() const { + auto value_ptr = this->GetAttr(kCondSubgraphIndex); + return GetValue(value_ptr); +} + +void While::set_body_subgraph_index(const int64_t body_subgraph_index) { + this->AddAttr(kBodySubgraphIndex, MakeValue(body_subgraph_index)); +} + +int64_t While::get_body_subgraph_index() const { + auto value_ptr = this->GetAttr(kBodySubgraphIndex); + return GetValue(value_ptr); +} +AbstractBasePtr WhileInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args) { + MS_EXCEPTION_IF_NULL(primitive); + auto While_prim = primitive->cast(); + MS_EXCEPTION_IF_NULL(While_prim); + auto op_name = While_prim->name(); + AbstractBasePtrList output; + for (int64_t i = 0; i < (int64_t)input_args.size(); i++) { + auto shape = CheckAndConvertUtils::ConvertShapePtrToShape("input_shape" + std::to_string(i), + input_args[i]->BuildShape(), op_name); + output.push_back(std::make_shared(input_args[i]->BuildType(), shape)); + } + return std::make_shared(output); +} + +REGISTER_PRIMITIVE_EVAL_IMPL(While, prim::kPrimWhile, WhileInfer); +REGISTER_PRIMITIVE_C(kNameWhile, While); +} // namespace ops +} // namespace mindspore diff --git a/mindspore/core/ops/while.h b/mindspore/core/ops/while.h new file mode 100644 index 00000000000..913e8d16d43 --- /dev/null +++ b/mindspore/core/ops/while.h @@ -0,0 +1,47 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * 
Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_CORE_OPS_WHILE_H_ +#define MINDSPORE_CORE_OPS_WHILE_H_ +#include +#include + +#include "ops/primitive_c.h" +#include "abstract/abstract_value.h" +#include "utils/check_convert_utils.h" + +namespace mindspore { +namespace ops { +constexpr auto kNameWhile = "While"; +class While : public PrimitiveC { + public: + While() : PrimitiveC(kNameWhile) {} + ~While() = default; + MS_DECLARE_PARENT(While, PrimitiveC); + void Init(const int64_t cond_subgraph_index, const int64_t body_subgraph_index); + void set_cond_subgraph_index(const int64_t cond_subgraph_index); + void set_body_subgraph_index(const int64_t body_subgraph_index); + int64_t get_cond_subgraph_index() const; + int64_t get_body_subgraph_index() const; +}; + +AbstractBasePtr WhileInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args); +using PrimWhilePtr = std::shared_ptr; +} // namespace ops +} // namespace mindspore + +#endif // MINDSPORE_CORE_OPS_WHILE_H_ diff --git a/mindspore/core/ops/zeros_like.cc b/mindspore/core/ops/zeros_like.cc new file mode 100644 index 00000000000..5bfe3e106fa --- /dev/null +++ b/mindspore/core/ops/zeros_like.cc @@ -0,0 +1,66 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include +#include +#include +#include +#include + +#include "ops/zeros_like.h" +#include "ops/op_utils.h" +#include "utils/check_convert_utils.h" +#include "abstract/primitive_infer_map.h" + +namespace mindspore { +namespace ops { +namespace { +abstract::ShapePtr InferShape(const PrimitivePtr &primitive, const std::vector &input_args) { + MS_EXCEPTION_IF_NULL(primitive); + auto zeroslike_prim = primitive->cast(); + MS_EXCEPTION_IF_NULL(zeroslike_prim); + auto prim_name = zeroslike_prim->name(); + CheckAndConvertUtils::CheckInteger("input number", input_args.size(), kEqual, 1, prim_name); + for (const auto &item : input_args) { + MS_EXCEPTION_IF_NULL(item); + } + auto in_shape = CheckAndConvertUtils::ConvertShapePtrToShape("x_shape", input_args[0]->BuildShape(), prim_name); + return std::make_shared(in_shape); +} + +TypePtr InferType(const PrimitivePtr &prim, const std::vector &input_args) { + std::set tmp(common_valid_types); + tmp.insert(kNumberTypeBool); + const std::set valid_types(tmp); + if (std::any_of(input_args.begin(), input_args.end(), [](AbstractBasePtr a) { return a == nullptr; })) { + MS_LOG(EXCEPTION) << "nullptr"; + } + std::map types; + types.emplace("x", input_args[0]->BuildType()); + auto infer_type = CheckAndConvertUtils::CheckTensorTypeSame(types, valid_types, prim->name()); + return TypeIdToType(infer_type); +} +} // namespace + +AbstractBasePtr ZerosLikeInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args) { + return std::make_shared(InferType(primitive, input_args), + InferShape(primitive, input_args)->shape()); +} +REGISTER_PRIMITIVE_EVAL_IMPL(ZerosLike, prim::kPrimZerosLike, ZerosLikeInfer); +REGISTER_PRIMITIVE_C(kNameZerosLike, ZerosLike); +} // namespace ops +} // namespace mindspore diff --git a/mindspore/core/ops/zeros_like.h b/mindspore/core/ops/zeros_like.h new file mode 100644 index 00000000000..dd45d45f370 --- /dev/null +++ b/mindspore/core/ops/zeros_like.h @@ -0,0 +1,42 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef MINDSPORE_CORE_OPS_ZEROSLIKE_H_ +#define MINDSPORE_CORE_OPS_ZEROSLIKE_H_ +#include +#include + +#include "ops/primitive_c.h" +#include "abstract/abstract_value.h" +#include "utils/check_convert_utils.h" + +namespace mindspore { +namespace ops { +constexpr auto kNameZerosLike = "ZerosLike"; +class ZerosLike : public PrimitiveC { + public: + ZerosLike() : PrimitiveC(kNameZerosLike) { InitIOName({"x"}, {"y"}); } + ~ZerosLike() = default; + MS_DECLARE_PARENT(ZerosLike, PrimitiveC); + void Init() {} +}; +AbstractBasePtr ZerosLikeInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, + const std::vector &input_args); +using PrimZerosLike = std::shared_ptr; +} // namespace ops +} // namespace mindspore + +#endif // MINDSPORE_CORE_OPS_ZEROSLIKE_H_ diff --git a/mindspore/core/utils/check_convert_utils.cc b/mindspore/core/utils/check_convert_utils.cc index a4de577607e..5be1af7f83e 100644 --- a/mindspore/core/utils/check_convert_utils.cc +++ b/mindspore/core/utils/check_convert_utils.cc @@ -14,40 +14,233 @@ * limitations under the License. */ -#include "utils/check_convert_utils.h" #include +#include +#include +#include +#include +#include "utils/check_convert_utils.h" #include "abstract/abstract_value.h" +#include "ir/dtype/type.h" +#include "ir/dtype/tensor_type.h" +#include "ir/dtype.h" namespace mindspore { +static std::map DataFormatToEnumMap = { + {"NCHW", Format::NCHW}, {"NHWC", Format::NHWC}, {"NHWC4", Format::NHWC4}, + {"HWKC", Format::HWKC}, {"HWCK", Format::HWCK}, {"KCHW", Format::KCHW}, + {"CKHW", Format::CKHW}, {"KHWC", Format::KHWC}, {"CHWK", Format::CHWK}, + {"HW", Format::HW}, {"HW4", Format::HW4}, {"NC", Format::NC}, + {"NC4", Format::NC4}, {"NC4HW4", Format::NC4HW4}, {"NUM_OF_FORMAT", Format::NUM_OF_FORMAT}, +}; + +static std::map DataFormatToStrMap = { + {Format::NCHW, "NCHW"}, {Format::NHWC, "NHWC"}, {Format::NHWC4, "NHWC4"}, + {Format::HWKC, "HWKC"}, {Format::HWCK, "HWCK"}, {Format::KCHW, "KCHW"}, + {Format::CKHW, "CKHW"}, {Format::KHWC, "KHWC"}, {Format::CHWK, "CHWK"}, + {Format::HW, "HW"}, {Format::HW4, "HW4"}, {Format::NC, "NC"}, + {Format::NC4, "NC4"}, {Format::NC4HW4, "NC4HW4"}, {Format::NUM_OF_FORMAT, "NUM_OF_FORMAT"}, +}; + +static std::map ReductionToEnumMap = { + {"sum", Reduction::REDUCTION_SUM}, + {"mean", Reduction::MEAN}, + {"none", Reduction::NONE}, +}; + +static std::map ReductionToStrMap = { + {Reduction::REDUCTION_SUM, "sum"}, + {Reduction::MEAN, "mean"}, + {Reduction::NONE, "none"}, +}; + +static std::map PadModToEnumMap = { + {"pad", PadMode::PAD}, + {"same", PadMode::SAME}, + {"valid", PadMode::VALID}, +}; + +static std::map PadModToStrMap = { + {PadMode::PAD, "pad"}, + {PadMode::SAME, "same"}, + {PadMode::VALID, "valid"}, +}; + +static std::map PadModToEnumUpperMap = { + {"PAD", PadMode::PAD}, + {"SAME", PadMode::SAME}, + {"VALID", PadMode::VALID}, +}; + +static std::map PadModToStrUpperMap = { + {PadMode::PAD, "PAD"}, + {PadMode::SAME, "SAME"}, + {PadMode::VALID, "VALID"}, +}; + +AttrConverterPair DataFormatConverter(DataFormatToEnumMap, DataFormatToStrMap); +AttrConverterPair PadModeConverter(PadModToEnumMap, PadModToStrMap); +AttrConverterPair PadModeUpperConverter(PadModToEnumUpperMap, PadModToStrUpperMap); +AttrConverterPair ReductionConverter(ReductionToEnumMap, ReductionToStrMap); + +static std::map FormatAndPadAttrMap = { + {"format", DataFormatConverter}, + {"pad_mode", PadModeConverter}, +}; + +static std::map FormatAndPadUpperAttrMap = { + {"format", DataFormatConverter}, + {"pad_mode", PadModeUpperConverter}, +}; 
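The converter tables introduced here pair a string-to-enum map with its enum-to-string inverse for each attribute (format, pad_mode, reduction), and PrimAttrConvertMap then selects the right pair per operator so ConvertAttrValueToInt and ConvertAttrValueToString can translate attributes in either direction. A simplified, self-contained sketch of that two-way lookup is shown below; the map contents are trimmed and the function name is illustrative, not the patch's API.

#include <cstdint>
#include <iostream>
#include <map>
#include <string>

// Illustrative two-way attribute conversion, mirroring the AttrConverterPair idea:
// one map per direction, looked up by attribute value.
static const std::map<std::string, int64_t> kPadModeToEnum = {{"pad", 0}, {"same", 1}, {"valid", 2}};
static const std::map<int64_t, std::string> kPadModeToStr = {{0, "pad"}, {1, "same"}, {2, "valid"}};

bool PadModeStringToInt(const std::string &value, int64_t *out) {
  auto it = kPadModeToEnum.find(value);
  if (it == kPadModeToEnum.end()) {
    return false;  // unknown pad mode string, nothing converted
  }
  *out = it->second;
  return true;
}

int main() {
  int64_t mode = -1;
  if (PadModeStringToInt("same", &mode)) {
    // prints: same -> 1 -> same
    std::cout << "same -> " << mode << " -> " << kPadModeToStr.at(mode) << std::endl;
  }
  return 0;
}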
+ +static std::map DataFormatMap = { + {"format", DataFormatConverter}, +}; + +static std::map ReductionMap = { + {"reduction", ReductionConverter}, +}; + +static std::map> PrimAttrConvertMap = { + {"Conv2D", FormatAndPadAttrMap}, + {"Conv2DBackpropInput", FormatAndPadUpperAttrMap}, + {"Conv2DBackpropFilter", FormatAndPadUpperAttrMap}, + {"Conv3D", FormatAndPadAttrMap}, + {"Conv3DBackpropInput", FormatAndPadAttrMap}, + {"Conv3DBackpropFilter", FormatAndPadAttrMap}, + {"Conv3DTranspose", DataFormatMap}, + {"DepthwiseConv2dNative", FormatAndPadAttrMap}, + {"DepthwiseConv2dNativeBackpropInput", FormatAndPadAttrMap}, + {"DepthwiseConv2dNativeBackpropFilter", FormatAndPadAttrMap}, + {"AvgPool", FormatAndPadUpperAttrMap}, + {"MaxPool", FormatAndPadUpperAttrMap}, + {"MaxPoolWithArgmax", FormatAndPadUpperAttrMap}, + {"AvgPoolGrad", FormatAndPadUpperAttrMap}, + {"AvgPoolGradVm", FormatAndPadUpperAttrMap}, + {"AvgPoolGradGpu", FormatAndPadUpperAttrMap}, + {"AvgPoolGradCpu", FormatAndPadUpperAttrMap}, + {"MaxPoolGrad", FormatAndPadUpperAttrMap}, + {"MaxPoolGradGrad", FormatAndPadUpperAttrMap}, + {"MaxPoolGradWithArgmax", FormatAndPadUpperAttrMap}, + {"MaxPoolGradGradWithArgmax", FormatAndPadUpperAttrMap}, + {"BatchNorm", DataFormatMap}, + {"BatchNormGrad", DataFormatMap}, + {"FusedBatchNormEx", DataFormatMap}, + {"FusedBatchNormGradEx", DataFormatMap}, + {"BiasAdd", DataFormatMap}, + {"BiasAddGrad", DataFormatMap}, + {"BinaryCrossEntropy", ReductionMap}, + {"BinaryCrossEntropyGrad", ReductionMap}, + {"NLLLoss", ReductionMap}, +}; + +int64_t CheckAndConvertUtils::GetDataFormatEnumValue(const std::string &value) { + if (DataFormatToEnumMap.find(value) == DataFormatToEnumMap.end()) { + MS_LOG(ERROR) << "Can not convert data format " << value << "to enum"; + } + return DataFormatToEnumMap[value]; +} + +int64_t CheckAndConvertUtils::GetPadModEnumValue(const std::string &value, bool is_upper) { + std::map pad_map = PadModToEnumMap; + if (is_upper) { + pad_map = PadModToEnumUpperMap; + } + if (pad_map.find(value) == pad_map.end()) { + MS_LOG(ERROR) << "Can not convert pad mode " << value << "to enum"; + } + return pad_map[value]; +} + +AttrConverterPair CheckAndConvertUtils::GetAttrConvertPair(const std::string &op_type, const std::string &attr_name) { + AttrConverterPair attr_pair; + if (op_type.empty() || attr_name.empty()) { + return attr_pair; + } + auto op_attr_map_it = PrimAttrConvertMap.find(op_type); + if (op_attr_map_it == PrimAttrConvertMap.end()) { + return attr_pair; + } + auto op_attr_map = op_attr_map_it->second; + auto attr_pair_it = op_attr_map.find(attr_name); + if (attr_pair_it == op_attr_map.end()) { + return attr_pair; + } + + return attr_pair_it->second; +} + +bool CheckAndConvertUtils::ConvertAttrValueToInt(const std::string &op_type, const std::string &attr_name, + ValuePtr *const value) { + if (value == nullptr) { + MS_LOG(ERROR) << "value is nullptr"; + return false; + } + if (!(*value)->isa()) { + return false; + } + auto attr_map_pair = GetAttrConvertPair(op_type, attr_name); + if (attr_map_pair.first.size() == 0) { + return false; + } + + std::string real_value = std::dynamic_pointer_cast(*value)->value(); + bool do_convert = false; + if (attr_map_pair.first.find(real_value) != attr_map_pair.first.end()) { + do_convert = true; + } + if (!do_convert) { + transform(real_value.begin(), real_value.end(), real_value.begin(), ::toupper); + if (attr_map_pair.first.find(real_value) == attr_map_pair.first.end()) { + MS_LOG(DEBUG) << "Can not convert " << op_type << " attr " << attr_name 
<< ": " << real_value << " to int"; + return false; + } + } + + *value = MakeValue(attr_map_pair.first[real_value]); + MS_LOG(DEBUG) << "convert str to int, name: " << op_type << ", attr: " << attr_name; + return true; +} + +bool CheckAndConvertUtils::ConvertAttrValueToString(const std::string &op_type, const std::string &attr_name, + ValuePtr *const value) { + if (value == nullptr) { + MS_LOG(ERROR) << "value is nullptr"; + return false; + } + if (!(*value)->isa()) { + return false; + } + auto attr_map_pair = GetAttrConvertPair(op_type, attr_name); + if (attr_map_pair.second.size() == 0) { + return false; + } + + int64_t real_value = std::dynamic_pointer_cast(*value)->value(); + if (attr_map_pair.second.find(real_value) == attr_map_pair.second.end()) { + MS_LOG(DEBUG) << "Can not convert " << op_type << " attr " << attr_name << ": " << real_value << " to string"; + return false; + } + *value = MakeValue(attr_map_pair.second[real_value]); + MS_LOG(DEBUG) << "convert int to str, name: " << op_type << ", attr: " << attr_name; + return true; +} + namespace { -const std::map> kCompareMap = { - {kEqual, [](int num1, int num2) -> bool { return num1 == num2; }}, - {kNotEqual, [](int num1, int num2) -> bool { return num1 != num2; }}, - {kLessThan, [](int num1, int num2) -> bool { return num1 < num2; }}, - {kLessEqual, [](int num1, int num2) -> bool { return num1 <= num2; }}, - {kGreaterThan, [](int num1, int num2) -> bool { return num1 > num2; }}, - {kGreaterEqual, [](int num1, int num2) -> bool { return num1 >= num2; }}}; +typedef std::map> AttrFunction; -const std::map)>> kCompareRangeMap = { - {kIncludeNeither, - [](int num1, std::pair range) -> bool { return num1 > range.first && num1 < range.second; }}, - {kIncludeLeft, - [](int num1, std::pair range) -> bool { return num1 >= range.first && num1 < range.second; }}, - {kIncludeRight, - [](int num1, std::pair range) -> bool { return num1 > range.first && num1 <= range.second; }}, - {kIncludeBoth, - [](int num1, std::pair range) -> bool { return num1 >= range.first && num1 <= range.second; }}}; +ValuePtr L2NormalizeAttrConversion(ValuePtr attr) { + if (attr->isa()) { + return attr; + } + auto attr_value = GetValue>(attr); + return MakeValue(attr_value[0]); +} -const std::map kCompareToString = { - {kEqual, "equal "}, {kNotEqual, "not equal "}, {kLessThan, "less than "}, - {kLessEqual, "less eqaul "}, {kGreaterThan, "greater than "}, {kGreaterEqual, "greate equal "}}; - -const std::map> kCompareRangeToString = { - {kIncludeNeither, {"in (", ")"}}, - {kIncludeLeft, {" in [", ")"}}, - {kIncludeRight, {"in (", "]"}}, - {kIncludeBoth, {"in [", "]"}}}; +std::map kIrAttrToOpAttr = {{"L2Normalize", {{"axis", L2NormalizeAttrConversion}}}, + {"L2NormalizeGrad", {{"axis", L2NormalizeAttrConversion}}}}; } // namespace + bool CheckAndConvertUtils::IsEqualVector(const std::vector &vec_1, const std::vector &vec_2) { if (vec_1.size() != vec_2.size()) { return false; @@ -70,7 +263,7 @@ std::vector CheckAndConvertUtils::CheckPositiveVector(const std::string if (allow_four) { buffer << "or four "; } - buffer << " positive int numbers , but got ["; + buffer << " positive int64_t numbers , but got ["; for (auto item : arg_value) { buffer << item << ","; } @@ -115,10 +308,10 @@ std::string CheckAndConvertUtils::CheckString(const std::string &arg_name, const MS_EXCEPTION(ValueError) << buffer.str(); } -int CheckAndConvertUtils::CheckInteger(const std::string &arg_name, int arg_value, CompareEnum compare_operator, - int match_value, const std::string &prim_name) { - 
auto iter = kCompareMap.find(compare_operator); - if (iter == kCompareMap.end()) { +int64_t CheckAndConvertUtils::CheckInteger(const std::string &arg_name, int64_t arg_value, CompareEnum compare_operator, + int64_t match_value, const std::string &prim_name) { + auto iter = kCompareMap.find(compare_operator); + if (iter == kCompareMap.end()) { MS_EXCEPTION(NotExistsError) << "compare_operator " << compare_operator << " cannot find in the compare map"; } if (iter->second(arg_value, match_value)) { @@ -139,35 +332,6 @@ int CheckAndConvertUtils::CheckInteger(const std::string &arg_name, int arg_valu MS_EXCEPTION(ValueError) << buffer.str(); } -void CheckAndConvertUtils::CheckInRange(const std::string &arg_name, int arg_value, CompareRange compare_operator, - const std::pair &range, const std::string &prim_name) { - auto iter = kCompareRangeMap.find(compare_operator); - if (iter == kCompareRangeMap.end()) { - MS_EXCEPTION(NotExistsError) << "compare_operator " << compare_operator << " cannot find in the compare map"; - } - if (range.first >= range.second) { - MS_EXCEPTION(ArgumentError) << "the check range left must be larger than right number bug got [ " << range.first - << "," << range.second; - } - if (iter->second(arg_value, range)) { - return; - } - std::ostringstream buffer; - if (prim_name.empty()) { - buffer << "The "; - } else { - buffer << "For " << prim_name << " the "; - } - buffer << arg_name << " must "; - auto iter_to_string = kCompareRangeToString.find(compare_operator); - if (iter_to_string == kCompareRangeToString.end()) { - MS_EXCEPTION(NotExistsError) << "compare_operator " << compare_operator << " cannot find in the compare string map"; - } - auto range_strng = iter_to_string->second; - buffer << range_strng.first << range.first << "," << range_strng.second << " , but got " << arg_value; - MS_EXCEPTION(ValueError) << buffer.str(); -} - std::vector CheckAndConvertUtils::ConvertShapePtrToShape(const std::string &arg_name, const BaseShapePtr &shape, const std::string &prim_name) { @@ -181,11 +345,11 @@ std::vector CheckAndConvertUtils::ConvertShapePtrToShape(const std::str return shape_element->shape(); } -void CheckAndConvertUtils::Check(const string &arg_name, int arg_value, CompareEnum compare_type, - const string &value_name, int value, const string &prim_name, +void CheckAndConvertUtils::Check(const string &arg_name, int64_t arg_value, CompareEnum compare_type, + const string &value_name, int64_t value, const string &prim_name, ExceptionType exception_type) { - auto iter = kCompareMap.find(compare_type); - if (iter == kCompareMap.end()) { + auto iter = kCompareMap.find(compare_type); + if (iter == kCompareMap.end()) { MS_EXCEPTION(NotExistsError) << "the compare type :" << compare_type << " is not in the compare map"; } if (iter->second(arg_value, value)) { @@ -204,41 +368,6 @@ void CheckAndConvertUtils::Check(const string &arg_name, int arg_value, CompareE MS_EXCEPTION(exception_type) << buffer.str() << arg_name << " should be " << iter_to_string->second << value << " but got " << arg_value; } -void CheckAndConvertUtils::Check(const string &arg_name, const std::vector &arg_value, - CompareEnum compare_type, const string &value_name, const std::vector &value, - const string &prim_name, ExceptionType exception_type) { - if (compare_type != kEqual) { - auto iter = kCompareToString.find(compare_type); - if (iter != kCompareToString.end()) { - MS_EXCEPTION(NotSupportError) << "Only supported equal to compare two vectors but got " << iter->second; - } - 
MS_EXCEPTION(UnknownError) << "Cannot find the operator " << compare_type << "in the compare map!"; - } - if (arg_value == value) { - return; - } - std::ostringstream buffer; - if (prim_name.empty()) { - buffer << "The "; - } else { - buffer << "For " << prim_name << " the "; - } - auto iter_to_string = kCompareToString.find(compare_type); - if (iter_to_string == kCompareToString.end()) { - MS_EXCEPTION(NotExistsError) << "compare_operator " << compare_type << " cannot find in the compare string map"; - } - buffer << arg_name << "should be " << iter_to_string->second << " ["; - for (auto item : value) { - buffer << item << ","; - } - buffer << "] " - << "but got ["; - for (auto item : arg_value) { - buffer << item << " ,"; - } - buffer << "]"; - MS_EXCEPTION(exception_type) << buffer.str(); -} TypeId CheckAndConvertUtils::CheckTensorTypeSame(const std::map &types, const std::set &check_list, const std::string &prim_name) { @@ -280,4 +409,173 @@ TypeId CheckAndConvertUtils::CheckTensorTypeSame(const std::map &check_list, const std::string &prim_name) { + MS_EXCEPTION_IF_NULL(type); + if (!type->isa()) { + MS_EXCEPTION(TypeError) << "The " << prim_name << "'s " << type_name << " input must be tensor type but got " + << type->ToString(); + } + auto tensor_type = type->cast(); + MS_EXCEPTION_IF_NULL(tensor_type); + auto element = tensor_type->element(); + MS_EXCEPTION_IF_NULL(element); + std::ostringstream buffer; + if (check_list.find(element->type_id()) == check_list.end()) { + buffer << "type of " << type_name << " should be in ["; + for (auto type_elem : check_list) { + buffer << TypeIdToType(type_elem)->ToString() << " ,"; + } + buffer << "], but got " << type->ToString(); + MS_EXCEPTION(TypeError) << buffer.str(); + } +} + +void CheckAndConvertUtils::CheckSubClass(const std::string &type_name, const TypePtr type_, + const std::set &template_types, const std::string &prim_name) { + MS_EXCEPTION_IF_NULL(type_); + bool hit = false; + for (auto template_type : template_types) { + if (type_->isa()) { + if (IsIdentidityOrSubclass(type_, template_type)) { + hit = true; + break; + } + } else if (type_->type_id() == template_type->type_id()) { + hit = true; + break; + } + } + if (!hit) { + std::string type_str = type_->ToString(); + std::ostringstream buffer; + buffer << "For '" << prim_name << "', the type of `" << type_name << "` should be subclass of "; + for (auto template_type : template_types) { + buffer << template_type->ToString() << ","; + } + buffer << " but got " << type_str << "."; + MS_EXCEPTION(TypeError) << buffer.str(); + } +} + +void CheckAndConvertUtils::CheckScalarOrTensorTypesSame(const std::map &args, + const std::set &valid_values, + const std::string &prim_name, const bool allow_mix) { + std::vector> check_results; + for (auto &iter : args) { + std::map arg = {{iter.first, iter.second}}; + check_results.push_back(_CheckArgumentType(arg, valid_values, prim_name)); + } + + std::map &arg_ = check_results[0]; + int64_t size = check_results.size(); + for (int64_t it = 1; it != size; it++) { + arg_ = _CheckTypeSame(arg_, check_results[it], prim_name, allow_mix); + } +} + +std::map CheckAndConvertUtils::_CheckArgumentType(const std::map &arg, + const std::set &valid_values, + const std::string &prim_name) { + std::string arg_key = arg.begin()->first; + TypePtr arg_val = arg.begin()->second; + + if (arg_val->isa()) { + auto arg_val_ = std::static_pointer_cast(arg_val); + arg_val = arg_val_->element(); + } + + auto it = valid_values.find(arg_val); + if (it == valid_values.end()) { + 
std::ostringstream buffer; + buffer << "For '" << prim_name << "' , the `" << arg_key << "` should be in { "; + for (auto valid_value : valid_values) { + buffer << valid_value->ToString() << " },"; + buffer << "but `" << arg_key << "`" + << "is" << arg_val->ToString() << "."; + } + MS_EXCEPTION(TypeError) << buffer.str(); + } + return arg; +} + +std::map CheckAndConvertUtils::_CheckTypeSame(const std::map &arg1, + const std::map &arg2, + const std::string &prim_name, + const bool allow_mix) { + std::string arg1_name = arg1.begin()->first; + TypePtr arg1_type = arg1.begin()->second; + std::string arg2_name = arg2.begin()->first; + TypePtr arg2_type = arg2.begin()->second; + bool except_flag = false; + + if (arg1_type->isa() && arg2_type->isa()) { + arg1_type = std::static_pointer_cast(arg1_type)->element(); + arg2_type = std::static_pointer_cast(arg2_type)->element(); + } else if (allow_mix) { + arg1_type = arg1_type->isa() ? std::static_pointer_cast(arg1_type)->element() : arg1_type; + arg2_type = arg2_type->isa() ? std::static_pointer_cast(arg2_type)->element() : arg2_type; + } else { + except_flag = true; + } + + if (except_flag || arg1_type != arg2_type) { + std::ostringstream buffer; + buffer << "For '" << prim_name << "'" + << "type of " + << "`" << arg2_name << "` should be same as " + << "`" << arg1_name << "`,"; + buffer << "but `" << arg1_name << "` is " << arg1_type->ToString() << "and `" << arg2_name << "` is " + << arg2_type->ToString() << "."; + MS_EXCEPTION(TypeError) << buffer.str(); + } + return arg1; +} + +TypeId CheckAndConvertUtils::CheckTypeSame(const std::string &arg_name, const TypePtr arg_type, + const std::set &valid_type, const std::string &prim_name) { + if (valid_type.empty()) { + MS_EXCEPTION(ArgumentError) << "Trying to use the function to check a empty valid_type!"; + } + // std::set types_id; + std::ostringstream buffer; + TypeId arg_type_; + arg_type_ = arg_type->isa() ? 
std::static_pointer_cast(arg_type)->generic_type_id() + : arg_type->type_id(); + + auto it = valid_type.find(arg_type_); + if (it == valid_type.end()) { + buffer << "For" << prim_name << ", the '" << arg_name << "' should be {' one of '" << valid_type.size() << "'}"; + for (auto type : valid_type) { + buffer << "{" << TypeIdLabel(type); + } + buffer << "},"; + buffer << "but got " << arg_type->ToString() << "."; + MS_EXCEPTION(TypeError) << buffer.str(); + } + return arg_type_; +} + +bool CheckAndConvertUtils::CheckIrAttrtoOpAttr(const std::string &op_type, const std::string &attr_name, + ValuePtr *const value) { + if (*value == nullptr) { + MS_LOG(ERROR) << "value is nullptr"; + return false; + } + if (op_type.empty() || attr_name.empty()) { + return false; + } + auto op_map = kIrAttrToOpAttr.find(op_type); + if (op_map == kIrAttrToOpAttr.end()) { + return false; + } + auto attr_func = op_map->second.find(attr_name); + if (attr_func == op_map->second.end()) { + return false; + } + *value = attr_func->second(*value); + MS_LOG(DEBUG) << "convert ir attr to op attr, name: " << op_type << ", attr: " << attr_name; + return true; +} } // namespace mindspore diff --git a/mindspore/core/utils/check_convert_utils.h b/mindspore/core/utils/check_convert_utils.h index ddfc1c4d65b..758d55f8cca 100644 --- a/mindspore/core/utils/check_convert_utils.h +++ b/mindspore/core/utils/check_convert_utils.h @@ -21,12 +21,15 @@ #include #include #include +#include #include "base/base.h" #include "ir/anf.h" #include "ir/dtype/type_id.h" #include "utils/log_adapter.h" namespace mindspore { -enum CompareEnum : int { +typedef std::pair, std::map> AttrConverterPair; + +enum CompareEnum : int64_t { kEqual = 1, // == kNotEqual = 2, // != kLessThan = 3, // < @@ -41,7 +44,23 @@ enum CompareRange { kIncludeRight = 3, // (a,b] kIncludeBoth = 4, // [a,b] }; - +enum Format : int64_t { + NCHW = 0, + NHWC = 1, + NHWC4 = 2, + HWKC = 3, + HWCK = 4, + KCHW = 5, + CKHW = 6, + KHWC = 7, + CHWK = 8, + HW = 9, + HW4 = 10, + NC = 11, + NC4 = 12, + NC4HW4 = 13, + NUM_OF_FORMAT = 14 +}; enum ActivationType : int64_t { NO_ACTIVATION = 0, RELU = 1, @@ -62,27 +81,81 @@ enum ActivationType : int64_t { HARD_TANH = 16, SIGN = 17, SWISH = 18, - UNKNOW = 19, + GELU = 19, + UNKNOWN = 20 }; -enum Format : int64_t { - NCHW = 0, - NHWC = 1, - NHWC4 = 2, - HWKC = 3, - HWCK = 4, - KCHW = 5, - CKHW = 6, - KHWC = 7, - CHWK = 8, - HW = 9, - HW4 = 10, - NC = 11, - NC4 = 12, - NC4HW4 = 13, - NUM_OF_FORMAT = 14 +enum ReduceMode : int64_t { + Reduce_Mean = 0, + Reduce_Max = 1, + Reduce_Min = 2, + Reduce_Prod = 3, + Reduce_Sum = 4, + Reduce_Sum_Square = 5, + Reduce_ASum = 6, + Reduce_All = 7 +}; +enum ReduceType : int64_t { + REDUCE_MAX = 0, + REDUCE_MEAN = 1, + REDUCE_ALL = 2, + REDUCE_ANY = 3, + REDUCE_LOG_SUM_EXP = 4, + REDUCE_PROD = 5, + REDUCE_SUM = 6, + REDUCE_UNKNOW = 7, }; enum EltwiseMode : int64_t { PROD = 0, SUM = 1, MAXIMUM = 2, ELTWISEMODE_UNKNOW = 3 }; +enum Reduction : int64_t { REDUCTION_SUM = 0, MEAN = 1, NONE = 2 }; + +enum PadMode : int64_t { PAD = 0, SAME = 1, VALID = 2 }; + +enum RoundMode : int64_t { + FLOOR = 0, + CEIL = 1, +}; + +enum PoolMode : int64_t { + MAX_POOLING = 0, + MEAN_POOLING = 1, +}; + +enum class LshProjectionType : int64_t { UNKNOWN = 0, SPARSE = 1, DENSE = 2 }; + +enum PaddingMode : int64_t { CONSTANT = 0, REFLECT = 1, SYMMETRIC = 2, MODE_RESERVED = 3 }; + +enum class ResizeMethod : int64_t { UNKNOWN = -1, LINEAR = 0, NEAREST = 1, CUBIC = 2 }; + +enum CoordinateTransformMode : int64_t { ASYMMETRIC = 0, ALIGN_CORNERS = 
1, HALF_PIXEL = 2, CROP_AND_RESIZE = 3 }; + +enum class NearestMode : int64_t { NORMAL = 0, ROUND_HALF_DOWN = 1, ROUND_HALF_UP = 2, FLOOR = 3, CEIL = 4 }; + +template +const std::map> kCompareMap = { + {kEqual, [](T num1, T num2) -> bool { return num1 == num2; }}, + {kNotEqual, [](T num1, T num2) -> bool { return num1 != num2; }}, + {kLessThan, [](T num1, T num2) -> bool { return num1 < num2; }}, + {kLessEqual, [](T num1, T num2) -> bool { return num1 <= num2; }}, + {kGreaterThan, [](T num1, T num2) -> bool { return num1 > num2; }}, + {kGreaterEqual, [](T num1, T num2) -> bool { return num1 >= num2; }}}; + +template +const std::map)>> kCompareRangeMap = { + {kIncludeNeither, [](T num1, std::pair range) -> bool { return num1 > range.first && num1 < range.second; }}, + {kIncludeLeft, [](T num1, std::pair range) -> bool { return num1 >= range.first && num1 < range.second; }}, + {kIncludeBoth, [](T num1, std::pair range) -> bool { return num1 >= range.first && num1 <= range.second; }}, + {kIncludeRight, [](T num1, std::pair range) -> bool { return num1 > range.first && num1 <= range.second; }}}; + +const std::map kCompareToString = { + {kEqual, "equal "}, {kNotEqual, "not equal "}, {kLessThan, "less than "}, + {kLessEqual, "less equal "}, {kGreaterThan, "greater than "}, {kGreaterEqual, "greater equal "}}; + +const std::map> kCompareRangeToString = { + {kIncludeNeither, {"in (", ")"}}, + {kIncludeLeft, {" in [", ")"}}, + {kIncludeRight, {"in (", "]"}}, + {kIncludeBoth, {"in [", "]"}}}; + class CheckAndConvertUtils { public: static std::vector CheckPositiveVector(const std::string &arg_name, const std::vector &arg_value, @@ -90,22 +163,137 @@ class CheckAndConvertUtils { bool ret_four = false); static std::string CheckString(const std::string &arg_name, const std::string &arg_value, const std::set &check_list, const std::string &prim_name); - static int CheckInteger(const std::string &arg_name, int arg_value, CompareEnum compare_operator, int match_value, - const std::string &prim_name); - static void CheckInRange(const std::string &arg_name, int arg_value, CompareRange compare_operator, - const std::pair &range, const std::string &prim_name); + + // CheckValue should replace CheckInteger + static int64_t CheckInteger(const std::string &arg_name, int64_t arg_value, CompareEnum compare_operator, + int64_t match_value, const std::string &prim_name); + + template + static T CheckValue(const std::string &arg_name, T arg_value, CompareEnum compare_operator, T match_value, + const std::string &prim_name) { + auto iter = kCompareMap.find(compare_operator); + if (iter == kCompareMap.end()) { + MS_EXCEPTION(NotExistsError) << "compare_operator " << compare_operator << " cannot find in the compare map"; + } + if (iter->second(arg_value, match_value)) { + return arg_value; + } + std::ostringstream buffer; + if (prim_name.empty()) { + buffer << "The "; + } else { + buffer << "For " << prim_name << " the "; + } + buffer << arg_name << " must "; + auto iter_to_string = kCompareToString.find(compare_operator); + if (iter_to_string == kCompareToString.end()) { + MS_EXCEPTION(NotExistsError) << "compare_operator " << compare_operator + << " cannot find in the compare string map"; + } + buffer << iter_to_string->second << match_value << " , but got " << arg_value; + MS_EXCEPTION(ValueError) << buffer.str(); + } + + template + static void CheckInRange(const std::string &arg_name, T arg_value, CompareRange compare_operator, + const std::pair &range, const std::string &prim_name) { + auto iter = 
kCompareRangeMap.find(compare_operator); + if (iter == kCompareRangeMap.end()) { + MS_EXCEPTION(NotExistsError) << "compare_operator " << compare_operator << " cannot find in the compare map"; + } + if (range.first >= range.second) { + MS_EXCEPTION(ArgumentError) << "the check range left must be larger than right number bug got [ " << range.first + << "," << range.second; + } + if (iter->second(arg_value, range)) { + return; + } + std::ostringstream buffer; + if (prim_name.empty()) { + buffer << "The "; + } else { + buffer << "For " << prim_name << " the "; + } + buffer << arg_name << " must "; + auto iter_to_string = kCompareRangeToString.find(compare_operator); + if (iter_to_string == kCompareRangeToString.end()) { + MS_EXCEPTION(NotExistsError) << "compare_operator " << compare_operator + << " cannot find in the compare string map"; + } + auto range_strng = iter_to_string->second; + buffer << range_strng.first << range.first << "," << range_strng.second << " , but got " << arg_value; + MS_EXCEPTION(ValueError) << buffer.str(); + } + static std::vector ConvertShapePtrToShape(const std::string &arg_name, const BaseShapePtr &shape, const std::string &prim_name); - static void Check(const std::string &arg_name, int arg_value, CompareEnum compare_type, const std::string &value_name, - int value, const std::string &prim_name = "", ExceptionType exception_type = ValueError); - static void Check(const std::string &arg_name, const std::vector &arg_value, CompareEnum compare_type, - const std::string &value_name, const std::vector &value, const std::string &prim_name = "", + static void Check(const std::string &arg_name, int64_t arg_value, CompareEnum compare_type, + const std::string &value_name, int64_t value, const std::string &prim_name = "", ExceptionType exception_type = ValueError); + + template + static void Check(const std::string &arg_name, const std::vector &arg_value, CompareEnum compare_type, + const std::string &value_name, const std::vector &value, const std::string &prim_name = "", + ExceptionType exception_type = ValueError) { + if (compare_type != kEqual) { + auto iter = kCompareToString.find(compare_type); + if (iter != kCompareToString.end()) { + MS_EXCEPTION(NotSupportError) << "Only supported equal to compare two vectors but got " << iter->second; + } + MS_EXCEPTION(UnknownError) << "Cannot find the operator " << compare_type << "in the compare map!"; + } + if (arg_value == value) { + return; + } + std::ostringstream buffer; + if (prim_name.empty()) { + buffer << "The "; + } else { + buffer << "For " << prim_name << " the "; + } + auto iter_to_string = kCompareToString.find(compare_type); + if (iter_to_string == kCompareToString.end()) { + MS_EXCEPTION(NotExistsError) << "compare_operator " << compare_type << " cannot find in the compare string map"; + } + buffer << arg_name << "should be " << iter_to_string->second << " ["; + for (auto item : value) { + buffer << item << ","; + } + buffer << "] " + << "but got ["; + for (auto item : arg_value) { + buffer << item << " ,"; + } + buffer << "]"; + MS_EXCEPTION(exception_type) << buffer.str(); + } + static TypeId CheckTensorTypeSame(const std::map &types, const std::set &check_list, const std::string &prim_name); + static void CheckTensorTypeValid(const std::string &type_name, const TypePtr type, const std::set &check_list, + const std::string &prim_name); + static void CheckSubClass(const std::string &type_name, const TypePtr type, const std::set &template_types, + const std::string &prim_name); + static void 
CheckScalarOrTensorTypesSame(const std::map &args, + const std::set &valid_values, const std::string &prim_name, + bool allow_mix = false); + static TypeId CheckTypeSame(const std::string &arg_name, const TypePtr arg_type, const std::set &valid_type, + const std::string &prim_name); + static bool ConvertAttrValueToInt(const std::string &op_type, const std::string &attr_name, ValuePtr *const value); + static bool ConvertAttrValueToString(const std::string &op_type, const std::string &attr_name, ValuePtr *const value); + static AttrConverterPair GetAttrConvertPair(const std::string &op_type, const std::string &attr_name); + static int64_t GetDataFormatEnumValue(const std::string &value); + static int64_t GetPadModEnumValue(const std::string &value, bool is_upper = false); + static bool CheckIrAttrtoOpAttr(const std::string &op_type, const std::string &attr_name, ValuePtr *const value); private: static bool IsEqualVector(const std::vector &vec_1, const std::vector &vec_2); + static std::map _CheckArgumentType(const std::map &arg, + const std::set &valid_values, + const std::string &prim_name); + static std::map _CheckTypeSame(const std::map &arg1, + const std::map &arg2, + const std::string &prim_name, const bool allow_mix); }; } // namespace mindspore #endif // MINDSPORE_CORE_UTILS_CHECK_CONVERT_UTILS_H_ diff --git a/mindspore/lite/src/ops/ops_def.cc b/mindspore/lite/src/ops/ops_def.cc index ef9cc32d8b3..720662a05aa 100644 --- a/mindspore/lite/src/ops/ops_def.cc +++ b/mindspore/lite/src/ops/ops_def.cc @@ -13,11 +13,5 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -#include "src/ops/schema_def.h" -#ifdef PRIMITIVE_WRITEABLE -#include "c_ops/conv2d.h" -#endif -OP_SCHEMA_DEF(Conv2D) -OP_ATTR(group, int) -OP_SCHEMA_DEF_END(Conv2D) +#include "src/ops/schema_def.h" diff --git a/mindspore/lite/src/ops/schema_def.h b/mindspore/lite/src/ops/schema_def.h index 322692290b5..2231471b4a3 100644 --- a/mindspore/lite/src/ops/schema_def.h +++ b/mindspore/lite/src/ops/schema_def.h @@ -18,7 +18,7 @@ #include #include "src/ops/schema_register.h" #ifdef PRIMITIVE_WRITEABLE -#include "c_ops/conv2d.h" +#include "ops/conv2d.h" #include "schema/inner/model_generated.h" #endif diff --git a/mindspore/lite/tools/converter/parser/caffe/caffe_node_parser.h b/mindspore/lite/tools/converter/parser/caffe/caffe_node_parser.h index c713436dee7..3390f3118ae 100644 --- a/mindspore/lite/tools/converter/parser/caffe/caffe_node_parser.h +++ b/mindspore/lite/tools/converter/parser/caffe/caffe_node_parser.h @@ -20,7 +20,7 @@ #include #include #include "src/ops/primitive_c.h" -#include "c_ops/primitive_c.h" +#include "ops/primitive_c.h" #include "google/protobuf/message.h" #include "schema/inner/model_generated.h" #include "proto/caffe.pb.h" diff --git a/tests/ut/cpp/CMakeLists.txt b/tests/ut/cpp/CMakeLists.txt index 3e233fb2021..f10a4a3f751 100644 --- a/tests/ut/cpp/CMakeLists.txt +++ b/tests/ut/cpp/CMakeLists.txt @@ -39,7 +39,7 @@ if(ENABLE_MINDDATA) ./abstract/*.cc ./base/*.cc ./dataset/*.cc - ./c_ops/*.cc + ./ops/*.cc ./ir/dtype/*.cc ${CMAKE_SOURCE_DIR}/mindspore/ccsrc/minddata/dataset/kernels/image/lite_cv/*.cc ./debug/*.cc @@ -112,7 +112,6 @@ file(GLOB_RECURSE MINDSPORE_SRC_LIST RELATIVE ${CMAKE_CURRENT_SOURCE_DIR} "../../../mindspore/ccsrc/backend/kernel_compiler/cpu/adam_delta_cpu_kernel.cc" "../../../mindspore/ccsrc/backend/kernel_compiler/akg/*.cc" "../../../mindspore/ccsrc/backend/kernel_compiler/rts/*.cc" - "../../../mindspore/core/c_ops/*.cc" 
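
A minimal usage sketch of the templated validators added to CheckAndConvertUtils above; it is illustrative only and not part of the patch. The primitive name "Conv2D", the attribute values, and the "utils/check_convert_utils.h" include path are assumptions.

#include <utility>
#include <vector>
#include "utils/check_convert_utils.h"  // assumed include path for the header patched above

namespace mindspore {
void ValidateAttrs(int64_t out_channel, int64_t axis, int64_t rank) {
  // Throws ValueError unless out_channel > 0; on success the checked value is returned.
  (void)CheckAndConvertUtils::CheckValue<int64_t>("out_channel", out_channel, kGreaterThan, 0, "Conv2D");
  // Throws ValueError unless 0 <= axis < rank (kIncludeLeft selects the left-closed, right-open interval).
  CheckAndConvertUtils::CheckInRange<int64_t>("axis", axis, kIncludeLeft, std::pair<int64_t, int64_t>{0, rank},
                                              "Conv2D");
  // The vector overload of Check only supports equality comparison between the two vectors.
  CheckAndConvertUtils::Check("stride", std::vector<int64_t>{1, 1}, kEqual, "expected stride",
                              std::vector<int64_t>{1, 1}, "Conv2D");
}
}  // namespace mindspore
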
"../../../mindspore/ccsrc/backend/kernel_compiler/hccl/*.cc" "../../../mindspore/ccsrc/backend/kernel_compiler/kernel_query.cc" "../../../mindspore/ccsrc/backend/kernel_compiler/aicpu/aicpu_kernel_metadata.cc" @@ -126,7 +125,7 @@ file(GLOB_RECURSE MINDSPORE_SRC_LIST RELATIVE ${CMAKE_CURRENT_SOURCE_DIR} "../../../mindspore/ccsrc/backend/session/kernel_graph.cc" "../../../mindspore/ccsrc/backend/session/session_basic.cc" "../../../mindspore/ccsrc/backend/session/executor.cc" - "../../../mindspore/core/c_ops/*.cc" + "../../../mindspore/core/ops/*.cc" "../../../mindspore/ccsrc/backend/session/executor_manager.cc" "../../../mindspore/ccsrc/backend/session/session_factory.cc" "../../../mindspore/ccsrc/backend/session/kernel_build_client.cc" @@ -184,4 +183,5 @@ if(USE_GLOG) target_link_libraries(ut_tests PRIVATE mindspore::glog) endif() +target_link_libraries(mindspore mindspore_core) target_link_libraries(ut_tests PRIVATE mindspore mindspore_shared_lib securec graph) diff --git a/tests/ut/cpp/ops/test_ops_add.cc b/tests/ut/cpp/ops/test_ops_add.cc new file mode 100644 index 00000000000..ce22771179e --- /dev/null +++ b/tests/ut/cpp/ops/test_ops_add.cc @@ -0,0 +1,61 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#include +#include +#include "common/common_test.h" +#include "ops/add.h" +#include "ir/dtype/type.h" +#include "abstract/dshape.h" +#include "utils/tensor_construct_utils.h" +namespace mindspore { +namespace ops { +class TestAdd : public UT::Common { + public: + TestAdd() {} + void SetUp() {} + void TearDown() {} +}; + +TEST_F(TestAdd, test_ops_add) { + auto add = std::make_shared(); + add->Init(); + auto tensor_x = TensorConstructUtils::CreateOnesTensor(kNumberTypeFloat32, std::vector{1, 3}); + auto tensor_y = TensorConstructUtils::CreateOnesTensor(kNumberTypeFloat32, std::vector{1, 3}); + MS_EXCEPTION_IF_NULL(tensor_x); + MS_EXCEPTION_IF_NULL(tensor_y); + auto add_abstract = add->Infer({tensor_x->ToAbstract(), tensor_y->ToAbstract()}); + MS_EXCEPTION_IF_NULL(add_abstract); + EXPECT_EQ(add_abstract->isa(), true); + auto shape_ptr = add_abstract->BuildShape(); + MS_EXCEPTION_IF_NULL(shape_ptr); + EXPECT_EQ(shape_ptr->isa(), true); + auto add_shape = shape_ptr->cast(); + MS_EXCEPTION_IF_NULL(add_shape); + auto shape_vec = add_shape->shape(); + auto type = add_abstract->BuildType(); + MS_EXCEPTION_IF_NULL(type); + EXPECT_EQ(type->isa(), true); + auto tensor_type = type->cast(); + MS_EXCEPTION_IF_NULL(tensor_type); + auto elem_type = tensor_type->element(); + EXPECT_EQ(elem_type->type_id(), kNumberTypeFloat32); + EXPECT_EQ(shape_vec.size(), 2); + EXPECT_EQ(shape_vec[0], 1); + EXPECT_EQ(shape_vec[1], 3); +} + +} // namespace ops +} // namespace mindspore diff --git a/tests/ut/cpp/ops/test_ops_addn.cc b/tests/ut/cpp/ops/test_ops_addn.cc new file mode 100644 index 00000000000..05d393c7e1d --- /dev/null +++ b/tests/ut/cpp/ops/test_ops_addn.cc @@ -0,0 +1,97 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
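
The op front-end tests that follow all repeat the same unwrapping of the inferred abstract: build the shape, cast it, build the type, cast it, then compare against expected values. A condensed sketch of that recurring pattern is shown below; the helper names are hypothetical, and the abstract::ShapePtr / TensorTypePtr casts are assumed to be the same ones used throughout these tests.

#include <vector>
#include "abstract/abstract_value.h"
#include "abstract/dshape.h"
#include "ir/dtype/type.h"

namespace mindspore {
namespace ops {
// Hypothetical helpers condensing the shape/dtype checks repeated in the tests below.
inline std::vector<int64_t> InferredShape(const abstract::AbstractBasePtr &abs) {
  // For single-tensor outputs, BuildShape() is expected to yield an abstract::Shape.
  return abs->BuildShape()->cast<abstract::ShapePtr>()->shape();
}

inline TypeId InferredElementTypeId(const abstract::AbstractBasePtr &abs) {
  // BuildType() is expected to yield a TensorType whose element() carries the dtype.
  return abs->BuildType()->cast<TensorTypePtr>()->element()->type_id();
}
}  // namespace ops
}  // namespace mindspore
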
+ */ +#include +#include +#include "common/common_test.h" +#include "ops/addn.h" +#include "ir/dtype/type.h" +#include "ir/value.h" +#include "abstract/dshape.h" +#include "utils/tensor_construct_utils.h" + +namespace mindspore { +namespace ops { +class TestAddN : public UT::Common { + public: + TestAddN() {} + void SetUp() {} + void TearDown() {} +}; + +TEST_F(TestAddN, test_ops_addn1) { + auto addn = std::make_shared(); + auto tensor_x1 = TensorConstructUtils::CreateOnesTensor(kNumberTypeFloat32, std::vector{3, 2, 7, 7}); + auto tensor_x2 = TensorConstructUtils::CreateOnesTensor(kNumberTypeFloat32, std::vector{3, 2, 7, 7}); + MS_EXCEPTION_IF_NULL(tensor_x1); + MS_EXCEPTION_IF_NULL(tensor_x2); + auto input_tuple = std::make_shared(std::vector{tensor_x1, tensor_x2}); + auto abstract = addn->Infer({input_tuple->ToAbstract()}); + MS_EXCEPTION_IF_NULL(abstract); + EXPECT_EQ(abstract->isa(), true); + auto shape_ptr = abstract->BuildShape(); + MS_EXCEPTION_IF_NULL(shape_ptr); + EXPECT_EQ(shape_ptr->isa(), true); + auto shape = shape_ptr->cast(); + MS_EXCEPTION_IF_NULL(shape); + auto shape_vec = shape->shape(); + auto type = abstract->BuildType(); + MS_EXCEPTION_IF_NULL(type); + EXPECT_EQ(type->isa(), true); + auto tensor_type = type->cast(); + MS_EXCEPTION_IF_NULL(tensor_type); + auto data_type = tensor_type->element(); + MS_EXCEPTION_IF_NULL(data_type); + EXPECT_EQ(data_type->type_id(), kNumberTypeFloat32); + EXPECT_EQ(shape_vec.size(), 4); + EXPECT_EQ(shape_vec[0], 3); + EXPECT_EQ(shape_vec[1], 2); + EXPECT_EQ(shape_vec[2], 7); + EXPECT_EQ(shape_vec[3], 7); +} + +TEST_F(TestAddN, test_ops_addn2) { + auto addn = std::make_shared(); + auto tensor_x1 = TensorConstructUtils::CreateOnesTensor(kNumberTypeBool, std::vector{3, 4, 5}); + auto tensor_x2 = TensorConstructUtils::CreateOnesTensor(kNumberTypeBool, std::vector{3, 4, 5}); + auto tensor_x3 = TensorConstructUtils::CreateOnesTensor(kNumberTypeBool, std::vector{3, 4, 5}); + MS_EXCEPTION_IF_NULL(tensor_x1); + MS_EXCEPTION_IF_NULL(tensor_x2); + MS_EXCEPTION_IF_NULL(tensor_x3); + auto input_tuple = std::make_shared(std::vector{tensor_x1, tensor_x2, tensor_x3}); + auto abstract = addn->Infer({input_tuple->ToAbstract()}); + MS_EXCEPTION_IF_NULL(abstract); + EXPECT_EQ(abstract->isa(), true); + auto shape_ptr = abstract->BuildShape(); + MS_EXCEPTION_IF_NULL(shape_ptr); + EXPECT_EQ(shape_ptr->isa(), true); + auto shape = shape_ptr->cast(); + MS_EXCEPTION_IF_NULL(shape); + auto shape_vec = shape->shape(); + auto type = abstract->BuildType(); + MS_EXCEPTION_IF_NULL(type); + EXPECT_EQ(type->isa(), true); + auto tensor_type = type->cast(); + MS_EXCEPTION_IF_NULL(tensor_type); + auto data_type = tensor_type->element(); + MS_EXCEPTION_IF_NULL(data_type); + EXPECT_EQ(data_type->type_id(), kNumberTypeBool); + EXPECT_EQ(shape_vec.size(), 3); + EXPECT_EQ(shape_vec[0], 3); + EXPECT_EQ(shape_vec[1], 4); + EXPECT_EQ(shape_vec[2], 5); +} +} // namespace ops +} // namespace mindspore diff --git a/tests/ut/cpp/ops/test_ops_argmax.cc b/tests/ut/cpp/ops/test_ops_argmax.cc new file mode 100644 index 00000000000..4bcee1856d3 --- /dev/null +++ b/tests/ut/cpp/ops/test_ops_argmax.cc @@ -0,0 +1,89 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#include +#include +#include "common/common_test.h" +#include "ops/arg_max.h" +#include "ir/dtype/type.h" +#include "ir/value.h" +#include "abstract/dshape.h" +#include "utils/tensor_construct_utils.h" + +namespace mindspore { +namespace ops { +class TestArgMax : public UT::Common { + public: + TestArgMax() {} + void SetUp() {} + void TearDown() {} +}; + +TEST_F(TestArgMax, test_ops_argmax1) { + auto argmax = std::make_shared(); + argmax->Init(2, kNumberTypeInt32); + auto tensor_x = TensorConstructUtils::CreateOnesTensor(kNumberTypeFloat32, std::vector{2, 3, 4, 5}); + MS_EXCEPTION_IF_NULL(tensor_x); + auto abstract = argmax->Infer({tensor_x->ToAbstract()}); + MS_EXCEPTION_IF_NULL(abstract); + EXPECT_EQ(abstract->isa(), true); + auto shape_ptr = abstract->BuildShape(); + MS_EXCEPTION_IF_NULL(shape_ptr); + EXPECT_EQ(shape_ptr->isa(), true); + auto shape = shape_ptr->cast(); + MS_EXCEPTION_IF_NULL(shape); + auto shape_vec = shape->shape(); + auto type = abstract->BuildType(); + MS_EXCEPTION_IF_NULL(type); + EXPECT_EQ(type->isa(), true); + auto tensor_type = type->cast(); + MS_EXCEPTION_IF_NULL(tensor_type); + auto data_type = tensor_type->element(); + MS_EXCEPTION_IF_NULL(data_type); + EXPECT_EQ(data_type->type_id(), kNumberTypeInt32); + EXPECT_EQ(shape_vec.size(), 3); + EXPECT_EQ(shape_vec[0], 2); + EXPECT_EQ(shape_vec[1], 3); + EXPECT_EQ(shape_vec[2], 5); +} + +TEST_F(TestArgMax, test_ops_argmax2) { + auto argmax = std::make_shared(); + argmax->Init(1, kNumberTypeInt32); + auto tensor_x = TensorConstructUtils::CreateOnesTensor(kNumberTypeFloat16, std::vector{2, 3, 4}); + MS_EXCEPTION_IF_NULL(tensor_x); + auto abstract = argmax->Infer({tensor_x->ToAbstract()}); + MS_EXCEPTION_IF_NULL(abstract); + EXPECT_EQ(abstract->isa(), true); + auto shape_ptr = abstract->BuildShape(); + MS_EXCEPTION_IF_NULL(shape_ptr); + EXPECT_EQ(shape_ptr->isa(), true); + auto shape = shape_ptr->cast(); + MS_EXCEPTION_IF_NULL(shape); + auto shape_vec = shape->shape(); + auto type = abstract->BuildType(); + MS_EXCEPTION_IF_NULL(type); + EXPECT_EQ(type->isa(), true); + auto tensor_type = type->cast(); + MS_EXCEPTION_IF_NULL(tensor_type); + auto data_type = tensor_type->element(); + MS_EXCEPTION_IF_NULL(data_type); + EXPECT_EQ(data_type->type_id(), kNumberTypeInt32); + EXPECT_EQ(shape_vec.size(), 2); + EXPECT_EQ(shape_vec[0], 2); + EXPECT_EQ(shape_vec[1], 4); +} +} // namespace ops +} // namespace mindspore diff --git a/tests/ut/cpp/ops/test_ops_assert.cc b/tests/ut/cpp/ops/test_ops_assert.cc new file mode 100644 index 00000000000..e423b014454 --- /dev/null +++ b/tests/ut/cpp/ops/test_ops_assert.cc @@ -0,0 +1,108 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#include +#include +#include "common/common_test.h" +#include "ops/assert.h" +#include "ir/dtype/type.h" +#include "ir/value.h" +#include "abstract/dshape.h" +#include "utils/tensor_construct_utils.h" + +namespace mindspore { +namespace ops { + +namespace { +template +void SetTensorData(void *data, T num, size_t data_length) { + MS_EXCEPTION_IF_NULL(data); + auto tensor_data = reinterpret_cast(data); + MS_EXCEPTION_IF_NULL(tensor_data); + for (size_t index = 0; index < data_length; ++index) { + *tensor_data = num; + ++tensor_data; + } +} +} // namespace + +class TestAssert : public UT::Common { + public: + TestAssert() {} + void SetUp() {} + void TearDown() {} +}; + +TEST_F(TestAssert, test_ops_assert1) { + auto assert = std::make_shared(); + assert->Init(3); + EXPECT_EQ(assert->get_summarize(), 3); + std::vector inputs_ = {TensorConstructUtils::CreateOnesTensor(kNumberTypeFloat32, std::vector{1})}; + auto condition = MakeValue(std::vector{true}); + auto inputs = std::make_shared(inputs_); + auto abstract = assert->Infer({condition->ToAbstract(), inputs->ToAbstract()}); + MS_EXCEPTION_IF_NULL(abstract); + EXPECT_EQ(abstract->isa(), true); + auto shape_ptr = abstract->BuildShape(); + MS_EXCEPTION_IF_NULL(shape_ptr); + EXPECT_EQ(shape_ptr->isa(), true); + auto shape = shape_ptr->cast(); + MS_EXCEPTION_IF_NULL(shape); + auto shape_vec = shape->shape(); + EXPECT_EQ(shape_vec.size(), 1); + EXPECT_EQ(shape_vec[0], 1); + auto type = abstract->BuildType(); + MS_EXCEPTION_IF_NULL(type); + EXPECT_EQ(type->isa(), true); + auto tensor_type = type->cast(); + MS_EXCEPTION_IF_NULL(tensor_type); + auto data_type = tensor_type->element(); + MS_EXCEPTION_IF_NULL(data_type); + EXPECT_EQ(data_type->type_id(), kNumberTypeInt32); +} + +TEST_F(TestAssert, test_ops_assert2) { + auto assert = std::make_shared(); + assert->Init(3); + EXPECT_EQ(assert->get_summarize(), 3); + std::vector inputs_ = {TensorConstructUtils::CreateOnesTensor(kNumberTypeFloat32, std::vector{1})}; + auto tensor = std::make_shared(kNumberTypeBool, std::vector{1}); + MS_EXCEPTION_IF_NULL(tensor); + auto mem_size = IntToSize(tensor->ElementsNum()); + SetTensorData(tensor->data_c(), true, mem_size); + auto inputs = std::make_shared(inputs_); + auto abstract = assert->Infer({tensor->ToAbstract(), inputs->ToAbstract()}); + MS_EXCEPTION_IF_NULL(abstract); + EXPECT_EQ(abstract->isa(), true); + auto shape_ptr = abstract->BuildShape(); + MS_EXCEPTION_IF_NULL(shape_ptr); + EXPECT_EQ(shape_ptr->isa(), true); + auto shape = shape_ptr->cast(); + MS_EXCEPTION_IF_NULL(shape); + auto shape_vec = shape->shape(); + EXPECT_EQ(shape_vec.size(), 1); + EXPECT_EQ(shape_vec[0], 1); + auto type = abstract->BuildType(); + MS_EXCEPTION_IF_NULL(type); + EXPECT_EQ(type->isa(), true); + auto tensor_type = type->cast(); + MS_EXCEPTION_IF_NULL(tensor_type); + auto data_type = tensor_type->element(); + MS_EXCEPTION_IF_NULL(data_type); + EXPECT_EQ(data_type->type_id(), kNumberTypeInt32); +} + +} // namespace ops +} // namespace mindspore diff --git a/tests/ut/cpp/ops/test_ops_audio_spectrogram.cc b/tests/ut/cpp/ops/test_ops_audio_spectrogram.cc new 
file mode 100644 index 00000000000..c883db9bd66 --- /dev/null +++ b/tests/ut/cpp/ops/test_ops_audio_spectrogram.cc @@ -0,0 +1,69 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#include +#include +#include "common/common_test.h" +#include "ops/audio_spectrogram.h" +#include "ir/dtype/type.h" +#include "ir/value.h" +#include "abstract/dshape.h" +#include "utils/tensor_construct_utils.h" + +namespace mindspore { +namespace ops { + +class TestAudioSpectrogram : public UT::Common { + public: + TestAudioSpectrogram() {} + void SetUp() {} + void TearDown() {} +}; + +TEST_F(TestAudioSpectrogram, test_ops_audio_spectrogram1) { + auto audio_spectrogram = std::make_shared(); + audio_spectrogram->Init(3, 2, false); + EXPECT_EQ(audio_spectrogram->get_window_size(), 3); + EXPECT_EQ(audio_spectrogram->get_stride(), 2); + EXPECT_EQ(audio_spectrogram->get_mag_square(), false); + auto input0 = TensorConstructUtils::CreateOnesTensor(kNumberTypeInt32, std::vector{4, 3}); + auto input1 = TensorConstructUtils::CreateOnesTensor(kNumberTypeInt32, std::vector{1}); + MS_EXCEPTION_IF_NULL(input0); + MS_EXCEPTION_IF_NULL(input1); + auto abstract = audio_spectrogram->Infer({input0->ToAbstract(), input1->ToAbstract()}); + MS_EXCEPTION_IF_NULL(abstract); + EXPECT_EQ(abstract->isa(), true); + auto shape_ptr = abstract->BuildShape(); + MS_EXCEPTION_IF_NULL(shape_ptr); + EXPECT_EQ(shape_ptr->isa(), true); + auto shape = shape_ptr->cast(); + MS_EXCEPTION_IF_NULL(shape); + auto shape_vec = shape->shape(); + EXPECT_EQ(shape_vec.size(), 3); + EXPECT_EQ(shape_vec[0], 3); + EXPECT_EQ(shape_vec[1], 1); + EXPECT_EQ(shape_vec[2], 2); + auto type = abstract->BuildType(); + MS_EXCEPTION_IF_NULL(type); + EXPECT_EQ(type->isa(), true); + auto tensor_type = type->cast(); + MS_EXCEPTION_IF_NULL(tensor_type); + auto data_type = tensor_type->element(); + MS_EXCEPTION_IF_NULL(data_type); + EXPECT_EQ(data_type->type_id(), kNumberTypeInt32); +} + +} // namespace ops +} // namespace mindspore diff --git a/tests/ut/cpp/ops/test_ops_avg_pool_grad.cc b/tests/ut/cpp/ops/test_ops_avg_pool_grad.cc new file mode 100644 index 00000000000..b39f67501c5 --- /dev/null +++ b/tests/ut/cpp/ops/test_ops_avg_pool_grad.cc @@ -0,0 +1,64 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#include +#include +#include "common/common_test.h" +#include "ops/grad/avg_pool_grad.h" +#include "ir/dtype/type.h" +#include "ir/value.h" +#include "abstract/dshape.h" +#include "utils/tensor_construct_utils.h" + +namespace mindspore { +namespace ops { +class TestAvgPoolGrad : public UT::Common { + public: + TestAvgPoolGrad() {} + void SetUp() {} + void TearDown() {} +}; + +TEST_F(TestAvgPoolGrad, test_ops_avg_pool_grad1) { + auto avg_pool_grad = std::make_shared(); + avg_pool_grad->Init(); + EXPECT_EQ(avg_pool_grad->get_pad_mode(), VALID); + auto origin_input = MakeValue(std::vector{1, 2}); + auto dout = TensorConstructUtils::CreateOnesTensor(kNumberTypeFloat32, std::vector{1}); + MS_EXCEPTION_IF_NULL(origin_input); + MS_EXCEPTION_IF_NULL(dout); + auto abstract = avg_pool_grad->Infer({origin_input->ToAbstract(), dout->ToAbstract()}); + MS_EXCEPTION_IF_NULL(abstract); + EXPECT_EQ(abstract->isa(), true); + auto shape_ptr = abstract->BuildShape(); + MS_EXCEPTION_IF_NULL(shape_ptr); + EXPECT_EQ(shape_ptr->isa(), true); + auto shape = shape_ptr->cast(); + MS_EXCEPTION_IF_NULL(shape); + auto shape_vec = shape->shape(); + EXPECT_EQ(shape_vec.size(), 2); + EXPECT_EQ(shape_vec[0], 1); + EXPECT_EQ(shape_vec[1], 2); + auto type = abstract->BuildType(); + MS_EXCEPTION_IF_NULL(type); + EXPECT_EQ(type->isa(), true); + auto tensor_type = type->cast(); + MS_EXCEPTION_IF_NULL(tensor_type); + auto data_type = tensor_type->element(); + MS_EXCEPTION_IF_NULL(data_type); + EXPECT_EQ(data_type->type_id(), kNumberTypeFloat32); +} +} // namespace ops +} // namespace mindspore diff --git a/tests/ut/cpp/ops/test_ops_batch_norm.cc b/tests/ut/cpp/ops/test_ops_batch_norm.cc new file mode 100644 index 00000000000..fd63d3addd1 --- /dev/null +++ b/tests/ut/cpp/ops/test_ops_batch_norm.cc @@ -0,0 +1,95 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#include +#include +#include "common/common_test.h" +#include "ops/batch_norm.h" +#include "ir/dtype/type.h" +#include "ir/value.h" +#include "abstract/dshape.h" +#include "utils/tensor_construct_utils.h" +namespace mindspore { +namespace ops { +class TestBatchNorm : public UT::Common { + public: + TestBatchNorm() {} + void SetUp() {} + void TearDown() {} +}; + +TEST_F(TestBatchNorm, test_batch_norm) { + auto batch = std::make_shared(); + batch->Init(); + auto input_x = TensorConstructUtils::CreateOnesTensor(kNumberTypeFloat16, std::vector{2, 2}); + auto scale = TensorConstructUtils::CreateOnesTensor(kNumberTypeFloat32, std::vector{2}); + auto bias = TensorConstructUtils::CreateOnesTensor(kNumberTypeFloat32, std::vector{2}); + auto mean = TensorConstructUtils::CreateOnesTensor(kNumberTypeFloat32, std::vector{2}); + auto variance = TensorConstructUtils::CreateOnesTensor(kNumberTypeFloat32, std::vector{2}); + auto abstract = batch->Infer( + {input_x->ToAbstract(), scale->ToAbstract(), bias->ToAbstract(), mean->ToAbstract(), variance->ToAbstract()}); + MS_EXCEPTION_IF_NULL(abstract); + EXPECT_EQ(abstract->isa(), true); + auto shape_ptr = abstract->BuildShape(); + MS_EXCEPTION_IF_NULL(shape_ptr); + EXPECT_EQ(shape_ptr->isa(), true); + auto shape = shape_ptr->cast(); + MS_EXCEPTION_IF_NULL(shape); + auto shape_vec = shape->shape(); + EXPECT_EQ(shape_vec.size(), 5); + auto shape1 = shape_vec[0]->cast()->shape(); + EXPECT_EQ(shape1.size(), 2); + EXPECT_EQ(shape1[0], 2); + EXPECT_EQ(shape1[1], 2); + auto shape2 = shape_vec[1]->cast()->shape(); + EXPECT_EQ(shape2.size(), 1); + EXPECT_EQ(shape2[0], 2); + auto shape3 = shape_vec[2]->cast()->shape(); + EXPECT_EQ(shape3.size(), 1); + EXPECT_EQ(shape3[0], 2); + auto shape4 = shape_vec[3]->cast()->shape(); + EXPECT_EQ(shape4.size(), 1); + EXPECT_EQ(shape4[0], 2); + auto shape5 = shape_vec[4]->cast()->shape(); + EXPECT_EQ(shape5.size(), 1); + EXPECT_EQ(shape5[0], 2); + auto type_ptr = abstract->BuildType(); + MS_EXCEPTION_IF_NULL(type_ptr); + auto type = type_ptr->cast(); + auto type_vec = type->elements(); + EXPECT_EQ(type_vec.size(), 5); + MS_EXCEPTION_IF_NULL(type_vec[0]); + auto data_type1 = type_vec[0]->cast()->element(); + MS_EXCEPTION_IF_NULL(data_type1); + EXPECT_EQ(data_type1->type_id(), kNumberTypeFloat16); + MS_EXCEPTION_IF_NULL(type_vec[1]); + auto data_type2 = type_vec[1]->cast()->element(); + MS_EXCEPTION_IF_NULL(data_type2); + EXPECT_EQ(data_type2->type_id(), kNumberTypeFloat32); + MS_EXCEPTION_IF_NULL(type_vec[2]); + auto data_type3 = type_vec[2]->cast()->element(); + MS_EXCEPTION_IF_NULL(data_type3); + EXPECT_EQ(data_type3->type_id(), kNumberTypeFloat32); + MS_EXCEPTION_IF_NULL(type_vec[3]); + auto data_type4 = type_vec[3]->cast()->element(); + MS_EXCEPTION_IF_NULL(data_type4); + EXPECT_EQ(data_type4->type_id(), kNumberTypeFloat16); + MS_EXCEPTION_IF_NULL(type_vec[4]); + auto data_type5 = type_vec[4]->cast()->element(); + MS_EXCEPTION_IF_NULL(data_type5); + EXPECT_EQ(data_type5->type_id(), kNumberTypeFloat16); +} +} // namespace ops +} // namespace mindspore diff --git a/tests/ut/cpp/ops/test_ops_batch_norm_fold.cc b/tests/ut/cpp/ops/test_ops_batch_norm_fold.cc new file mode 100644 index 00000000000..5914424e7ab --- /dev/null +++ b/tests/ut/cpp/ops/test_ops_batch_norm_fold.cc @@ -0,0 +1,73 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#include +#include +#include "common/common_test.h" +#include "ops/batch_norm_fold.h" +#include "ir/dtype/type.h" +#include "ir/value.h" +#include "abstract/dshape.h" +#include "abstract/abstract_value.h" +#include "utils/tensor_construct_utils.h" + +namespace mindspore { +namespace ops { + +class TestBatchNormFold : public UT::Common { + public: + TestBatchNormFold() {} + void SetUp() {} + void TearDown() {} +}; + +TEST_F(TestBatchNormFold, test_ops_batch_norm_fold1) { + auto batch_norm_fold = std::make_shared(); + batch_norm_fold->Init(0.9, 1e-5, true, 0); + EXPECT_EQ((int64_t)(batch_norm_fold->get_momentum() - 0.9), 0); + EXPECT_EQ((int64_t)(batch_norm_fold->get_epsilon() - 1e-05), 0); + EXPECT_EQ(batch_norm_fold->get_is_training(), true); + EXPECT_EQ(batch_norm_fold->get_freeze_bn(), 0); + auto input_x = TensorConstructUtils::CreateOnesTensor(kNumberTypeFloat32, std::vector{2, 3}); + auto mean = TensorConstructUtils::CreateOnesTensor(kNumberTypeFloat32, std::vector{3}); + auto variance = TensorConstructUtils::CreateOnesTensor(kNumberTypeFloat32, std::vector{3}); + auto global_step = TensorConstructUtils::CreateOnesTensor(kNumberTypeInt32, std::vector{1}); + auto abstract = batch_norm_fold->Infer( + {input_x->ToAbstract(), mean->ToAbstract(), variance->ToAbstract(), global_step->ToAbstract()}); + MS_EXCEPTION_IF_NULL(abstract); + EXPECT_EQ(abstract->isa(), true); + auto shape_ptr = abstract->BuildShape(); + MS_EXCEPTION_IF_NULL(shape_ptr); + EXPECT_EQ(shape_ptr->isa(), true); + auto shape = shape_ptr->cast(); + MS_EXCEPTION_IF_NULL(shape); + auto shape_vec = shape->shape(); + EXPECT_EQ(shape_vec.size(), 4); + auto shape1 = shape_vec[0]->cast()->shape(); + EXPECT_EQ(shape1.size(), 1); + EXPECT_EQ(shape1[0], 3); + auto type_ptr = abstract->BuildType(); + MS_EXCEPTION_IF_NULL(type_ptr); + auto type = type_ptr->cast(); + MS_EXCEPTION_IF_NULL(type); + auto type_vec = type->elements(); + MS_EXCEPTION_IF_NULL(type_vec[0]); + auto data_type = type_vec[0]->cast()->element(); + MS_EXCEPTION_IF_NULL(data_type); + EXPECT_EQ(data_type->type_id(), kNumberTypeFloat32); +} + +} // namespace ops +} // namespace mindspore diff --git a/tests/ut/cpp/ops/test_ops_batch_norm_grad.cc b/tests/ut/cpp/ops/test_ops_batch_norm_grad.cc new file mode 100644 index 00000000000..06d74392d9a --- /dev/null +++ b/tests/ut/cpp/ops/test_ops_batch_norm_grad.cc @@ -0,0 +1,98 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#include +#include +#include "common/common_test.h" +#include "ops/grad/batch_norm_grad.h" +#include "ir/dtype/type.h" +#include "ir/value.h" +#include "abstract/dshape.h" +#include "abstract/abstract_value.h" +#include "utils/tensor_construct_utils.h" + +namespace mindspore { +namespace ops { +class TestBatchNormGrad : public UT::Common { + public: + TestBatchNormGrad() {} + void SetUp() {} + void TearDown() {} +}; + +TEST_F(TestBatchNormGrad, test_ops_batch_norm_grad1) { + auto batch_norm_grad = std::make_shared(); + batch_norm_grad->Init(); + EXPECT_EQ(batch_norm_grad->get_is_training(), false); + EXPECT_EQ((int64_t)(batch_norm_grad->get_epsilon() - 1e-05), 0); + auto input0 = TensorConstructUtils::CreateOnesTensor(kNumberTypeFloat32, std::vector{2}); + auto input1 = TensorConstructUtils::CreateOnesTensor(kNumberTypeFloat32, std::vector{2}); + auto input2 = TensorConstructUtils::CreateOnesTensor(kNumberTypeFloat32, std::vector{1}); + auto input3 = TensorConstructUtils::CreateOnesTensor(kNumberTypeFloat32, std::vector{1}); + auto input4 = TensorConstructUtils::CreateOnesTensor(kNumberTypeFloat32, std::vector{1}); + auto abstract = batch_norm_grad->Infer( + {input0->ToAbstract(), input1->ToAbstract(), input2->ToAbstract(), input3->ToAbstract(), input4->ToAbstract()}); + MS_EXCEPTION_IF_NULL(abstract); + EXPECT_EQ(abstract->isa(), true); + auto shape_ptr = abstract->BuildShape(); + MS_EXCEPTION_IF_NULL(shape_ptr); + EXPECT_EQ(shape_ptr->isa(), true); + auto shape = shape_ptr->cast(); + MS_EXCEPTION_IF_NULL(shape); + auto shape_vec = shape->shape(); + EXPECT_EQ(shape_vec.size(), 5); + auto shape0 = shape_vec[0]->cast()->shape(); + EXPECT_EQ(shape0.size(), 1); + EXPECT_EQ(shape0[0], 2); + auto shape1 = shape_vec[1]->cast()->shape(); + EXPECT_EQ(shape1.size(), 1); + EXPECT_EQ(shape1[0], 1); + auto shape2 = shape_vec[2]->cast()->shape(); + EXPECT_EQ(shape2.size(), 1); + EXPECT_EQ(shape2[0], 1); + auto shape3 = shape_vec[3]->cast()->shape(); + EXPECT_EQ(shape3.size(), 1); + EXPECT_EQ(shape3[0], 1); + auto shape4 = shape_vec[4]->cast()->shape(); + EXPECT_EQ(shape4.size(), 1); + EXPECT_EQ(shape4[0], 1); + auto type_ptr = abstract->BuildType(); + MS_EXCEPTION_IF_NULL(type_ptr); + auto type = type_ptr->cast(); + MS_EXCEPTION_IF_NULL(type); + auto type_vec = type->elements(); + MS_EXCEPTION_IF_NULL(type_vec[0]); + auto data_type0 = type_vec[0]->cast()->element(); + MS_EXCEPTION_IF_NULL(data_type0); + EXPECT_EQ(data_type0->type_id(), kNumberTypeFloat32); + MS_EXCEPTION_IF_NULL(type_vec[1]); + auto data_type1 = type_vec[1]->cast()->element(); + MS_EXCEPTION_IF_NULL(data_type1); + EXPECT_EQ(data_type1->type_id(), kNumberTypeFloat32); + MS_EXCEPTION_IF_NULL(type_vec[2]); + auto data_type2 = type_vec[2]->cast()->element(); + MS_EXCEPTION_IF_NULL(data_type2); + EXPECT_EQ(data_type2->type_id(), kNumberTypeFloat32); + MS_EXCEPTION_IF_NULL(type_vec[3]); + auto data_type3 = type_vec[3]->cast()->element(); + MS_EXCEPTION_IF_NULL(data_type3); + EXPECT_EQ(data_type3->type_id(), kNumberTypeFloat32); + MS_EXCEPTION_IF_NULL(type_vec[4]); + auto data_type4 = type_vec[4]->cast()->element(); + MS_EXCEPTION_IF_NULL(data_type4); + EXPECT_EQ(data_type4->type_id(), kNumberTypeFloat32); +} +} // namespace ops +} // namespace mindspore diff --git a/tests/ut/cpp/ops/test_ops_batchtospace.cc b/tests/ut/cpp/ops/test_ops_batchtospace.cc new file mode 100644 index 00000000000..000439d02d4 --- /dev/null +++ b/tests/ut/cpp/ops/test_ops_batchtospace.cc @@ -0,0 +1,66 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * 
+ * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#include +#include +#include "common/common_test.h" +#include "ops/batch_to_space.h" +#include "ir/dtype/type.h" +#include "ir/value.h" +#include "abstract/dshape.h" +#include "utils/tensor_construct_utils.h" + +namespace mindspore { +namespace ops { +class TestBatchToSpace : public UT::Common { + public: + TestBatchToSpace() {} + void SetUp() {} + void TearDown() {} +}; + +TEST_F(TestBatchToSpace, test_ops_batch_to_space1) { + auto batch_to_space = std::make_shared(); + auto block = std::vector{2, 2}; + auto crop = std::vector{2, 3}; + auto crops = std::vector>{crop, crop}; + batch_to_space->Init(block, crops); + auto tensor_x = TensorConstructUtils::CreateOnesTensor(kNumberTypeFloat32, std::vector{12, 3, 4, 5}); + MS_EXCEPTION_IF_NULL(tensor_x); + auto abstract = batch_to_space->Infer({tensor_x->ToAbstract()}); + MS_EXCEPTION_IF_NULL(abstract); + EXPECT_EQ(abstract->isa(), true); + auto shape_ptr = abstract->BuildShape(); + MS_EXCEPTION_IF_NULL(shape_ptr); + EXPECT_EQ(shape_ptr->isa(), true); + auto shape = shape_ptr->cast(); + MS_EXCEPTION_IF_NULL(shape); + auto shape_vec = shape->shape(); + auto type = abstract->BuildType(); + MS_EXCEPTION_IF_NULL(type); + EXPECT_EQ(type->isa(), true); + auto tensor_type = type->cast(); + MS_EXCEPTION_IF_NULL(tensor_type); + auto data_type = tensor_type->element(); + MS_EXCEPTION_IF_NULL(data_type); + EXPECT_EQ(data_type->type_id(), kNumberTypeFloat32); + EXPECT_EQ(shape_vec.size(), 4); + EXPECT_EQ(shape_vec[0], 3); + EXPECT_EQ(shape_vec[1], 3); + EXPECT_EQ(shape_vec[2], 3); + EXPECT_EQ(shape_vec[3], 5); +} +} // namespace ops +} // namespace mindspore diff --git a/tests/ut/cpp/ops/test_ops_binary_cross_entropy_grad.cc b/tests/ut/cpp/ops/test_ops_binary_cross_entropy_grad.cc new file mode 100644 index 00000000000..b9146983463 --- /dev/null +++ b/tests/ut/cpp/ops/test_ops_binary_cross_entropy_grad.cc @@ -0,0 +1,98 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#include +#include +#include "common/common_test.h" +#include "ops/grad/binary_cross_entropy_grad.h" +#include "ir/dtype/type.h" +#include "ir/value.h" +#include "abstract/dshape.h" +#include "utils/tensor_construct_utils.h" + +namespace mindspore { +namespace ops { +class TestBinaryCrossEntropyGrad : public UT::Common { + public: + TestBinaryCrossEntropyGrad() {} + void SetUp() {} + void TearDown() {} +}; + +TEST_F(TestBinaryCrossEntropyGrad, test_ops_binary_cross_entropy_grad1) { + auto binary_cross_entropy_grad = std::make_shared(); + binary_cross_entropy_grad->Init(MEAN); + EXPECT_EQ(binary_cross_entropy_grad->get_reduction(), MEAN); + binary_cross_entropy_grad->set_reduction(MEAN); + EXPECT_EQ(binary_cross_entropy_grad->get_reduction(), MEAN); + auto input1 = TensorConstructUtils::CreateOnesTensor(kNumberTypeFloat16, std::vector{1, 2, 3, 4, 5}); + auto input2 = TensorConstructUtils::CreateOnesTensor(kNumberTypeFloat16, std::vector{1, 2, 3, 4, 5}); + auto input3 = TensorConstructUtils::CreateOnesTensor(kNumberTypeFloat16, std::vector{1, 2, 3, 4, 5}); + auto input4 = TensorConstructUtils::CreateOnesTensor(kNumberTypeFloat16, std::vector{1, 2, 3, 4, 5}); + auto abstract = binary_cross_entropy_grad->Infer( + {input1->ToAbstract(), input2->ToAbstract(), input3->ToAbstract(), input4->ToAbstract()}); + MS_EXCEPTION_IF_NULL(abstract); + EXPECT_EQ(abstract->isa(), true); + auto shape_ptr = abstract->BuildShape(); + MS_EXCEPTION_IF_NULL(shape_ptr); + EXPECT_EQ(shape_ptr->isa(), true); + auto shape = shape_ptr->cast(); + MS_EXCEPTION_IF_NULL(shape); + auto shape_vec = shape->shape(); + auto type = abstract->BuildType(); + MS_EXCEPTION_IF_NULL(type); + EXPECT_EQ(type->isa(), true); + auto tensor_type = type->cast(); + MS_EXCEPTION_IF_NULL(tensor_type); + auto data_type = tensor_type->element(); + MS_EXCEPTION_IF_NULL(data_type); + EXPECT_EQ(data_type->type_id(), kNumberTypeFloat16); + EXPECT_EQ(shape_vec.size(), 5); + EXPECT_EQ(shape_vec[0], 1); +} + +TEST_F(TestBinaryCrossEntropyGrad, test_ops_binary_cross_entropy_grad2) { + auto binary_cross_entropy_grad = std::make_shared(); + binary_cross_entropy_grad->Init(MEAN); + EXPECT_EQ(binary_cross_entropy_grad->get_reduction(), MEAN); + binary_cross_entropy_grad->set_reduction(MEAN); + EXPECT_EQ(binary_cross_entropy_grad->get_reduction(), MEAN); + auto input1 = TensorConstructUtils::CreateOnesTensor(kNumberTypeFloat32, std::vector{1, 2, 3, 4, 5}); + auto input2 = TensorConstructUtils::CreateOnesTensor(kNumberTypeFloat32, std::vector{1, 2, 3, 4, 5}); + auto input3 = TensorConstructUtils::CreateOnesTensor(kNumberTypeFloat32, std::vector{1, 2, 3, 4, 5}); + auto input4 = TensorConstructUtils::CreateOnesTensor(kNumberTypeFloat32, std::vector{1, 2, 3, 4, 5}); + auto abstract = binary_cross_entropy_grad->Infer( + {input1->ToAbstract(), input2->ToAbstract(), input3->ToAbstract(), input4->ToAbstract()}); + MS_EXCEPTION_IF_NULL(abstract); + EXPECT_EQ(abstract->isa(), true); + auto shape_ptr = abstract->BuildShape(); + MS_EXCEPTION_IF_NULL(shape_ptr); + EXPECT_EQ(shape_ptr->isa(), true); + auto shape = shape_ptr->cast(); + MS_EXCEPTION_IF_NULL(shape); + auto shape_vec = shape->shape(); + auto type = abstract->BuildType(); + MS_EXCEPTION_IF_NULL(type); + EXPECT_EQ(type->isa(), true); + auto tensor_type = type->cast(); + MS_EXCEPTION_IF_NULL(tensor_type); + auto data_type = tensor_type->element(); + MS_EXCEPTION_IF_NULL(data_type); + EXPECT_EQ(data_type->type_id(), kNumberTypeFloat32); + EXPECT_EQ(shape_vec.size(), 5); + EXPECT_EQ(shape_vec[0], 1); +} +} // 
namespace ops +} // namespace mindspore diff --git a/tests/ut/cpp/ops/test_ops_ceil.cc b/tests/ut/cpp/ops/test_ops_ceil.cc new file mode 100644 index 00000000000..db9b7bf7611 --- /dev/null +++ b/tests/ut/cpp/ops/test_ops_ceil.cc @@ -0,0 +1,61 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#include +#include +#include "common/common_test.h" +#include "ops/ceil.h" +#include "ir/dtype/type.h" +#include "ir/value.h" +#include "abstract/dshape.h" +#include "utils/tensor_construct_utils.h" + +namespace mindspore { +namespace ops { + +class TestCeil : public UT::Common { + public: + TestCeil() {} + void SetUp() {} + void TearDown() {} +}; + +TEST_F(TestCeil, test_ops_ceil1) { + auto ceil = std::make_shared(); + auto input0 = TensorConstructUtils::CreateOnesTensor(kNumberTypeFloat32, std::vector{3}); + MS_EXCEPTION_IF_NULL(input0); + auto abstract = ceil->Infer({input0->ToAbstract()}); + MS_EXCEPTION_IF_NULL(abstract); + EXPECT_EQ(abstract->isa(), true); + auto shape_ptr = abstract->BuildShape(); + MS_EXCEPTION_IF_NULL(shape_ptr); + EXPECT_EQ(shape_ptr->isa(), true); + auto shape = shape_ptr->cast(); + MS_EXCEPTION_IF_NULL(shape); + auto shape_vec = shape->shape(); + EXPECT_EQ(shape_vec.size(), 1); + EXPECT_EQ(shape_vec[0], 3); + auto type = abstract->BuildType(); + MS_EXCEPTION_IF_NULL(type); + EXPECT_EQ(type->isa(), true); + auto tensor_type = type->cast(); + MS_EXCEPTION_IF_NULL(tensor_type); + auto data_type = tensor_type->element(); + MS_EXCEPTION_IF_NULL(data_type); + EXPECT_EQ(data_type->type_id(), kNumberTypeFloat32); +} + +} // namespace ops +} // namespace mindspore diff --git a/tests/ut/cpp/ops/test_ops_concat.cc b/tests/ut/cpp/ops/test_ops_concat.cc new file mode 100644 index 00000000000..1c9de36a32a --- /dev/null +++ b/tests/ut/cpp/ops/test_ops_concat.cc @@ -0,0 +1,101 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#include +#include +#include "common/common_test.h" +#include "ops/concat.h" +#include "ir/dtype/type.h" +#include "ir/value.h" +#include "abstract/dshape.h" +#include "utils/tensor_construct_utils.h" + +namespace mindspore { +namespace ops { +class TestConcat : public UT::Common { + public: + TestConcat() {} + void SetUp() {} + void TearDown() {} +}; + +TEST_F(TestConcat, test_ops_concat1) { + auto concat = std::make_shared(); + concat->Init(1); + auto tensor_x1 = TensorConstructUtils::CreateOnesTensor(kNumberTypeFloat32, std::vector{3, 2, 7, 7}); + auto tensor_x2 = TensorConstructUtils::CreateOnesTensor(kNumberTypeFloat32, std::vector{3, 3, 7, 7}); + auto tensor_x3 = TensorConstructUtils::CreateOnesTensor(kNumberTypeFloat32, std::vector{3, 4, 7, 7}); + MS_EXCEPTION_IF_NULL(tensor_x1); + MS_EXCEPTION_IF_NULL(tensor_x2); + MS_EXCEPTION_IF_NULL(tensor_x3); + auto input_tuple = std::make_shared(std::vector{tensor_x1, tensor_x2, tensor_x3}); + auto abstract = concat->Infer({input_tuple->ToAbstract()}); + MS_EXCEPTION_IF_NULL(abstract); + EXPECT_EQ(abstract->isa(), true); + auto shape_ptr = abstract->BuildShape(); + MS_EXCEPTION_IF_NULL(shape_ptr); + EXPECT_EQ(shape_ptr->isa(), true); + auto shape = shape_ptr->cast(); + MS_EXCEPTION_IF_NULL(shape); + auto shape_vec = shape->shape(); + auto type = abstract->BuildType(); + MS_EXCEPTION_IF_NULL(type); + EXPECT_EQ(type->isa(), true); + auto tensor_type = type->cast(); + MS_EXCEPTION_IF_NULL(tensor_type); + auto data_type = tensor_type->element(); + MS_EXCEPTION_IF_NULL(data_type); + EXPECT_EQ(data_type->type_id(), kNumberTypeFloat32); + EXPECT_EQ(shape_vec.size(), 4); + EXPECT_EQ(shape_vec[0], 3); + EXPECT_EQ(shape_vec[1], 9); + EXPECT_EQ(shape_vec[2], 7); + EXPECT_EQ(shape_vec[3], 7); +} + +TEST_F(TestConcat, test_ops_concat2) { + auto concat = std::make_shared(); + concat->Init(2); + auto tensor_x1 = TensorConstructUtils::CreateOnesTensor(kNumberTypeFloat16, std::vector{3, 4, 5}); + auto tensor_x2 = TensorConstructUtils::CreateOnesTensor(kNumberTypeFloat16, std::vector{3, 4, 2}); + auto tensor_x3 = TensorConstructUtils::CreateOnesTensor(kNumberTypeFloat16, std::vector{3, 4, 3}); + MS_EXCEPTION_IF_NULL(tensor_x1); + MS_EXCEPTION_IF_NULL(tensor_x2); + MS_EXCEPTION_IF_NULL(tensor_x3); + auto input_tuple = std::make_shared(std::vector{tensor_x1, tensor_x2, tensor_x3}); + auto abstract = concat->Infer({input_tuple->ToAbstract()}); + MS_EXCEPTION_IF_NULL(abstract); + EXPECT_EQ(abstract->isa(), true); + auto shape_ptr = abstract->BuildShape(); + MS_EXCEPTION_IF_NULL(shape_ptr); + EXPECT_EQ(shape_ptr->isa(), true); + auto shape = shape_ptr->cast(); + MS_EXCEPTION_IF_NULL(shape); + auto shape_vec = shape->shape(); + auto type = abstract->BuildType(); + MS_EXCEPTION_IF_NULL(type); + EXPECT_EQ(type->isa(), true); + auto tensor_type = type->cast(); + MS_EXCEPTION_IF_NULL(tensor_type); + auto data_type = tensor_type->element(); + MS_EXCEPTION_IF_NULL(data_type); + EXPECT_EQ(data_type->type_id(), kNumberTypeFloat16); + EXPECT_EQ(shape_vec.size(), 3); + EXPECT_EQ(shape_vec[0], 3); + EXPECT_EQ(shape_vec[1], 4); + EXPECT_EQ(shape_vec[2], 10); +} +} // namespace ops +} // namespace mindspore diff --git a/tests/ut/cpp/ops/test_ops_constant.cc b/tests/ut/cpp/ops/test_ops_constant.cc new file mode 100644 index 00000000000..d1819a16446 --- /dev/null +++ b/tests/ut/cpp/ops/test_ops_constant.cc @@ -0,0 +1,62 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file 
except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#include +#include +#include "common/common_test.h" +#include "ops/constant.h" +#include "ir/dtype/type.h" +#include "ir/value.h" +#include "abstract/dshape.h" +#include "utils/tensor_construct_utils.h" + +namespace mindspore { +namespace ops { +class TestConstant : public UT::Common { + public: + TestConstant() {} + void SetUp() {} + void TearDown() {} +}; + +TEST_F(TestConstant, test_ops_constant1) { + auto constant = std::make_shared(); + auto tensor_x = TensorConstructUtils::CreateOnesTensor(kNumberTypeFloat32, std::vector{2, 3, 4, 5}); + MS_EXCEPTION_IF_NULL(tensor_x); + auto abstract = constant->Infer({tensor_x->ToAbstract()}); + MS_EXCEPTION_IF_NULL(abstract); + EXPECT_EQ(abstract->isa(), true); + auto shape_ptr = abstract->BuildShape(); + MS_EXCEPTION_IF_NULL(shape_ptr); + EXPECT_EQ(shape_ptr->isa(), true); + auto shape = shape_ptr->cast(); + MS_EXCEPTION_IF_NULL(shape); + auto shape_vec = shape->shape(); + auto type = abstract->BuildType(); + MS_EXCEPTION_IF_NULL(type); + EXPECT_EQ(type->isa(), true); + auto tensor_type = type->cast(); + MS_EXCEPTION_IF_NULL(tensor_type); + auto data_type = tensor_type->element(); + MS_EXCEPTION_IF_NULL(data_type); + EXPECT_EQ(data_type->type_id(), kNumberTypeFloat32); + EXPECT_EQ(shape_vec.size(), 4); + EXPECT_EQ(shape_vec[0], 2); + EXPECT_EQ(shape_vec[1], 3); + EXPECT_EQ(shape_vec[2], 4); + EXPECT_EQ(shape_vec[3], 5); +} +} // namespace ops +} // namespace mindspore diff --git a/tests/ut/cpp/c_ops/test_c_ops_conv2d.cc b/tests/ut/cpp/ops/test_ops_conv2d.cc similarity index 73% rename from tests/ut/cpp/c_ops/test_c_ops_conv2d.cc rename to tests/ut/cpp/ops/test_ops_conv2d.cc index befde522c99..40f462097a5 100644 --- a/tests/ut/cpp/c_ops/test_c_ops_conv2d.cc +++ b/tests/ut/cpp/ops/test_ops_conv2d.cc @@ -16,11 +16,12 @@ #include #include #include "common/common_test.h" -#include "c_ops/conv2d.h" +#include "ops/conv2d.h" #include "ir/dtype/type.h" #include "abstract/dshape.h" #include "utils/tensor_construct_utils.h" namespace mindspore { +namespace ops { class TestConv2d : public UT::Common { public: TestConv2d() {} @@ -28,9 +29,30 @@ class TestConv2d : public UT::Common { void TearDown() {} }; -TEST_F(TestConv2d, test_cops_conv2d) { +TEST_F(TestConv2d, test_ops_conv2d) { auto conv_2d = std::make_shared(); conv_2d->Init(64, {7, 7}); + std::vector kernel_size = conv_2d->get_kernel_size(); + for (auto item : kernel_size) { + EXPECT_EQ(item, 7); + } + std::vector stride = conv_2d->get_stride(); + for (auto item : stride) { + EXPECT_EQ(item, 1); + } + std::vector dilation = conv_2d->get_dilation(); + for (auto item : dilation) { + EXPECT_EQ(item, 1); + } + EXPECT_EQ(conv_2d->get_pad_mode(), VALID); + std::vector pad = conv_2d->get_pad(); + for (auto item : pad) { + EXPECT_EQ(item, 0); + } + EXPECT_EQ(conv_2d->get_mode(), 1); + EXPECT_EQ(conv_2d->get_group(), 1); + EXPECT_EQ(conv_2d->get_out_channel(), 64); + EXPECT_EQ(conv_2d->get_format(), NCHW); auto tensor_x = TensorConstructUtils::CreateOnesTensor(kNumberTypeFloat32, std::vector{32, 3, 224, 224}); auto tensor_w = 
TensorConstructUtils::CreateOnesTensor(kNumberTypeFloat32, std::vector{64, 3, 7, 7}); MS_EXCEPTION_IF_NULL(tensor_x); @@ -58,4 +80,5 @@ TEST_F(TestConv2d, test_cops_conv2d) { EXPECT_EQ(shape_vec[3], 218); } -} // namespace mindspore \ No newline at end of file +} // namespace ops +} // namespace mindspore diff --git a/tests/ut/cpp/ops/test_ops_cos.cc b/tests/ut/cpp/ops/test_ops_cos.cc new file mode 100644 index 00000000000..ec6b2e07195 --- /dev/null +++ b/tests/ut/cpp/ops/test_ops_cos.cc @@ -0,0 +1,63 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#include +#include +#include "common/common_test.h" +#include "ops/cos.h" +#include "ir/dtype/type.h" +#include "ir/value.h" +#include "abstract/dshape.h" +#include "utils/tensor_construct_utils.h" + +namespace mindspore { +namespace ops { +class TestCos : public UT::Common { + public: + TestCos() {} + void SetUp() {} + void TearDown() {} +}; + +TEST_F(TestCos, test_ops_cos1) { + auto cos = std::make_shared(); + // cos->Init(1.1); + auto tensor_x = TensorConstructUtils::CreateOnesTensor(kNumberTypeFloat32, std::vector{2, 3, 4, 5}); + MS_EXCEPTION_IF_NULL(tensor_x); + auto abstract = cos->Infer({tensor_x->ToAbstract()}); + MS_EXCEPTION_IF_NULL(abstract); + EXPECT_EQ(abstract->isa(), true); + auto shape_ptr = abstract->BuildShape(); + MS_EXCEPTION_IF_NULL(shape_ptr); + EXPECT_EQ(shape_ptr->isa(), true); + auto shape = shape_ptr->cast(); + MS_EXCEPTION_IF_NULL(shape); + auto shape_vec = shape->shape(); + auto type = abstract->BuildType(); + MS_EXCEPTION_IF_NULL(type); + EXPECT_EQ(type->isa(), true); + auto tensor_type = type->cast(); + MS_EXCEPTION_IF_NULL(tensor_type); + auto data_type = tensor_type->element(); + MS_EXCEPTION_IF_NULL(data_type); + EXPECT_EQ(data_type->type_id(), kNumberTypeFloat32); + EXPECT_EQ(shape_vec.size(), 4); + EXPECT_EQ(shape_vec[0], 2); + EXPECT_EQ(shape_vec[1], 3); + EXPECT_EQ(shape_vec[2], 4); + EXPECT_EQ(shape_vec[3], 5); +} +} // namespace ops +} // namespace mindspore diff --git a/tests/ut/cpp/ops/test_ops_crop.cc b/tests/ut/cpp/ops/test_ops_crop.cc new file mode 100644 index 00000000000..547c2374873 --- /dev/null +++ b/tests/ut/cpp/ops/test_ops_crop.cc @@ -0,0 +1,78 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#include +#include +#include "common/common_test.h" +#include "ops/crop.h" +#include "ir/dtype/type.h" +#include "ir/value.h" +#include "abstract/dshape.h" +#include "utils/tensor_construct_utils.h" + +namespace mindspore { +namespace ops { +class TestCrop : public UT::Common { + public: + TestCrop() {} + void SetUp() {} + void TearDown() {} +}; + +TEST_F(TestCrop, test_ops_crop1) { + auto crop = std::make_shared(); + crop->Init(1, std::vector{1, 1, 1, 1}); + std::vector ret = crop->get_offsets(); + EXPECT_EQ(crop->get_axis(), 1); + for (auto item : ret) { + EXPECT_EQ(item, 1); + } + auto tensor_x1 = std::make_shared(kNumberTypeFloat32, std::vector{2, 2}); + auto tensor_x2 = std::make_shared(kNumberTypeInt32, std::vector{1}); + MS_EXCEPTION_IF_NULL(tensor_x1); + MS_EXCEPTION_IF_NULL(tensor_x2); + auto tensor_x1_data = reinterpret_cast(tensor_x1->data_c()); + *tensor_x1_data = 1.0; + tensor_x1_data++; + *tensor_x1_data = 2.0; + tensor_x1_data++; + *tensor_x1_data = 3.0; + tensor_x1_data++; + *tensor_x1_data = 4.0; + tensor_x1_data++; + auto tensor_x2_data = reinterpret_cast(tensor_x2->data_c()); + *tensor_x2_data = 1; + auto abstract = crop->Infer({tensor_x1->ToAbstract(), tensor_x2->ToAbstract()}); + MS_EXCEPTION_IF_NULL(abstract); + EXPECT_EQ(abstract->isa(), true); + auto shape_ptr = abstract->BuildShape(); + MS_EXCEPTION_IF_NULL(shape_ptr); + EXPECT_EQ(shape_ptr->isa(), true); + auto shape = shape_ptr->cast(); + MS_EXCEPTION_IF_NULL(shape); + auto shape_vec = shape->shape(); + auto type = abstract->BuildType(); + MS_EXCEPTION_IF_NULL(type); + EXPECT_EQ(type->isa(), true); + auto tensor_type = type->cast(); + MS_EXCEPTION_IF_NULL(tensor_type); + auto data_type = tensor_type->element(); + MS_EXCEPTION_IF_NULL(data_type); + EXPECT_EQ(data_type->type_id(), kNumberTypeFloat32); + EXPECT_EQ(shape_vec.size(), 1); + EXPECT_EQ(shape_vec[0], 1); +} +} // namespace ops +} // namespace mindspore \ No newline at end of file diff --git a/tests/ut/cpp/ops/test_ops_custom_predict.cc b/tests/ut/cpp/ops/test_ops_custom_predict.cc new file mode 100644 index 00000000000..673e0b3c13f --- /dev/null +++ b/tests/ut/cpp/ops/test_ops_custom_predict.cc @@ -0,0 +1,74 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#include +#include +#include "common/common_test.h" +#include "ops/custom_predict.h" +#include "ir/dtype/type.h" +#include "ir/value.h" +#include "abstract/dshape.h" +#include "utils/tensor_construct_utils.h" + +namespace mindspore { +namespace ops { + +class TestCustomPredict : public UT::Common { + public: + TestCustomPredict() {} + void SetUp() {} + void TearDown() {} +}; + +TEST_F(TestCustomPredict, test_ops_custom_predict1) { + auto custom_predict = std::make_shared(); + custom_predict->Init(5, 0.1); + EXPECT_EQ(custom_predict->get_output_num(), 5); + EXPECT_EQ((int64_t)(custom_predict->get_weight_threshold() - 0.1), 0); + auto inputs0 = TensorConstructUtils::CreateOnesTensor(kNumberTypeInt32, std::vector{1}); + MS_EXCEPTION_IF_NULL(inputs0); + auto abstract = custom_predict->Infer({inputs0->ToAbstract()}); + MS_EXCEPTION_IF_NULL(abstract); + EXPECT_EQ(abstract->isa(), true); + auto shape_ptr = abstract->BuildShape(); + MS_EXCEPTION_IF_NULL(shape_ptr); + EXPECT_EQ(shape_ptr->isa(), true); + auto shape = shape_ptr->cast(); + MS_EXCEPTION_IF_NULL(shape); + auto shape_vec = shape->shape(); + EXPECT_EQ(shape_vec.size(), 2); + auto shape1 = shape_vec[0]->cast()->shape(); + EXPECT_EQ(shape1.size(), 1); + EXPECT_EQ(shape1[0], 5); + auto shape2 = shape_vec[1]->cast()->shape(); + EXPECT_EQ(shape2.size(), 1); + EXPECT_EQ(shape2[0], 5); + auto type_ptr = abstract->BuildType(); + MS_EXCEPTION_IF_NULL(type_ptr); + auto type = type_ptr->cast(); + MS_EXCEPTION_IF_NULL(type); + auto type_vec = type->elements(); + MS_EXCEPTION_IF_NULL(type_vec[0]); + auto data0_type = type_vec[0]->cast()->element(); + MS_EXCEPTION_IF_NULL(data0_type); + EXPECT_EQ(data0_type->type_id(), kNumberTypeInt32); + MS_EXCEPTION_IF_NULL(type_vec[1]); + auto data1_type = type_vec[1]->cast()->element(); + MS_EXCEPTION_IF_NULL(data1_type); + EXPECT_EQ(data1_type->type_id(), kNumberTypeFloat32); +} + +} // namespace ops +} // namespace mindspore diff --git a/tests/ut/cpp/ops/test_ops_depthtospace.cc b/tests/ut/cpp/ops/test_ops_depthtospace.cc new file mode 100644 index 00000000000..530b61dde29 --- /dev/null +++ b/tests/ut/cpp/ops/test_ops_depthtospace.cc @@ -0,0 +1,63 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+#include <vector>
+#include <memory>
+#include "common/common_test.h"
+#include "ops/depth_to_space.h"
+#include "ir/dtype/type.h"
+#include "ir/value.h"
+#include "abstract/dshape.h"
+#include "utils/tensor_construct_utils.h"
+
+namespace mindspore {
+namespace ops {
+class TestDepthToSpace : public UT::Common {
+ public:
+  TestDepthToSpace() {}
+  void SetUp() {}
+  void TearDown() {}
+};
+
+TEST_F(TestDepthToSpace, test_ops_depth_to_space1) {
+  auto depth_to_space = std::make_shared<DepthToSpace>();
+  depth_to_space->Init(2, NCHW);
+  auto tensor_x = TensorConstructUtils::CreateOnesTensor(kNumberTypeFloat32, std::vector<int64_t>{3, 12, 4, 5});
+  MS_EXCEPTION_IF_NULL(tensor_x);
+  auto abstract = depth_to_space->Infer({tensor_x->ToAbstract()});
+  MS_EXCEPTION_IF_NULL(abstract);
+  EXPECT_EQ(abstract->isa<abstract::AbstractTensor>(), true);
+  auto shape_ptr = abstract->BuildShape();
+  MS_EXCEPTION_IF_NULL(shape_ptr);
+  EXPECT_EQ(shape_ptr->isa<abstract::Shape>(), true);
+  auto shape = shape_ptr->cast<abstract::ShapePtr>();
+  MS_EXCEPTION_IF_NULL(shape);
+  auto shape_vec = shape->shape();
+  auto type = abstract->BuildType();
+  MS_EXCEPTION_IF_NULL(type);
+  EXPECT_EQ(type->isa<TensorType>(), true);
+  auto tensor_type = type->cast<TensorTypePtr>();
+  MS_EXCEPTION_IF_NULL(tensor_type);
+  auto data_type = tensor_type->element();
+  MS_EXCEPTION_IF_NULL(data_type);
+  EXPECT_EQ(data_type->type_id(), kNumberTypeFloat32);
+  EXPECT_EQ(shape_vec.size(), 4);
+  EXPECT_EQ(shape_vec[0], 3);
+  EXPECT_EQ(shape_vec[1], 3);
+  EXPECT_EQ(shape_vec[2], 8);
+  EXPECT_EQ(shape_vec[3], 10);
+}
+}  // namespace ops
+}  // namespace mindspore
diff --git a/tests/ut/cpp/ops/test_ops_detection_post_process.cc b/tests/ut/cpp/ops/test_ops_detection_post_process.cc
new file mode 100644
index 00000000000..989839cc1d9
--- /dev/null
+++ b/tests/ut/cpp/ops/test_ops_detection_post_process.cc
@@ -0,0 +1,89 @@
+/**
+ * Copyright 2021 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ +#include +#include +#include "common/common_test.h" +#include "ops/detection_post_process.h" +#include "ir/dtype/type.h" +#include "ir/value.h" +#include "abstract/dshape.h" +#include "utils/tensor_construct_utils.h" + +namespace mindspore { +namespace ops { +class TestDetectionPostProcess : public UT::Common { + public: + TestDetectionPostProcess() {} + void SetUp() {} + void TearDown() {} +}; + +TEST_F(TestDetectionPostProcess, test_detection_post_process) { + auto op = std::make_shared(); + op->Init(2, {1.0, 2.0}, 5.0, 4.0, 3, 3, 3, 3, false, false); + auto boxes = TensorConstructUtils::CreateOnesTensor(kNumberTypeFloat16, std::vector{2, 3, 4}); + auto scores = TensorConstructUtils::CreateOnesTensor(kNumberTypeFloat16, std::vector{2, 3, 4}); + auto anchors = TensorConstructUtils::CreateOnesTensor(kNumberTypeFloat16, std::vector{3, 4}); + auto abstract = op->Infer({boxes->ToAbstract(), scores->ToAbstract(), anchors->ToAbstract()}); + MS_EXCEPTION_IF_NULL(abstract); + EXPECT_EQ(abstract->isa(), true); + auto shape_ptr = abstract->BuildShape(); + MS_EXCEPTION_IF_NULL(shape_ptr); + EXPECT_EQ(shape_ptr->isa(), true); + auto shape = shape_ptr->cast(); + MS_EXCEPTION_IF_NULL(shape); + auto shape_vec = shape->shape(); + EXPECT_EQ(shape_vec.size(), 4); + auto shape1 = shape_vec[0]->cast()->shape(); + EXPECT_EQ(shape1.size(), 3); + EXPECT_EQ(shape1[0], 1); + EXPECT_EQ(shape1[1], 9); + EXPECT_EQ(shape1[2], 4); + auto shape2 = shape_vec[1]->cast()->shape(); + EXPECT_EQ(shape2.size(), 2); + EXPECT_EQ(shape2[0], 1); + EXPECT_EQ(shape2[1], 9); + auto shape3 = shape_vec[2]->cast()->shape(); + EXPECT_EQ(shape3.size(), 2); + EXPECT_EQ(shape3[0], 1); + EXPECT_EQ(shape3[1], 9); + auto shape4 = shape_vec[3]->cast()->shape(); + EXPECT_EQ(shape4.size(), 1); + EXPECT_EQ(shape4[0], 1); + auto type_ptr = abstract->BuildType(); + MS_EXCEPTION_IF_NULL(type_ptr); + auto type = type_ptr->cast(); + auto type_vec = type->elements(); + EXPECT_EQ(type_vec.size(), 4); + MS_EXCEPTION_IF_NULL(type_vec[0]); + auto data_type1 = type_vec[0]->cast()->element(); + MS_EXCEPTION_IF_NULL(data_type1); + EXPECT_EQ(data_type1->type_id(), kNumberTypeFloat32); + MS_EXCEPTION_IF_NULL(type_vec[1]); + auto data_type2 = type_vec[1]->cast()->element(); + MS_EXCEPTION_IF_NULL(data_type2); + EXPECT_EQ(data_type2->type_id(), kNumberTypeFloat32); + MS_EXCEPTION_IF_NULL(type_vec[2]); + auto data_type3 = type_vec[2]->cast()->element(); + MS_EXCEPTION_IF_NULL(data_type3); + EXPECT_EQ(data_type3->type_id(), kNumberTypeFloat32); + MS_EXCEPTION_IF_NULL(type_vec[3]); + auto data_type4 = type_vec[3]->cast()->element(); + MS_EXCEPTION_IF_NULL(data_type4); + EXPECT_EQ(data_type4->type_id(), kNumberTypeFloat32); +} +} // namespace ops +} // namespace mindspore diff --git a/tests/ut/cpp/ops/test_ops_div.cc b/tests/ut/cpp/ops/test_ops_div.cc new file mode 100644 index 00000000000..34e4e615160 --- /dev/null +++ b/tests/ut/cpp/ops/test_ops_div.cc @@ -0,0 +1,63 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include <vector>
+#include <memory>
+#include "common/common_test.h"
+#include "ops/div.h"
+#include "ir/dtype/type.h"
+#include "abstract/dshape.h"
+#include "utils/tensor_construct_utils.h"
+namespace mindspore {
+namespace ops {
+class TestDiv : public UT::Common {
+ public:
+  TestDiv() {}
+  void SetUp() {}
+  void TearDown() {}
+};
+
+TEST_F(TestDiv, test_ops_div) {
+  auto div = std::make_shared<Div>();
+  div->Init();
+  auto tensor_x = TensorConstructUtils::CreateOnesTensor(kNumberTypeFloat32, std::vector<int64_t>{1, 3, 4, 5});
+  auto tensor_y = TensorConstructUtils::CreateOnesTensor(kNumberTypeFloat32, std::vector<int64_t>{1, 3, 4, 5});
+  MS_EXCEPTION_IF_NULL(tensor_x);
+  MS_EXCEPTION_IF_NULL(tensor_y);
+  auto div_abstract = div->Infer({tensor_x->ToAbstract(), tensor_y->ToAbstract()});
+  MS_EXCEPTION_IF_NULL(div_abstract);
+  EXPECT_EQ(div_abstract->isa<abstract::AbstractTensor>(), true);
+  auto shape_ptr = div_abstract->BuildShape();
+  MS_EXCEPTION_IF_NULL(shape_ptr);
+  EXPECT_EQ(shape_ptr->isa<abstract::Shape>(), true);
+  auto div_shape = shape_ptr->cast<abstract::ShapePtr>();
+  MS_EXCEPTION_IF_NULL(div_shape);
+  auto shape_vec = div_shape->shape();
+  auto type = div_abstract->BuildType();
+  MS_EXCEPTION_IF_NULL(type);
+  EXPECT_EQ(type->isa<TensorType>(), true);
+  auto tensor_type = type->cast<TensorTypePtr>();
+  MS_EXCEPTION_IF_NULL(tensor_type);
+  auto elem_type = tensor_type->element();
+  EXPECT_EQ(elem_type->type_id(), kNumberTypeFloat32);
+  EXPECT_EQ(shape_vec.size(), 4);
+  EXPECT_EQ(shape_vec[0], 1);
+  EXPECT_EQ(shape_vec[1], 3);
+  EXPECT_EQ(shape_vec[2], 4);
+  EXPECT_EQ(shape_vec[3], 5);
+}
+
+}  // namespace ops
+}  // namespace mindspore
diff --git a/tests/ut/cpp/ops/test_ops_dropout_grad.cc b/tests/ut/cpp/ops/test_ops_dropout_grad.cc
new file mode 100644
index 00000000000..636f2b41261
--- /dev/null
+++ b/tests/ut/cpp/ops/test_ops_dropout_grad.cc
@@ -0,0 +1,63 @@
+/**
+ * Copyright 2020 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ +#include +#include +#include "common/common_test.h" +#include "ops/grad/dropout_grad.h" +#include "ir/dtype/type.h" +#include "ir/value.h" +#include "abstract/dshape.h" +#include "utils/tensor_construct_utils.h" + +namespace mindspore { +namespace ops { + +class TestDropoutGrad : public UT::Common { + public: + TestDropoutGrad() {} + void SetUp() {} + void TearDown() {} +}; + +TEST_F(TestDropoutGrad, test_ops_dropout_grad1) { + auto dropout_grad = std::make_shared(); + dropout_grad->Init(0.5); + EXPECT_EQ((int64_t)(dropout_grad->get_keep_prob() - 0.5), 0); + auto in = TensorConstructUtils::CreateOnesTensor(kNumberTypeFloat32, std::vector{4}); + MS_EXCEPTION_IF_NULL(in); + auto abstract = dropout_grad->Infer({in->ToAbstract(), in->ToAbstract()}); + MS_EXCEPTION_IF_NULL(abstract); + EXPECT_EQ(abstract->isa(), true); + auto shape_ptr = abstract->BuildShape(); + MS_EXCEPTION_IF_NULL(shape_ptr); + EXPECT_EQ(shape_ptr->isa(), true); + auto shape = shape_ptr->cast(); + MS_EXCEPTION_IF_NULL(shape); + auto shape_vec = shape->shape(); + EXPECT_EQ(shape_vec.size(), 1); + EXPECT_EQ(shape_vec[0], 4); + auto type = abstract->BuildType(); + MS_EXCEPTION_IF_NULL(type); + EXPECT_EQ(type->isa(), true); + auto tensor_type = type->cast(); + MS_EXCEPTION_IF_NULL(tensor_type); + auto data_type = tensor_type->element(); + MS_EXCEPTION_IF_NULL(data_type); + EXPECT_EQ(data_type->type_id(), kNumberTypeFloat32); +} + +} // namespace ops +} // namespace mindspore diff --git a/tests/ut/cpp/ops/test_ops_elu.cc b/tests/ut/cpp/ops/test_ops_elu.cc new file mode 100644 index 00000000000..1458fa2f8af --- /dev/null +++ b/tests/ut/cpp/ops/test_ops_elu.cc @@ -0,0 +1,62 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#include +#include +#include "common/common_test.h" +#include "ops/elu.h" +#include "ir/dtype/type.h" +#include "ir/value.h" +#include "abstract/dshape.h" +#include "utils/tensor_construct_utils.h" + +namespace mindspore { +namespace ops { +class TestELU : public UT::Common { + public: + TestELU() {} + void SetUp() {} + void TearDown() {} +}; + +TEST_F(TestELU, test_ops_elu1) { + auto elu = std::make_shared(); + elu->Init(1); + EXPECT_EQ(elu->get_alpha(), 1); + auto tensor_x = TensorConstructUtils::CreateOnesTensor(kNumberTypeFloat32, std::vector{2, 2}); + MS_EXCEPTION_IF_NULL(tensor_x); + auto abstract = elu->Infer({tensor_x->ToAbstract()}); + MS_EXCEPTION_IF_NULL(abstract); + EXPECT_EQ(abstract->isa(), true); + auto shape_ptr = abstract->BuildShape(); + MS_EXCEPTION_IF_NULL(shape_ptr); + EXPECT_EQ(shape_ptr->isa(), true); + auto shape = shape_ptr->cast(); + MS_EXCEPTION_IF_NULL(shape); + auto shape_vec = shape->shape(); + auto type = abstract->BuildType(); + MS_EXCEPTION_IF_NULL(type); + EXPECT_EQ(type->isa(), true); + auto tensor_type = type->cast(); + MS_EXCEPTION_IF_NULL(tensor_type); + auto data_type = tensor_type->element(); + MS_EXCEPTION_IF_NULL(data_type); + EXPECT_EQ(data_type->type_id(), kNumberTypeFloat32); + EXPECT_EQ(shape_vec.size(), 2); + EXPECT_EQ(shape_vec[0], 2); + EXPECT_EQ(shape_vec[1], 2); +} +} // namespace ops +} // namespace mindspore \ No newline at end of file diff --git a/tests/ut/cpp/ops/test_ops_embeddinglookup.cc b/tests/ut/cpp/ops/test_ops_embeddinglookup.cc new file mode 100644 index 00000000000..4810a0a7bcf --- /dev/null +++ b/tests/ut/cpp/ops/test_ops_embeddinglookup.cc @@ -0,0 +1,67 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#include +#include +#include "common/common_test.h" +#include "ops/embedding_lookup.h" +#include "ir/dtype/type.h" +#include "ir/value.h" +#include "abstract/dshape.h" +#include "utils/tensor_construct_utils.h" + +namespace mindspore { +namespace ops { +class TestEmbeddingLookup : public UT::Common { + public: + TestEmbeddingLookup() {} + void SetUp() {} + void TearDown() {} +}; + +TEST_F(TestEmbeddingLookup, test_ops_embedding_lookup1) { + auto embedding_lookup = std::make_shared(); + embedding_lookup->Init(true); + auto tensor_param = TensorConstructUtils::CreateOnesTensor(kNumberTypeFloat32, std::vector{5, 3}); + auto tensor_indice = TensorConstructUtils::CreateOnesTensor(kNumberTypeInt32, std::vector{2, 3}); + auto offset = MakeValue(4); + MS_EXCEPTION_IF_NULL(tensor_param); + MS_EXCEPTION_IF_NULL(tensor_indice); + MS_EXCEPTION_IF_NULL(offset); + auto abstract = + embedding_lookup->Infer({tensor_param->ToAbstract(), tensor_indice->ToAbstract(), offset->ToAbstract()}); + MS_EXCEPTION_IF_NULL(abstract); + EXPECT_EQ(abstract->isa(), true); + auto shape_ptr = abstract->BuildShape(); + MS_EXCEPTION_IF_NULL(shape_ptr); + EXPECT_EQ(shape_ptr->isa(), true); + auto shape = shape_ptr->cast(); + MS_EXCEPTION_IF_NULL(shape); + auto shape_vec = shape->shape(); + auto type = abstract->BuildType(); + MS_EXCEPTION_IF_NULL(type); + EXPECT_EQ(type->isa(), true); + auto tensor_type = type->cast(); + MS_EXCEPTION_IF_NULL(tensor_type); + auto data_type = tensor_type->element(); + MS_EXCEPTION_IF_NULL(data_type); + EXPECT_EQ(data_type->type_id(), kNumberTypeFloat32); + EXPECT_EQ(shape_vec.size(), 3); + EXPECT_EQ(shape_vec[0], 2); + EXPECT_EQ(shape_vec[1], 3); + EXPECT_EQ(shape_vec[2], 3); +} +} // namespace ops +} // namespace mindspore diff --git a/tests/ut/cpp/ops/test_ops_equal.cc b/tests/ut/cpp/ops/test_ops_equal.cc new file mode 100644 index 00000000000..62c92e5c615 --- /dev/null +++ b/tests/ut/cpp/ops/test_ops_equal.cc @@ -0,0 +1,64 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#include +#include +#include "common/common_test.h" +#include "ops/equal.h" +#include "ir/dtype/type.h" +#include "ir/value.h" +#include "abstract/dshape.h" +#include "utils/tensor_construct_utils.h" + +namespace mindspore { +namespace ops { +class TestEqual : public UT::Common { + public: + TestEqual() {} + void SetUp() {} + void TearDown() {} +}; + +TEST_F(TestEqual, test_ops_equal1) { + auto equal = std::make_shared(); + auto tensor_x1 = TensorConstructUtils::CreateOnesTensor(kNumberTypeFloat32, std::vector{3, 2, 7, 7}); + auto tensor_x2 = TensorConstructUtils::CreateOnesTensor(kNumberTypeFloat32, std::vector{3, 2, 7, 7}); + MS_EXCEPTION_IF_NULL(tensor_x1); + MS_EXCEPTION_IF_NULL(tensor_x2); + auto abstract = equal->Infer({tensor_x1->ToAbstract(), tensor_x2->ToAbstract()}); + MS_EXCEPTION_IF_NULL(abstract); + EXPECT_EQ(abstract->isa(), true); + auto shape_ptr = abstract->BuildShape(); + MS_EXCEPTION_IF_NULL(shape_ptr); + EXPECT_EQ(shape_ptr->isa(), true); + auto shape = shape_ptr->cast(); + MS_EXCEPTION_IF_NULL(shape); + auto shape_vec = shape->shape(); + auto type = abstract->BuildType(); + MS_EXCEPTION_IF_NULL(type); + EXPECT_EQ(type->isa(), true); + auto tensor_type = type->cast(); + MS_EXCEPTION_IF_NULL(tensor_type); + auto data_type = tensor_type->element(); + MS_EXCEPTION_IF_NULL(data_type); + EXPECT_EQ(data_type->type_id(), kNumberTypeFloat32); + EXPECT_EQ(shape_vec.size(), 4); + EXPECT_EQ(shape_vec[0], 3); + EXPECT_EQ(shape_vec[1], 2); + EXPECT_EQ(shape_vec[2], 7); + EXPECT_EQ(shape_vec[3], 7); +} +} // namespace ops +} // namespace mindspore \ No newline at end of file diff --git a/tests/ut/cpp/ops/test_ops_exp.cc b/tests/ut/cpp/ops/test_ops_exp.cc new file mode 100644 index 00000000000..aed8bf3767c --- /dev/null +++ b/tests/ut/cpp/ops/test_ops_exp.cc @@ -0,0 +1,62 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#include +#include +#include "common/common_test.h" +#include "ops/exp.h" +#include "ir/dtype/type.h" +#include "ir/value.h" +#include "abstract/dshape.h" +#include "utils/tensor_construct_utils.h" + +namespace mindspore { +namespace ops { +class TestExp : public UT::Common { + public: + TestExp() {} + void SetUp() {} + void TearDown() {} +}; + +TEST_F(TestExp, test_ops_exp1) { + auto exp = std::make_shared(); + auto tensor_x = TensorConstructUtils::CreateOnesTensor(kNumberTypeFloat16, std::vector{2, 3, 4, 5}); + MS_EXCEPTION_IF_NULL(tensor_x); + auto abstract = exp->Infer({tensor_x->ToAbstract()}); + MS_EXCEPTION_IF_NULL(abstract); + EXPECT_EQ(abstract->isa(), true); + auto shape_ptr = abstract->BuildShape(); + MS_EXCEPTION_IF_NULL(shape_ptr); + EXPECT_EQ(shape_ptr->isa(), true); + auto shape = shape_ptr->cast(); + MS_EXCEPTION_IF_NULL(shape); + auto shape_vec = shape->shape(); + auto type = abstract->BuildType(); + MS_EXCEPTION_IF_NULL(type); + EXPECT_EQ(type->isa(), true); + auto tensor_type = type->cast(); + MS_EXCEPTION_IF_NULL(tensor_type); + auto data_type = tensor_type->element(); + MS_EXCEPTION_IF_NULL(data_type); + EXPECT_EQ(data_type->type_id(), kNumberTypeFloat16); + EXPECT_EQ(shape_vec.size(), 4); + EXPECT_EQ(shape_vec[0], 2); + EXPECT_EQ(shape_vec[1], 3); + EXPECT_EQ(shape_vec[2], 4); + EXPECT_EQ(shape_vec[3], 5); +} +} // namespace ops +} // namespace mindspore diff --git a/tests/ut/cpp/ops/test_ops_fake_quant_with_min_max_vars_per_channel.cc b/tests/ut/cpp/ops/test_ops_fake_quant_with_min_max_vars_per_channel.cc new file mode 100644 index 00000000000..07176722076 --- /dev/null +++ b/tests/ut/cpp/ops/test_ops_fake_quant_with_min_max_vars_per_channel.cc @@ -0,0 +1,72 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#include +#include +#include "common/common_test.h" +#include "ops/fake_quant_with_min_max_vars_per_channel.h" +#include "ir/dtype/type.h" +#include "ir/value.h" +#include "abstract/dshape.h" +#include "utils/tensor_construct_utils.h" + +namespace mindspore { +namespace ops { + +class TestFakeQuantWithMinMaxVarsPerChannel : public UT::Common { + public: + TestFakeQuantWithMinMaxVarsPerChannel() {} + void SetUp() {} + void TearDown() {} +}; + +TEST_F(TestFakeQuantWithMinMaxVarsPerChannel, test_ops_fake_quant_with_min_max_vars_per_channel1) { + auto fake_quant_with_min_max_vars_per_channel = std::make_shared(); + fake_quant_with_min_max_vars_per_channel->Init(); + EXPECT_EQ(fake_quant_with_min_max_vars_per_channel->get_num_bits(), 8); + EXPECT_EQ(fake_quant_with_min_max_vars_per_channel->get_narrow_range(), false); + auto input0 = TensorConstructUtils::CreateOnesTensor(kNumberTypeFloat32, std::vector{3, 16, 3, 4}); + auto input1 = TensorConstructUtils::CreateOnesTensor(kNumberTypeFloat32, std::vector{4}); + auto input2 = TensorConstructUtils::CreateOnesTensor(kNumberTypeFloat32, std::vector{4}); + MS_EXCEPTION_IF_NULL(input0); + MS_EXCEPTION_IF_NULL(input1); + MS_EXCEPTION_IF_NULL(input2); + auto abstract = + fake_quant_with_min_max_vars_per_channel->Infer({input0->ToAbstract(), input1->ToAbstract(), input2->ToAbstract()}); + MS_EXCEPTION_IF_NULL(abstract); + EXPECT_EQ(abstract->isa(), true); + auto shape_ptr = abstract->BuildShape(); + MS_EXCEPTION_IF_NULL(shape_ptr); + EXPECT_EQ(shape_ptr->isa(), true); + auto shape = shape_ptr->cast(); + MS_EXCEPTION_IF_NULL(shape); + auto shape_vec = shape->shape(); + EXPECT_EQ(shape_vec.size(), 4); + EXPECT_EQ(shape_vec[0], 3); + EXPECT_EQ(shape_vec[1], 16); + EXPECT_EQ(shape_vec[2], 3); + EXPECT_EQ(shape_vec[3], 4); + auto type = abstract->BuildType(); + MS_EXCEPTION_IF_NULL(type); + EXPECT_EQ(type->isa(), true); + auto tensor_type = type->cast(); + MS_EXCEPTION_IF_NULL(tensor_type); + auto data_type = tensor_type->element(); + MS_EXCEPTION_IF_NULL(data_type); + EXPECT_EQ(data_type->type_id(), kNumberTypeFloat32); +} + +} // namespace ops +} // namespace mindspore diff --git a/tests/ut/cpp/ops/test_ops_fftimag.cc b/tests/ut/cpp/ops/test_ops_fftimag.cc new file mode 100644 index 00000000000..d52381d28ad --- /dev/null +++ b/tests/ut/cpp/ops/test_ops_fftimag.cc @@ -0,0 +1,59 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+#include <vector>
+#include <memory>
+#include "common/common_test.h"
+#include "ops/fft_imag.h"
+#include "ir/dtype/type.h"
+#include "abstract/dshape.h"
+#include "utils/tensor_construct_utils.h"
+namespace mindspore {
+namespace ops {
+class TestFftImag : public UT::Common {
+ public:
+  TestFftImag() {}
+  void SetUp() {}
+  void TearDown() {}
+};
+
+TEST_F(TestFftImag, test_ops_fftimag) {
+  auto fftimag = std::make_shared<FftImag>();
+  fftimag->Init();
+  auto tensor_x = TensorConstructUtils::CreateOnesTensor(kNumberTypeFloat32, std::vector<int64_t>{1, 3, 4});
+  MS_EXCEPTION_IF_NULL(tensor_x);
+  auto fftimag_abstract = fftimag->Infer({tensor_x->ToAbstract()});
+  MS_EXCEPTION_IF_NULL(fftimag_abstract);
+  EXPECT_EQ(fftimag_abstract->isa<abstract::AbstractTensor>(), true);
+  auto shape_ptr = fftimag_abstract->BuildShape();
+  MS_EXCEPTION_IF_NULL(shape_ptr);
+  EXPECT_EQ(shape_ptr->isa<abstract::Shape>(), true);
+  auto fftimag_shape = shape_ptr->cast<abstract::ShapePtr>();
+  MS_EXCEPTION_IF_NULL(fftimag_shape);
+  auto shape_vec = fftimag_shape->shape();
+  auto type = fftimag_abstract->BuildType();
+  MS_EXCEPTION_IF_NULL(type);
+  EXPECT_EQ(type->isa<TensorType>(), true);
+  auto tensor_type = type->cast<TensorTypePtr>();
+  MS_EXCEPTION_IF_NULL(tensor_type);
+  auto elem_type = tensor_type->element();
+  EXPECT_EQ(elem_type->type_id(), kNumberTypeFloat32);
+  EXPECT_EQ(shape_vec.size(), 2);
+  EXPECT_EQ(shape_vec[0], 1);
+  EXPECT_EQ(shape_vec[1], 3);
+}
+
+}  // namespace ops
+}  // namespace mindspore
diff --git a/tests/ut/cpp/ops/test_ops_fftreal.cc b/tests/ut/cpp/ops/test_ops_fftreal.cc
new file mode 100644
index 00000000000..bd49b927815
--- /dev/null
+++ b/tests/ut/cpp/ops/test_ops_fftreal.cc
@@ -0,0 +1,61 @@
+/**
+ * Copyright 2020 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ +#include +#include +#include "common/common_test.h" +#include "ops/fft_real.h" +#include "ir/dtype/type.h" +#include "ir/value.h" +#include "abstract/dshape.h" +#include "utils/tensor_construct_utils.h" + +namespace mindspore { +namespace ops { +class TestFftReal : public UT::Common { + public: + TestFftReal() {} + void SetUp() {} + void TearDown() {} +}; + +TEST_F(TestFftReal, test_ops_fft_real1) { + auto fft_real = std::make_shared(); + auto tensor_x = TensorConstructUtils::CreateOnesTensor(kNumberTypeFloat32, std::vector{2, 3, 4, 5}); + MS_EXCEPTION_IF_NULL(tensor_x); + auto abstract = fft_real->Infer({tensor_x->ToAbstract()}); + MS_EXCEPTION_IF_NULL(abstract); + EXPECT_EQ(abstract->isa(), true); + auto shape_ptr = abstract->BuildShape(); + MS_EXCEPTION_IF_NULL(shape_ptr); + EXPECT_EQ(shape_ptr->isa(), true); + auto shape = shape_ptr->cast(); + MS_EXCEPTION_IF_NULL(shape); + auto shape_vec = shape->shape(); + auto type = abstract->BuildType(); + MS_EXCEPTION_IF_NULL(type); + EXPECT_EQ(type->isa(), true); + auto tensor_type = type->cast(); + MS_EXCEPTION_IF_NULL(tensor_type); + auto data_type = tensor_type->element(); + MS_EXCEPTION_IF_NULL(data_type); + EXPECT_EQ(data_type->type_id(), kNumberTypeFloat32); + EXPECT_EQ(shape_vec.size(), 3); + EXPECT_EQ(shape_vec[0], 2); + EXPECT_EQ(shape_vec[1], 3); + EXPECT_EQ(shape_vec[2], 4); +} +} // namespace ops +} // namespace mindspore diff --git a/tests/ut/cpp/ops/test_ops_fill.cc b/tests/ut/cpp/ops/test_ops_fill.cc new file mode 100644 index 00000000000..ca8f9884079 --- /dev/null +++ b/tests/ut/cpp/ops/test_ops_fill.cc @@ -0,0 +1,66 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#include +#include +#include "common/common_test.h" +#include "ops/fill.h" +#include "ir/dtype/type.h" +#include "ir/value.h" +#include "abstract/dshape.h" +#include "utils/tensor_construct_utils.h" + +namespace mindspore { +namespace ops { +class TestFill : public UT::Common { + public: + TestFill() {} + void SetUp() {} + void TearDown() {} +}; + +TEST_F(TestFill, test_ops_fill1) { + auto fill = std::make_shared(); + auto dtype = kFloat32; + auto dims = MakeValue(std::vector{2, 3, 4, 5}); + auto x = MakeValue(4); + MS_EXCEPTION_IF_NULL(dtype); + MS_EXCEPTION_IF_NULL(dims); + MS_EXCEPTION_IF_NULL(x); + auto abstract = fill->Infer({dtype->ToAbstract(), dims->ToAbstract(), x->ToAbstract()}); + MS_EXCEPTION_IF_NULL(abstract); + EXPECT_EQ(abstract->isa(), true); + auto shape_ptr = abstract->BuildShape(); + MS_EXCEPTION_IF_NULL(shape_ptr); + EXPECT_EQ(shape_ptr->isa(), true); + auto shape = shape_ptr->cast(); + MS_EXCEPTION_IF_NULL(shape); + auto shape_vec = shape->shape(); + auto type = abstract->BuildType(); + MS_EXCEPTION_IF_NULL(type); + EXPECT_EQ(type->isa(), true); + auto tensor_type = type->cast(); + MS_EXCEPTION_IF_NULL(tensor_type); + auto data_type = tensor_type->element(); + MS_EXCEPTION_IF_NULL(data_type); + EXPECT_EQ(data_type->type_id(), kNumberTypeFloat32); + EXPECT_EQ(shape_vec.size(), 4); + EXPECT_EQ(shape_vec[0], 2); + EXPECT_EQ(shape_vec[1], 3); + EXPECT_EQ(shape_vec[2], 4); + EXPECT_EQ(shape_vec[3], 5); +} +} // namespace ops +} // namespace mindspore diff --git a/tests/ut/cpp/ops/test_ops_flatten.cc b/tests/ut/cpp/ops/test_ops_flatten.cc new file mode 100644 index 00000000000..310abf3c2e5 --- /dev/null +++ b/tests/ut/cpp/ops/test_ops_flatten.cc @@ -0,0 +1,60 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+#include <vector>
+#include <memory>
+#include "common/common_test.h"
+#include "ops/flatten.h"
+#include "ir/dtype/type.h"
+#include "abstract/dshape.h"
+#include "utils/tensor_construct_utils.h"
+namespace mindspore {
+namespace ops {
+class TestFlatten : public UT::Common {
+ public:
+  TestFlatten() {}
+  void SetUp() {}
+  void TearDown() {}
+};
+
+TEST_F(TestFlatten, test_ops_flatten) {
+  auto flatten = std::make_shared<Flatten>();
+  flatten->Init();
+  auto tensor_x = TensorConstructUtils::CreateOnesTensor(kNumberTypeFloat32, std::vector<int64_t>{1, 2, 3, 4});
+  MS_EXCEPTION_IF_NULL(tensor_x);
+
+  auto flatten_abstract = flatten->Infer({tensor_x->ToAbstract()});
+  MS_EXCEPTION_IF_NULL(flatten_abstract);
+  EXPECT_EQ(flatten_abstract->isa<abstract::AbstractTensor>(), true);
+  auto shape_ptr = flatten_abstract->BuildShape();
+  MS_EXCEPTION_IF_NULL(shape_ptr);
+  EXPECT_EQ(shape_ptr->isa<abstract::Shape>(), true);
+  auto flatten_shape = shape_ptr->cast<abstract::ShapePtr>();
+  MS_EXCEPTION_IF_NULL(flatten_shape);
+  auto shape_vec = flatten_shape->shape();
+  auto type = flatten_abstract->BuildType();
+  MS_EXCEPTION_IF_NULL(type);
+  EXPECT_EQ(type->isa<TensorType>(), true);
+  auto tensor_type = type->cast<TensorTypePtr>();
+  MS_EXCEPTION_IF_NULL(tensor_type);
+  auto elem_type = tensor_type->element();
+  EXPECT_EQ(elem_type->type_id(), kNumberTypeFloat32);
+  EXPECT_EQ(shape_vec.size(), 2);
+  EXPECT_EQ(shape_vec[0], 1);
+  EXPECT_EQ(shape_vec[1], 24);
+}
+
+}  // namespace ops
+}  // namespace mindspore
diff --git a/tests/ut/cpp/ops/test_ops_flattengrad.cc b/tests/ut/cpp/ops/test_ops_flattengrad.cc
new file mode 100644
index 00000000000..d03592375c9
--- /dev/null
+++ b/tests/ut/cpp/ops/test_ops_flattengrad.cc
@@ -0,0 +1,63 @@
+/**
+ * Copyright 2020 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ +#include +#include +#include "common/common_test.h" +#include "ops/grad/flatten_grad.h" +#include "ir/dtype/type.h" +#include "ir/value.h" +#include "abstract/dshape.h" +#include "utils/tensor_construct_utils.h" + +namespace mindspore { +namespace ops { +class TestFlattenGrad : public UT::Common { + public: + TestFlattenGrad() {} + void SetUp() {} + void TearDown() {} +}; + +TEST_F(TestFlattenGrad, test_ops_flatten_grad1) { + auto flatten_grad = std::make_shared(); + auto tensor_x = TensorConstructUtils::CreateOnesTensor(kNumberTypeFloat16, std::vector{120}); + auto in_shape = MakeValue(std::vector{2, 3, 4, 5}); + MS_EXCEPTION_IF_NULL(tensor_x); + auto abstract = flatten_grad->Infer({tensor_x->ToAbstract(), in_shape->ToAbstract()}); + MS_EXCEPTION_IF_NULL(abstract); + EXPECT_EQ(abstract->isa(), true); + auto shape_ptr = abstract->BuildShape(); + MS_EXCEPTION_IF_NULL(shape_ptr); + EXPECT_EQ(shape_ptr->isa(), true); + auto shape = shape_ptr->cast(); + MS_EXCEPTION_IF_NULL(shape); + auto shape_vec = shape->shape(); + auto type = abstract->BuildType(); + MS_EXCEPTION_IF_NULL(type); + EXPECT_EQ(type->isa(), true); + auto tensor_type = type->cast(); + MS_EXCEPTION_IF_NULL(tensor_type); + auto data_type = tensor_type->element(); + MS_EXCEPTION_IF_NULL(data_type); + EXPECT_EQ(data_type->type_id(), kNumberTypeFloat16); + EXPECT_EQ(shape_vec.size(), 4); + EXPECT_EQ(shape_vec[0], 2); + EXPECT_EQ(shape_vec[1], 3); + EXPECT_EQ(shape_vec[2], 4); + EXPECT_EQ(shape_vec[3], 5); +} +} // namespace ops +} // namespace mindspore diff --git a/tests/ut/cpp/ops/test_ops_floor.cc b/tests/ut/cpp/ops/test_ops_floor.cc new file mode 100644 index 00000000000..a3bec56ff28 --- /dev/null +++ b/tests/ut/cpp/ops/test_ops_floor.cc @@ -0,0 +1,61 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#include +#include +#include "common/common_test.h" +#include "ops/floor.h" +#include "ir/dtype/type.h" +#include "ir/value.h" +#include "abstract/dshape.h" +#include "utils/tensor_construct_utils.h" + +namespace mindspore { +namespace ops { +class TestFloor : public UT::Common { + public: + TestFloor() {} + void SetUp() {} + void TearDown() {} +}; + +TEST_F(TestFloor, test_ops_floor1) { + auto floor = std::make_shared(); + floor->Init(); + auto tensor_x = TensorConstructUtils::CreateOnesTensor(kNumberTypeFloat32, std::vector{2, 2}); + MS_EXCEPTION_IF_NULL(tensor_x); + auto abstract = floor->Infer({tensor_x->ToAbstract()}); + MS_EXCEPTION_IF_NULL(abstract); + EXPECT_EQ(abstract->isa(), true); + auto shape_ptr = abstract->BuildShape(); + MS_EXCEPTION_IF_NULL(shape_ptr); + EXPECT_EQ(shape_ptr->isa(), true); + auto shape = shape_ptr->cast(); + MS_EXCEPTION_IF_NULL(shape); + auto shape_vec = shape->shape(); + auto type = abstract->BuildType(); + MS_EXCEPTION_IF_NULL(type); + EXPECT_EQ(type->isa(), true); + auto tensor_type = type->cast(); + MS_EXCEPTION_IF_NULL(tensor_type); + auto data_type = tensor_type->element(); + MS_EXCEPTION_IF_NULL(data_type); + EXPECT_EQ(data_type->type_id(), kNumberTypeFloat32); + EXPECT_EQ(shape_vec.size(), 2); + EXPECT_EQ(shape_vec[0], 2); + EXPECT_EQ(shape_vec[1], 2); +} +} // namespace ops +} // namespace mindspore \ No newline at end of file diff --git a/tests/ut/cpp/ops/test_ops_full_connection.cc b/tests/ut/cpp/ops/test_ops_full_connection.cc new file mode 100644 index 00000000000..b0d5a4487ac --- /dev/null +++ b/tests/ut/cpp/ops/test_ops_full_connection.cc @@ -0,0 +1,125 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#include +#include +#include "common/common_test.h" +#include "ops/fusion/full_connection.h" +#include "ir/dtype/type.h" +#include "ir/value.h" +#include "abstract/dshape.h" +#include "utils/tensor_construct_utils.h" + +namespace mindspore { +namespace ops { +class TestFullConnection : public UT::Common { + public: + TestFullConnection() {} + void SetUp() {} + void TearDown() {} +}; + +TEST_F(TestFullConnection, test_full_connection_1) { + auto op = std::make_shared(); + bool has_bias = false; + bool use_axis = false; + int64_t axis = 3; + op->Init(has_bias, axis, use_axis, NO_ACTIVATION); + auto tensor_1 = TensorConstructUtils::CreateOnesTensor(kNumberTypeFloat16, std::vector{2, 3}); + auto tensor_2 = TensorConstructUtils::CreateOnesTensor(kNumberTypeFloat16, std::vector{2, 3}); + auto abstract = op->Infer({tensor_1->ToAbstract(), tensor_2->ToAbstract()}); + MS_EXCEPTION_IF_NULL(abstract); + EXPECT_EQ(abstract->isa(), true); + auto shape_ptr = abstract->BuildShape(); + MS_EXCEPTION_IF_NULL(shape_ptr); + EXPECT_EQ(shape_ptr->isa(), true); + auto shape = shape_ptr->cast(); + MS_EXCEPTION_IF_NULL(shape); + auto shape_vec = shape->shape(); + auto type = abstract->BuildType(); + MS_EXCEPTION_IF_NULL(type); + EXPECT_EQ(type->isa(), true); + auto tensor_type = type->cast(); + MS_EXCEPTION_IF_NULL(tensor_type); + auto data_type = tensor_type->element(); + MS_EXCEPTION_IF_NULL(data_type); + EXPECT_EQ(data_type->type_id(), kNumberTypeFloat16); + EXPECT_EQ(shape_vec.size(), 2); + EXPECT_EQ(shape_vec[0], 2); + EXPECT_EQ(shape_vec[1], 2); +} + +TEST_F(TestFullConnection, test_full_connection_2) { + auto op = std::make_shared(); + bool has_bias = true; + bool use_axis = false; + int64_t axis = 1; + op->Init(has_bias, axis, use_axis, NO_ACTIVATION); + auto tensor_1 = TensorConstructUtils::CreateOnesTensor(kNumberTypeFloat16, std::vector{2, 3}); + auto tensor_2 = TensorConstructUtils::CreateOnesTensor(kNumberTypeFloat16, std::vector{2, 3}); + auto tensor_3 = TensorConstructUtils::CreateOnesTensor(kNumberTypeFloat16, std::vector{2, 2}); + auto abstract = op->Infer({tensor_1->ToAbstract(), tensor_2->ToAbstract(), tensor_3->ToAbstract()}); + MS_EXCEPTION_IF_NULL(abstract); + EXPECT_EQ(abstract->isa(), true); + auto shape_ptr = abstract->BuildShape(); + MS_EXCEPTION_IF_NULL(shape_ptr); + EXPECT_EQ(shape_ptr->isa(), true); + auto shape = shape_ptr->cast(); + MS_EXCEPTION_IF_NULL(shape); + auto shape_vec = shape->shape(); + auto type = abstract->BuildType(); + MS_EXCEPTION_IF_NULL(type); + EXPECT_EQ(type->isa(), true); + auto tensor_type = type->cast(); + MS_EXCEPTION_IF_NULL(tensor_type); + auto data_type = tensor_type->element(); + MS_EXCEPTION_IF_NULL(data_type); + EXPECT_EQ(data_type->type_id(), kNumberTypeFloat16); + EXPECT_EQ(shape_vec.size(), 2); + EXPECT_EQ(shape_vec[0], 2); + EXPECT_EQ(shape_vec[1], 2); +} + +TEST_F(TestFullConnection, test_full_connection_3) { + auto op = std::make_shared(); + bool has_bias = false; + bool use_axis = true; + int64_t axis = 1; + op->Init(has_bias, axis, use_axis, NO_ACTIVATION); + auto tensor_1 = TensorConstructUtils::CreateOnesTensor(kNumberTypeFloat16, std::vector{2, 3}); + auto tensor_2 = TensorConstructUtils::CreateOnesTensor(kNumberTypeFloat16, std::vector{2, 3}); + auto abstract = op->Infer({tensor_1->ToAbstract(), tensor_2->ToAbstract()}); + MS_EXCEPTION_IF_NULL(abstract); + EXPECT_EQ(abstract->isa(), true); + auto shape_ptr = abstract->BuildShape(); + MS_EXCEPTION_IF_NULL(shape_ptr); + EXPECT_EQ(shape_ptr->isa(), true); + auto shape = shape_ptr->cast(); 
+ MS_EXCEPTION_IF_NULL(shape); + auto shape_vec = shape->shape(); + auto type = abstract->BuildType(); + MS_EXCEPTION_IF_NULL(type); + EXPECT_EQ(type->isa(), true); + auto tensor_type = type->cast(); + MS_EXCEPTION_IF_NULL(tensor_type); + auto data_type = tensor_type->element(); + MS_EXCEPTION_IF_NULL(data_type); + EXPECT_EQ(data_type->type_id(), kNumberTypeFloat16); + EXPECT_EQ(shape_vec.size(), 2); + EXPECT_EQ(shape_vec[0], 2); + EXPECT_EQ(shape_vec[1], 2); +} +} // namespace ops +} // namespace mindspore diff --git a/tests/ut/cpp/ops/test_ops_gather.cc b/tests/ut/cpp/ops/test_ops_gather.cc new file mode 100644 index 00000000000..94395eec5fc --- /dev/null +++ b/tests/ut/cpp/ops/test_ops_gather.cc @@ -0,0 +1,83 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#include +#include +#include "common/common_test.h" +#include "ops/gather.h" +#include "ir/dtype/type.h" +#include "ir/value.h" +#include "abstract/dshape.h" +#include "utils/tensor_construct_utils.h" + +namespace mindspore { +namespace ops { +class TestGather : public UT::Common { + public: + TestGather() {} + void SetUp() {} + void TearDown() {} +}; + +TEST_F(TestGather, test_gather) { + auto gather = std::make_shared(); + gather->Init(); + auto tensor_x = std::make_shared(kNumberTypeInt32, std::vector{2, 2}); + MS_EXCEPTION_IF_NULL(tensor_x); + auto tensor_x_data = reinterpret_cast(tensor_x->data_c()); + *tensor_x_data = 1; + tensor_x_data++; + *tensor_x_data = 2; + tensor_x_data++; + *tensor_x_data = 3; + tensor_x_data++; + *tensor_x_data = 4; + tensor_x_data++; + auto index = std::make_shared(kNumberTypeInt32, std::vector{2, 2}); + MS_EXCEPTION_IF_NULL(index); + auto index_data = reinterpret_cast(index->data_c()); + *index_data = 0; + index_data++; + *index_data = 0; + index_data++; + *index_data = 1; + index_data++; + *index_data = 0; + index_data++; + auto dim = MakeValue(1); + MS_EXCEPTION_IF_NULL(dim); + auto abstract = gather->Infer({tensor_x->ToAbstract(), dim->ToAbstract(), index->ToAbstract()}); + MS_EXCEPTION_IF_NULL(abstract); + EXPECT_EQ(abstract->isa(), true); + auto shape_ptr = abstract->BuildShape(); + MS_EXCEPTION_IF_NULL(shape_ptr); + EXPECT_EQ(shape_ptr->isa(), true); + auto shape = shape_ptr->cast(); + MS_EXCEPTION_IF_NULL(shape); + auto shape_vec = shape->shape(); + auto type = abstract->BuildType(); + MS_EXCEPTION_IF_NULL(type); + EXPECT_EQ(type->isa(), true); + auto tensor_type = type->cast(); + MS_EXCEPTION_IF_NULL(tensor_type); + auto data_type = tensor_type->element(); + MS_EXCEPTION_IF_NULL(data_type); + EXPECT_EQ(data_type->type_id(), kNumberTypeInt32); + EXPECT_EQ(shape_vec.size(), 2); + EXPECT_EQ(shape_vec[0], 2); + EXPECT_EQ(shape_vec[1], 2); +} +} // namespace ops +} // namespace mindspore diff --git a/tests/ut/cpp/ops/test_ops_hashtable_lookup.cc b/tests/ut/cpp/ops/test_ops_hashtable_lookup.cc new file mode 100644 index 00000000000..ea9f926e34c --- /dev/null +++ b/tests/ut/cpp/ops/test_ops_hashtable_lookup.cc @@ -0,0 +1,75 @@ +/** 
+ * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#include +#include +#include "common/common_test.h" +#include "ops/hashtable_lookup.h" +#include "ir/dtype/type.h" +#include "ir/value.h" +#include "abstract/dshape.h" +#include "utils/tensor_construct_utils.h" + +namespace mindspore { +namespace ops { + +class TestHashtableLookup : public UT::Common { + public: + TestHashtableLookup() {} + void SetUp() {} + void TearDown() {} +}; + +TEST_F(TestHashtableLookup, test_ops_hashtable_lookup1) { + auto hashtable_lookup = std::make_shared(); + hashtable_lookup->Init(); + auto inputs0 = TensorConstructUtils::CreateOnesTensor(kNumberTypeInt32, std::vector{4, 3}); + auto inputs1 = TensorConstructUtils::CreateOnesTensor(kNumberTypeFloat32, std::vector{1}); + auto inputs2 = TensorConstructUtils::CreateOnesTensor(kNumberTypeFloat32, std::vector{1}); + MS_EXCEPTION_IF_NULL(inputs0); + MS_EXCEPTION_IF_NULL(inputs1); + MS_EXCEPTION_IF_NULL(inputs2); + auto abstract = hashtable_lookup->Infer({inputs0->ToAbstract(), inputs1->ToAbstract(), inputs2->ToAbstract()}); + MS_EXCEPTION_IF_NULL(abstract); + EXPECT_EQ(abstract->isa(), true); + auto shape_ptr = abstract->BuildShape(); + MS_EXCEPTION_IF_NULL(shape_ptr); + EXPECT_EQ(shape_ptr->isa(), true); + auto shape = shape_ptr->cast(); + MS_EXCEPTION_IF_NULL(shape); + auto shape_vec = shape->shape(); + EXPECT_EQ(shape_vec.size(), 2); + auto shape1 = shape_vec[0]->cast()->shape(); + EXPECT_EQ(shape1.size(), 0); + auto shape2 = shape_vec[1]->cast()->shape(); + EXPECT_EQ(shape2.size(), 1); + EXPECT_EQ(shape2[0], 4); + auto type_ptr = abstract->BuildType(); + MS_EXCEPTION_IF_NULL(type_ptr); + auto type = type_ptr->cast(); + MS_EXCEPTION_IF_NULL(type); + auto type_vec = type->elements(); + MS_EXCEPTION_IF_NULL(type_vec[0]); + auto data0_type = type_vec[0]->cast()->element(); + MS_EXCEPTION_IF_NULL(data0_type); + EXPECT_EQ(data0_type->type_id(), kNumberTypeFloat32); + MS_EXCEPTION_IF_NULL(type_vec[1]); + auto data1_type = type_vec[1]->cast()->element(); + MS_EXCEPTION_IF_NULL(data1_type); + EXPECT_EQ(data1_type->type_id(), kNumberTypeInt8); +} + +} // namespace ops +} // namespace mindspore diff --git a/tests/ut/cpp/ops/test_ops_l2normalize.cc b/tests/ut/cpp/ops/test_ops_l2normalize.cc new file mode 100644 index 00000000000..0223ee9941a --- /dev/null +++ b/tests/ut/cpp/ops/test_ops_l2normalize.cc @@ -0,0 +1,63 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +#include +#include +#include "common/common_test.h" +#include "ops/l2_normalize.h" +#include "ir/dtype/type.h" +#include "ir/value.h" +#include "abstract/dshape.h" +#include "utils/tensor_construct_utils.h" + +namespace mindspore { +namespace ops { +class TestL2Normalize : public UT::Common { + public: + TestL2Normalize() {} + void SetUp() {} + void TearDown() {} +}; + +TEST_F(TestL2Normalize, test_ops_l2_normalize1) { + auto l2_normalize = std::make_shared(); + l2_normalize->Init(std::vector{0, 1, 2}); + auto tensor_x = TensorConstructUtils::CreateOnesTensor(kNumberTypeFloat32, std::vector{2, 3, 4, 5}); + MS_EXCEPTION_IF_NULL(tensor_x); + auto abstract = l2_normalize->Infer({tensor_x->ToAbstract()}); + MS_EXCEPTION_IF_NULL(abstract); + EXPECT_EQ(abstract->isa(), true); + auto shape_ptr = abstract->BuildShape(); + MS_EXCEPTION_IF_NULL(shape_ptr); + EXPECT_EQ(shape_ptr->isa(), true); + auto shape = shape_ptr->cast(); + MS_EXCEPTION_IF_NULL(shape); + auto shape_vec = shape->shape(); + auto type = abstract->BuildType(); + MS_EXCEPTION_IF_NULL(type); + EXPECT_EQ(type->isa(), true); + auto tensor_type = type->cast(); + MS_EXCEPTION_IF_NULL(tensor_type); + auto data_type = tensor_type->element(); + MS_EXCEPTION_IF_NULL(data_type); + EXPECT_EQ(data_type->type_id(), kNumberTypeFloat32); + EXPECT_EQ(shape_vec.size(), 4); + EXPECT_EQ(shape_vec[0], 2); + EXPECT_EQ(shape_vec[1], 3); + EXPECT_EQ(shape_vec[2], 4); + EXPECT_EQ(shape_vec[3], 5); +} +} // namespace ops +} // namespace mindspore diff --git a/tests/ut/cpp/ops/test_ops_leakyrelu.cc b/tests/ut/cpp/ops/test_ops_leakyrelu.cc new file mode 100644 index 00000000000..fa07d25f42b --- /dev/null +++ b/tests/ut/cpp/ops/test_ops_leakyrelu.cc @@ -0,0 +1,61 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#include +#include +#include "common/common_test.h" +#include "ops/leaky_relu.h" +#include "ir/dtype/type.h" +#include "ir/value.h" +#include "abstract/dshape.h" +#include "utils/tensor_construct_utils.h" + +namespace mindspore { +namespace ops { +class TestLeakyRelu : public UT::Common { + public: + TestLeakyRelu() {} + void SetUp() {} + void TearDown() {} +}; + +TEST_F(TestLeakyRelu, test_ops_leakyrelu1) { + auto leakyrelu = std::make_shared(); + auto tensor_x = TensorConstructUtils::CreateOnesTensor(kNumberTypeFloat32, std::vector{3, 4, 5}); + MS_EXCEPTION_IF_NULL(tensor_x); + auto abstract = leakyrelu->Infer({tensor_x->ToAbstract()}); + MS_EXCEPTION_IF_NULL(abstract); + EXPECT_EQ(abstract->isa(), true); + auto shape_ptr = abstract->BuildShape(); + MS_EXCEPTION_IF_NULL(shape_ptr); + EXPECT_EQ(shape_ptr->isa(), true); + auto shape = shape_ptr->cast(); + MS_EXCEPTION_IF_NULL(shape); + auto shape_vec = shape->shape(); + auto type = abstract->BuildType(); + MS_EXCEPTION_IF_NULL(type); + EXPECT_EQ(type->isa(), true); + auto tensor_type = type->cast(); + MS_EXCEPTION_IF_NULL(tensor_type); + auto data_type = tensor_type->element(); + MS_EXCEPTION_IF_NULL(data_type); + EXPECT_EQ(data_type->type_id(), kNumberTypeFloat32); + EXPECT_EQ(shape_vec.size(), 3); + EXPECT_EQ(shape_vec[0], 3); + EXPECT_EQ(shape_vec[1], 4); + EXPECT_EQ(shape_vec[2], 5); +} +} // namespace ops +} // namespace mindspore diff --git a/tests/ut/cpp/ops/test_ops_less.cc b/tests/ut/cpp/ops/test_ops_less.cc new file mode 100644 index 00000000000..ea807144d9f --- /dev/null +++ b/tests/ut/cpp/ops/test_ops_less.cc @@ -0,0 +1,63 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#include +#include +#include "common/common_test.h" +#include "ops/less.h" +#include "ir/dtype/type.h" +#include "ir/value.h" +#include "abstract/dshape.h" +#include "utils/tensor_construct_utils.h" + +namespace mindspore { +namespace ops { + +class TestLess : public UT::Common { + public: + TestLess() {} + void SetUp() {} + void TearDown() {} +}; + +TEST_F(TestLess, test_ops_less1) { + auto less = std::make_shared(); + auto input0 = TensorConstructUtils::CreateOnesTensor(kNumberTypeInt32, std::vector{3}); + auto input1 = TensorConstructUtils::CreateOnesTensor(kNumberTypeInt32, std::vector{3}); + MS_EXCEPTION_IF_NULL(input0); + MS_EXCEPTION_IF_NULL(input1); + auto abstract = less->Infer({input0->ToAbstract(), input1->ToAbstract()}); + MS_EXCEPTION_IF_NULL(abstract); + EXPECT_EQ(abstract->isa(), true); + auto shape_ptr = abstract->BuildShape(); + MS_EXCEPTION_IF_NULL(shape_ptr); + EXPECT_EQ(shape_ptr->isa(), true); + auto shape = shape_ptr->cast(); + MS_EXCEPTION_IF_NULL(shape); + auto shape_vec = shape->shape(); + EXPECT_EQ(shape_vec.size(), 1); + EXPECT_EQ(shape_vec[0], 3); + auto type = abstract->BuildType(); + MS_EXCEPTION_IF_NULL(type); + EXPECT_EQ(type->isa(), true); + auto tensor_type = type->cast(); + MS_EXCEPTION_IF_NULL(tensor_type); + auto data_type = tensor_type->element(); + MS_EXCEPTION_IF_NULL(data_type); + EXPECT_EQ(data_type->type_id(), kNumberTypeBool); +} + +} // namespace ops +} // namespace mindspore diff --git a/tests/ut/cpp/ops/test_ops_localresponsenormalization.cc b/tests/ut/cpp/ops/test_ops_localresponsenormalization.cc new file mode 100644 index 00000000000..c9b3baf695f --- /dev/null +++ b/tests/ut/cpp/ops/test_ops_localresponsenormalization.cc @@ -0,0 +1,62 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#include +#include +#include "common/common_test.h" +#include "ops/local_response_normalization.h" +#include "ir/dtype/type.h" +#include "ir/value.h" +#include "abstract/dshape.h" +#include "utils/tensor_construct_utils.h" + +namespace mindspore { +namespace ops { +class TestLocalResponseNormalization : public UT::Common { + public: + TestLocalResponseNormalization() {} + void SetUp() {} + void TearDown() {} +}; + +TEST_F(TestLocalResponseNormalization, test_ops_local_response_norm1) { + auto local_response_norm = std::make_shared(); + auto tensor_x = TensorConstructUtils::CreateOnesTensor(kNumberTypeFloat16, std::vector{2, 3, 4, 5}); + MS_EXCEPTION_IF_NULL(tensor_x); + auto abstract = local_response_norm->Infer({tensor_x->ToAbstract()}); + MS_EXCEPTION_IF_NULL(abstract); + EXPECT_EQ(abstract->isa(), true); + auto shape_ptr = abstract->BuildShape(); + MS_EXCEPTION_IF_NULL(shape_ptr); + EXPECT_EQ(shape_ptr->isa(), true); + auto shape = shape_ptr->cast(); + MS_EXCEPTION_IF_NULL(shape); + auto shape_vec = shape->shape(); + auto type = abstract->BuildType(); + MS_EXCEPTION_IF_NULL(type); + EXPECT_EQ(type->isa(), true); + auto tensor_type = type->cast(); + MS_EXCEPTION_IF_NULL(tensor_type); + auto data_type = tensor_type->element(); + MS_EXCEPTION_IF_NULL(data_type); + EXPECT_EQ(data_type->type_id(), kNumberTypeFloat16); + EXPECT_EQ(shape_vec.size(), 4); + EXPECT_EQ(shape_vec[0], 2); + EXPECT_EQ(shape_vec[1], 3); + EXPECT_EQ(shape_vec[2], 4); + EXPECT_EQ(shape_vec[3], 5); +} +} // namespace ops +} // namespace mindspore diff --git a/tests/ut/cpp/ops/test_ops_log.cc b/tests/ut/cpp/ops/test_ops_log.cc new file mode 100644 index 00000000000..66a71747790 --- /dev/null +++ b/tests/ut/cpp/ops/test_ops_log.cc @@ -0,0 +1,59 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+#include <vector>
+#include <memory>
+#include "common/common_test.h"
+#include "ops/log.h"
+#include "ir/dtype/type.h"
+#include "abstract/dshape.h"
+#include "utils/tensor_construct_utils.h"
+namespace mindspore {
+namespace ops {
+class TestLog : public UT::Common {
+ public:
+  TestLog() {}
+  void SetUp() {}
+  void TearDown() {}
+};
+
+TEST_F(TestLog, test_ops_log) {
+  auto log = std::make_shared<Log>();
+  // log->Init();
+  auto tensor_x = TensorConstructUtils::CreateOnesTensor(kNumberTypeFloat32, std::vector<int64_t>{1, 3});
+  MS_EXCEPTION_IF_NULL(tensor_x);
+  auto log_abstract = log->Infer({tensor_x->ToAbstract()});
+  MS_EXCEPTION_IF_NULL(log_abstract);
+  EXPECT_EQ(log_abstract->isa<abstract::AbstractTensor>(), true);
+  auto shape_ptr = log_abstract->BuildShape();
+  MS_EXCEPTION_IF_NULL(shape_ptr);
+  EXPECT_EQ(shape_ptr->isa<abstract::Shape>(), true);
+  auto log_shape = shape_ptr->cast<abstract::ShapePtr>();
+  MS_EXCEPTION_IF_NULL(log_shape);
+  auto shape_vec = log_shape->shape();
+  auto type = log_abstract->BuildType();
+  MS_EXCEPTION_IF_NULL(type);
+  EXPECT_EQ(type->isa<TensorType>(), true);
+  auto tensor_type = type->cast<TensorTypePtr>();
+  MS_EXCEPTION_IF_NULL(tensor_type);
+  auto elem_type = tensor_type->element();
+  EXPECT_EQ(elem_type->type_id(), kNumberTypeFloat32);
+  EXPECT_EQ(shape_vec.size(), 2);
+  EXPECT_EQ(shape_vec[0], 1);
+  EXPECT_EQ(shape_vec[1], 3);
+}
+
+}  // namespace ops
+}  // namespace mindspore
diff --git a/tests/ut/cpp/ops/test_ops_logical_not.cc b/tests/ut/cpp/ops/test_ops_logical_not.cc
new file mode 100644
index 00000000000..a9bc10efa2b
--- /dev/null
+++ b/tests/ut/cpp/ops/test_ops_logical_not.cc
@@ -0,0 +1,77 @@
+/**
+ * Copyright 2020 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include <vector>
+#include <memory>
+#include "common/common_test.h"
+#include "ops/logical_not.h"
+#include "ir/dtype/type.h"
+#include "ir/value.h"
+#include "abstract/dshape.h"
+#include "utils/tensor_construct_utils.h"
+
+namespace mindspore {
+namespace ops {
+
+namespace {
+template <typename T>
+void SetTensorData(void *data, T num, size_t data_length) {
+  MS_EXCEPTION_IF_NULL(data);
+  auto tensor_data = reinterpret_cast<T *>(data);
+  MS_EXCEPTION_IF_NULL(tensor_data);
+  for (size_t index = 0; index < data_length; ++index) {
+    *tensor_data = num;
+    ++tensor_data;
+  }
+}
+}  // namespace
+
+class TestLogicalNot : public UT::Common {
+ public:
+  TestLogicalNot() {}
+  void SetUp() {}
+  void TearDown() {}
+};
+
+TEST_F(TestLogicalNot, test_ops_logical_not1) {
+  auto logical_not = std::make_shared<LogicalNot>();
+  logical_not->Init();
+  auto tensor = std::make_shared<tensor::Tensor>(kNumberTypeBool, std::vector<int64_t>{3});
+  MS_EXCEPTION_IF_NULL(tensor);
+  auto mem_size = IntToSize(tensor->ElementsNum());
+  SetTensorData(tensor->data_c(), true, mem_size);
+  auto abstract = logical_not->Infer({tensor->ToAbstract()});
+  MS_EXCEPTION_IF_NULL(abstract);
+  EXPECT_EQ(abstract->isa<abstract::AbstractTensor>(), true);
+  auto shape_ptr = abstract->BuildShape();
+  MS_EXCEPTION_IF_NULL(shape_ptr);
+  EXPECT_EQ(shape_ptr->isa<abstract::Shape>(), true);
+  auto shape = shape_ptr->cast<abstract::ShapePtr>();
+  MS_EXCEPTION_IF_NULL(shape);
+  auto shape_vec = shape->shape();
+  auto type = abstract->BuildType();
+  MS_EXCEPTION_IF_NULL(type);
+  EXPECT_EQ(type->isa<TensorType>(), true);
+  auto tensor_type = type->cast<TensorTypePtr>();
+  MS_EXCEPTION_IF_NULL(tensor_type);
+  auto data_type = tensor_type->element();
+  MS_EXCEPTION_IF_NULL(data_type);
+  EXPECT_EQ(data_type->type_id(), kNumberTypeBool);
+  EXPECT_EQ(shape_vec.size(), 1);
+  EXPECT_EQ(shape_vec[0], 3);
+}
+
+}  // namespace ops
+}  // namespace mindspore
diff --git a/tests/ut/cpp/ops/test_ops_lsh_projection.cc b/tests/ut/cpp/ops/test_ops_lsh_projection.cc
new file mode 100644
index 00000000000..5d8a48d12f7
--- /dev/null
+++ b/tests/ut/cpp/ops/test_ops_lsh_projection.cc
@@ -0,0 +1,125 @@
+/**
+ * Copyright 2020 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ +#include +#include +#include "common/common_test.h" +#include "ops/lsh_projection.h" +#include "ir/dtype/type.h" +#include "ir/value.h" +#include "abstract/dshape.h" +#include "utils/tensor_construct_utils.h" + +namespace mindspore { +namespace ops { + +class TestLshProjection : public UT::Common { + public: + TestLshProjection() {} + void SetUp() {} + void TearDown() {} +}; + +TEST_F(TestLshProjection, test_ops_lsh_projection1) { + auto lsh_projection = std::make_shared(); + lsh_projection->Init(LshProjectionType::SPARSE); + EXPECT_EQ(lsh_projection->get_type(), LshProjectionType::SPARSE); + auto input0 = TensorConstructUtils::CreateOnesTensor(kNumberTypeInt32, std::vector{4, 3}); + auto input1 = TensorConstructUtils::CreateOnesTensor(kNumberTypeInt32, std::vector{1, 1}); + MS_EXCEPTION_IF_NULL(input0); + MS_EXCEPTION_IF_NULL(input1); + auto abstract = lsh_projection->Infer({input0->ToAbstract(), input1->ToAbstract()}); + MS_EXCEPTION_IF_NULL(abstract); + EXPECT_EQ(abstract->isa(), true); + auto shape_ptr = abstract->BuildShape(); + MS_EXCEPTION_IF_NULL(shape_ptr); + EXPECT_EQ(shape_ptr->isa(), true); + auto shape = shape_ptr->cast(); + MS_EXCEPTION_IF_NULL(shape); + auto shape_vec = shape->shape(); + EXPECT_EQ(shape_vec.size(), 1); + EXPECT_EQ(shape_vec[0], 4); + auto type = abstract->BuildType(); + MS_EXCEPTION_IF_NULL(type); + EXPECT_EQ(type->isa(), true); + auto tensor_type = type->cast(); + MS_EXCEPTION_IF_NULL(tensor_type); + auto data_type = tensor_type->element(); + MS_EXCEPTION_IF_NULL(data_type); + EXPECT_EQ(data_type->type_id(), kNumberTypeInt32); +} + +TEST_F(TestLshProjection, test_ops_lsh_projection2) { + auto lsh_projection = std::make_shared(); + lsh_projection->Init(LshProjectionType::DENSE); + EXPECT_EQ(lsh_projection->get_type(), LshProjectionType::DENSE); + auto input0 = TensorConstructUtils::CreateOnesTensor(kNumberTypeInt32, std::vector{4, 3}); + auto input1 = TensorConstructUtils::CreateOnesTensor(kNumberTypeInt32, std::vector{1, 1}); + MS_EXCEPTION_IF_NULL(input0); + MS_EXCEPTION_IF_NULL(input1); + auto abstract = lsh_projection->Infer({input0->ToAbstract(), input1->ToAbstract()}); + MS_EXCEPTION_IF_NULL(abstract); + EXPECT_EQ(abstract->isa(), true); + auto shape_ptr = abstract->BuildShape(); + MS_EXCEPTION_IF_NULL(shape_ptr); + EXPECT_EQ(shape_ptr->isa(), true); + auto shape = shape_ptr->cast(); + MS_EXCEPTION_IF_NULL(shape); + auto shape_vec = shape->shape(); + EXPECT_EQ(shape_vec.size(), 1); + EXPECT_EQ(shape_vec[0], 4 * 3); + auto type = abstract->BuildType(); + MS_EXCEPTION_IF_NULL(type); + EXPECT_EQ(type->isa(), true); + auto tensor_type = type->cast(); + MS_EXCEPTION_IF_NULL(tensor_type); + auto data_type = tensor_type->element(); + MS_EXCEPTION_IF_NULL(data_type); + EXPECT_EQ(data_type->type_id(), kNumberTypeInt32); +} + +TEST_F(TestLshProjection, test_ops_lsh_projection3) { + auto lsh_projection = std::make_shared(); + lsh_projection->Init(LshProjectionType::DENSE); + EXPECT_EQ(lsh_projection->get_type(), LshProjectionType::DENSE); + auto input0 = TensorConstructUtils::CreateOnesTensor(kNumberTypeInt32, std::vector{4, 3}); + auto input1 = TensorConstructUtils::CreateOnesTensor(kNumberTypeInt32, std::vector{5, 1}); + auto input2 = TensorConstructUtils::CreateOnesTensor(kNumberTypeInt32, std::vector{5}); + MS_EXCEPTION_IF_NULL(input0); + MS_EXCEPTION_IF_NULL(input1); + MS_EXCEPTION_IF_NULL(input2); + auto abstract = lsh_projection->Infer({input0->ToAbstract(), input1->ToAbstract(), input2->ToAbstract()}); + MS_EXCEPTION_IF_NULL(abstract); + 
EXPECT_EQ(abstract->isa(), true); + auto shape_ptr = abstract->BuildShape(); + MS_EXCEPTION_IF_NULL(shape_ptr); + EXPECT_EQ(shape_ptr->isa(), true); + auto shape = shape_ptr->cast(); + MS_EXCEPTION_IF_NULL(shape); + auto shape_vec = shape->shape(); + EXPECT_EQ(shape_vec.size(), 1); + EXPECT_EQ(shape_vec[0], 4 * 3); + auto type = abstract->BuildType(); + MS_EXCEPTION_IF_NULL(type); + EXPECT_EQ(type->isa(), true); + auto tensor_type = type->cast(); + MS_EXCEPTION_IF_NULL(tensor_type); + auto data_type = tensor_type->element(); + MS_EXCEPTION_IF_NULL(data_type); + EXPECT_EQ(data_type->type_id(), kNumberTypeInt32); +} + +} // namespace ops +} // namespace mindspore diff --git a/tests/ut/cpp/ops/test_ops_matrix_diag.cc b/tests/ut/cpp/ops/test_ops_matrix_diag.cc new file mode 100644 index 00000000000..d34361e401c --- /dev/null +++ b/tests/ut/cpp/ops/test_ops_matrix_diag.cc @@ -0,0 +1,65 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#include +#include +#include "common/common_test.h" +#include "ops/matrix_diag.h" +#include "ir/dtype/type.h" +#include "ir/value.h" +#include "abstract/dshape.h" +#include "utils/tensor_construct_utils.h" + +namespace mindspore { +namespace ops { + +class TestMatrixDiag : public UT::Common { + public: + TestMatrixDiag() {} + void SetUp() {} + void TearDown() {} +}; + +TEST_F(TestMatrixDiag, test_ops_matrix_diag1) { + auto matrix_diag = std::make_shared(); + auto input0 = TensorConstructUtils::CreateOnesTensor(kNumberTypeFloat32, std::vector{2}); + auto input1 = TensorConstructUtils::CreateOnesTensor(kNumberTypeFloat32, std::vector{3, 2, 2}); + MS_EXCEPTION_IF_NULL(input0); + MS_EXCEPTION_IF_NULL(input1); + auto abstract = matrix_diag->Infer({input0->ToAbstract(), input1->ToAbstract()}); + MS_EXCEPTION_IF_NULL(abstract); + EXPECT_EQ(abstract->isa(), true); + auto shape_ptr = abstract->BuildShape(); + MS_EXCEPTION_IF_NULL(shape_ptr); + EXPECT_EQ(shape_ptr->isa(), true); + auto shape = shape_ptr->cast(); + MS_EXCEPTION_IF_NULL(shape); + auto shape_vec = shape->shape(); + EXPECT_EQ(shape_vec.size(), 3); + EXPECT_EQ(shape_vec[0], 3); + EXPECT_EQ(shape_vec[1], 2); + EXPECT_EQ(shape_vec[2], 2); + auto type = abstract->BuildType(); + MS_EXCEPTION_IF_NULL(type); + EXPECT_EQ(type->isa(), true); + auto tensor_type = type->cast(); + MS_EXCEPTION_IF_NULL(tensor_type); + auto data_type = tensor_type->element(); + MS_EXCEPTION_IF_NULL(data_type); + EXPECT_EQ(data_type->type_id(), kNumberTypeFloat32); +} + +} // namespace ops +} // namespace mindspore diff --git a/tests/ut/cpp/ops/test_ops_max_pool_grad.cc b/tests/ut/cpp/ops/test_ops_max_pool_grad.cc new file mode 100644 index 00000000000..d43bbb3c21d --- /dev/null +++ b/tests/ut/cpp/ops/test_ops_max_pool_grad.cc @@ -0,0 +1,66 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#include +#include +#include "common/common_test.h" +#include "ops/grad/max_pool_grad.h" +#include "ir/dtype/type.h" +#include "ir/value.h" +#include "abstract/dshape.h" +#include "utils/tensor_construct_utils.h" + +namespace mindspore { +namespace ops { +class TestMaxPoolGrad : public UT::Common { + public: + TestMaxPoolGrad() {} + void SetUp() {} + void TearDown() {} +}; + +TEST_F(TestMaxPoolGrad, test_ops_max_pool_grad1) { + auto max_pool_grad = std::make_shared(); + max_pool_grad->Init({1}, {1}, VALID, NCHW); + EXPECT_EQ(max_pool_grad->get_pad_mode(), VALID); + EXPECT_EQ(max_pool_grad->get_data_format(), NCHW); + auto input0 = TensorConstructUtils::CreateOnesTensor(kNumberTypeFloat32, std::vector{1}); + auto input1 = TensorConstructUtils::CreateOnesTensor(kNumberTypeFloat32, std::vector{1}); + auto input2 = TensorConstructUtils::CreateOnesTensor(kNumberTypeFloat32, std::vector{1}); + MS_EXCEPTION_IF_NULL(input0); + MS_EXCEPTION_IF_NULL(input1); + MS_EXCEPTION_IF_NULL(input2); + auto abstract = max_pool_grad->Infer({input0->ToAbstract(), input1->ToAbstract(), input2->ToAbstract()}); + MS_EXCEPTION_IF_NULL(abstract); + EXPECT_EQ(abstract->isa(), true); + auto shape_ptr = abstract->BuildShape(); + MS_EXCEPTION_IF_NULL(shape_ptr); + EXPECT_EQ(shape_ptr->isa(), true); + auto shape = shape_ptr->cast(); + MS_EXCEPTION_IF_NULL(shape); + auto shape_vec = shape->shape(); + EXPECT_EQ(shape_vec.size(), 1); + EXPECT_EQ(shape_vec[0], 1); + auto type = abstract->BuildType(); + MS_EXCEPTION_IF_NULL(type); + EXPECT_EQ(type->isa(), true); + auto tensor_type = type->cast(); + MS_EXCEPTION_IF_NULL(tensor_type); + auto data_type = tensor_type->element(); + MS_EXCEPTION_IF_NULL(data_type); + EXPECT_EQ(data_type->type_id(), kNumberTypeFloat32); +} +} // namespace ops +} // namespace mindspore diff --git a/tests/ut/cpp/ops/test_ops_maximum.cc b/tests/ut/cpp/ops/test_ops_maximum.cc new file mode 100644 index 00000000000..aec14bf5764 --- /dev/null +++ b/tests/ut/cpp/ops/test_ops_maximum.cc @@ -0,0 +1,61 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#include +#include +#include "common/common_test.h" +#include "ops/maximum.h" +#include "ir/dtype/type.h" +#include "abstract/dshape.h" +#include "utils/tensor_construct_utils.h" +namespace mindspore { +namespace ops { +class TestMaximum : public UT::Common { + public: + TestMaximum() {} + void SetUp() {} + void TearDown() {} +}; + +TEST_F(TestMaximum, test_ops_maximum) { + auto maximum = std::make_shared(); + maximum->Init(); + auto tensor_x = TensorConstructUtils::CreateOnesTensor(kNumberTypeFloat32, std::vector{1, 3}); + auto tensor_y = TensorConstructUtils::CreateOnesTensor(kNumberTypeFloat32, std::vector{1, 3}); + MS_EXCEPTION_IF_NULL(tensor_x); + MS_EXCEPTION_IF_NULL(tensor_y); + auto maximum_abstract = maximum->Infer({tensor_x->ToAbstract(), tensor_y->ToAbstract()}); + MS_EXCEPTION_IF_NULL(maximum_abstract); + EXPECT_EQ(maximum_abstract->isa(), true); + auto shape_ptr = maximum_abstract->BuildShape(); + MS_EXCEPTION_IF_NULL(shape_ptr); + EXPECT_EQ(shape_ptr->isa(), true); + auto maximum_shape = shape_ptr->cast(); + MS_EXCEPTION_IF_NULL(maximum_shape); + auto shape_vec = maximum_shape->shape(); + auto type = maximum_abstract->BuildType(); + MS_EXCEPTION_IF_NULL(type); + EXPECT_EQ(type->isa(), true); + auto tensor_type = type->cast(); + MS_EXCEPTION_IF_NULL(tensor_type); + auto elem_type = tensor_type->element(); + EXPECT_EQ(elem_type->type_id(), kNumberTypeFloat32); + EXPECT_EQ(shape_vec.size(), 2); + EXPECT_EQ(shape_vec[0], 1); + EXPECT_EQ(shape_vec[1], 3); +} + +} // namespace ops +} // namespace mindspore diff --git a/tests/ut/cpp/ops/test_ops_merge.cc b/tests/ut/cpp/ops/test_ops_merge.cc new file mode 100644 index 00000000000..b59b0b449d3 --- /dev/null +++ b/tests/ut/cpp/ops/test_ops_merge.cc @@ -0,0 +1,74 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#include +#include +#include "common/common_test.h" +#include "ops/merge.h" +#include "ir/dtype/type.h" +#include "ir/value.h" +#include "abstract/dshape.h" +#include "utils/tensor_construct_utils.h" + +namespace mindspore { +namespace ops { + +class TestMerge : public UT::Common { + public: + TestMerge() {} + void SetUp() {} + void TearDown() {} +}; + +TEST_F(TestMerge, test_ops_merge1) { + auto merge = std::make_shared(); + merge->Init(); + auto input_x = TensorConstructUtils::CreateOnesTensor(kNumberTypeFloat32, std::vector{2, 4}); + auto input_y = TensorConstructUtils::CreateOnesTensor(kNumberTypeFloat32, std::vector{2, 4}); + MS_EXCEPTION_IF_NULL(input_x); + MS_EXCEPTION_IF_NULL(input_y); + std::vector inputs_ = {input_x, input_y}; + auto input = std::make_shared(inputs_); + auto abstract = merge->Infer({input->ToAbstract()}); + MS_EXCEPTION_IF_NULL(abstract); + auto shape_ptr = abstract->BuildShape(); + MS_EXCEPTION_IF_NULL(shape_ptr); + EXPECT_EQ(shape_ptr->isa(), true); + auto shape = shape_ptr->cast(); + MS_EXCEPTION_IF_NULL(shape); + auto shape_vec = shape->shape(); + EXPECT_EQ(shape_vec.size(), 2); + auto shape1 = shape_vec[0]->cast()->shape(); + EXPECT_EQ(shape1.size(), 2); + EXPECT_EQ(shape1[0], 2); + EXPECT_EQ(shape1[1], 4); + auto shape2 = shape_vec[1]->cast()->shape(); + EXPECT_EQ(shape2.size(), 1); + EXPECT_EQ(shape2[0], 1); + auto type_ptr = abstract->BuildType(); + MS_EXCEPTION_IF_NULL(type_ptr); + auto type = type_ptr->cast(); + auto type_vec = type->elements(); + MS_EXCEPTION_IF_NULL(type_vec[0]); + auto data_type1 = type_vec[0]->cast()->element(); + MS_EXCEPTION_IF_NULL(data_type1); + EXPECT_EQ(data_type1->type_id(), kNumberTypeFloat32); + auto data_type2 = type_vec[1]->cast()->element(); + MS_EXCEPTION_IF_NULL(data_type2); + EXPECT_EQ(data_type2->type_id(), kNumberTypeInt32); +} + +} // namespace ops +} // namespace mindspore diff --git a/tests/ut/cpp/ops/test_ops_mfcc.cc b/tests/ut/cpp/ops/test_ops_mfcc.cc new file mode 100644 index 00000000000..32b849b9de7 --- /dev/null +++ b/tests/ut/cpp/ops/test_ops_mfcc.cc @@ -0,0 +1,64 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+#include <vector>
+#include <memory>
+#include "common/common_test.h"
+#include "ops/mfcc.h"
+#include "ir/dtype/type.h"
+#include "abstract/dshape.h"
+#include "utils/tensor_construct_utils.h"
+namespace mindspore {
+namespace ops {
+class TestMfcc : public UT::Common {
+ public:
+  TestMfcc() {}
+  void SetUp() {}
+  void TearDown() {}
+};
+
+TEST_F(TestMfcc, test_ops_mfcc) {
+  auto mfcc = std::make_shared<Mfcc>();
+  mfcc->Init(0, 0, 0, 0);
+  mfcc->set_dct_coeff_num(4);
+  EXPECT_EQ(mfcc->get_dct_coeff_num(), 4);
+  auto tensor_x = TensorConstructUtils::CreateOnesTensor(kNumberTypeFloat32, std::vector<int64_t>{1, 2, 3});
+  auto tensor_y = TensorConstructUtils::CreateOnesTensor(kNumberTypeFloat32, std::vector<int64_t>{3});
+  MS_EXCEPTION_IF_NULL(tensor_x);
+  MS_EXCEPTION_IF_NULL(tensor_y);
+  auto mfcc_abstract = mfcc->Infer({tensor_x->ToAbstract(), tensor_y->ToAbstract()});
+  MS_EXCEPTION_IF_NULL(mfcc_abstract);
+  EXPECT_EQ(mfcc_abstract->isa<abstract::AbstractTensor>(), true);
+  auto shape_ptr = mfcc_abstract->BuildShape();
+  MS_EXCEPTION_IF_NULL(shape_ptr);
+  EXPECT_EQ(shape_ptr->isa<abstract::Shape>(), true);
+  auto mfcc_shape = shape_ptr->cast<abstract::ShapePtr>();
+  MS_EXCEPTION_IF_NULL(mfcc_shape);
+  auto shape_vec = mfcc_shape->shape();
+  auto type = mfcc_abstract->BuildType();
+  MS_EXCEPTION_IF_NULL(type);
+  EXPECT_EQ(type->isa<TensorType>(), true);
+  auto tensor_type = type->cast<TensorTypePtr>();
+  MS_EXCEPTION_IF_NULL(tensor_type);
+  auto elem_type = tensor_type->element();
+  EXPECT_EQ(elem_type->type_id(), kNumberTypeFloat32);
+  EXPECT_EQ(shape_vec.size(), 3);
+  EXPECT_EQ(shape_vec[0], 1);
+  EXPECT_EQ(shape_vec[1], 2);
+  EXPECT_EQ(shape_vec[2], 4);
+}
+
+}  // namespace ops
+}  // namespace mindspore
diff --git a/tests/ut/cpp/ops/test_ops_minimum.cc b/tests/ut/cpp/ops/test_ops_minimum.cc
new file mode 100644
index 00000000000..b92d365fdff
--- /dev/null
+++ b/tests/ut/cpp/ops/test_ops_minimum.cc
@@ -0,0 +1,64 @@
+/**
+ * Copyright 2020 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ +#include +#include +#include "common/common_test.h" +#include "ops/minimum.h" +#include "ir/dtype/type.h" +#include "ir/value.h" +#include "abstract/dshape.h" +#include "utils/tensor_construct_utils.h" + +namespace mindspore { +namespace ops { +class TestMinimum : public UT::Common { + public: + TestMinimum() {} + void SetUp() {} + void TearDown() {} +}; + +TEST_F(TestMinimum, test_ops_minimum1) { + auto minimum = std::make_shared(); + auto tensor_x1 = TensorConstructUtils::CreateOnesTensor(kNumberTypeFloat16, std::vector{2, 3, 4, 5}); + auto tensor_x2 = TensorConstructUtils::CreateOnesTensor(kNumberTypeFloat16, std::vector{4, 5}); + MS_EXCEPTION_IF_NULL(tensor_x1); + MS_EXCEPTION_IF_NULL(tensor_x2); + auto abstract = minimum->Infer({tensor_x1->ToAbstract(), tensor_x2->ToAbstract()}); + MS_EXCEPTION_IF_NULL(abstract); + EXPECT_EQ(abstract->isa(), true); + auto shape_ptr = abstract->BuildShape(); + MS_EXCEPTION_IF_NULL(shape_ptr); + EXPECT_EQ(shape_ptr->isa(), true); + auto shape = shape_ptr->cast(); + MS_EXCEPTION_IF_NULL(shape); + auto shape_vec = shape->shape(); + auto type = abstract->BuildType(); + MS_EXCEPTION_IF_NULL(type); + EXPECT_EQ(type->isa(), true); + auto tensor_type = type->cast(); + MS_EXCEPTION_IF_NULL(tensor_type); + auto data_type = tensor_type->element(); + MS_EXCEPTION_IF_NULL(data_type); + EXPECT_EQ(data_type->type_id(), kNumberTypeFloat16); + EXPECT_EQ(shape_vec.size(), 4); + EXPECT_EQ(shape_vec[0], 2); + EXPECT_EQ(shape_vec[1], 3); + EXPECT_EQ(shape_vec[2], 4); + EXPECT_EQ(shape_vec[3], 5); +} +} // namespace ops +} // namespace mindspore diff --git a/tests/ut/cpp/ops/test_ops_mul.cc b/tests/ut/cpp/ops/test_ops_mul.cc new file mode 100644 index 00000000000..0dcc62fb3e2 --- /dev/null +++ b/tests/ut/cpp/ops/test_ops_mul.cc @@ -0,0 +1,61 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#include +#include +#include "common/common_test.h" +#include "ops/mul.h" +#include "ir/dtype/type.h" +#include "abstract/dshape.h" +#include "utils/tensor_construct_utils.h" +namespace mindspore { +namespace ops { +class TestMul : public UT::Common { + public: + TestMul() {} + void SetUp() {} + void TearDown() {} +}; + +TEST_F(TestMul, test_ops_mul) { + auto mul = std::make_shared(); + mul->Init(); + auto tensor_x = TensorConstructUtils::CreateOnesTensor(kNumberTypeFloat32, std::vector{1, 3}); + auto tensor_y = TensorConstructUtils::CreateOnesTensor(kNumberTypeFloat32, std::vector{1, 3}); + MS_EXCEPTION_IF_NULL(tensor_x); + MS_EXCEPTION_IF_NULL(tensor_y); + auto mul_abstract = mul->Infer({tensor_x->ToAbstract(), tensor_y->ToAbstract()}); + MS_EXCEPTION_IF_NULL(mul_abstract); + EXPECT_EQ(mul_abstract->isa(), true); + auto shape_ptr = mul_abstract->BuildShape(); + MS_EXCEPTION_IF_NULL(shape_ptr); + EXPECT_EQ(shape_ptr->isa(), true); + auto mul_shape = shape_ptr->cast(); + MS_EXCEPTION_IF_NULL(mul_shape); + auto shape_vec = mul_shape->shape(); + auto type = mul_abstract->BuildType(); + MS_EXCEPTION_IF_NULL(type); + EXPECT_EQ(type->isa(), true); + auto tensor_type = type->cast(); + MS_EXCEPTION_IF_NULL(tensor_type); + auto elem_type = tensor_type->element(); + EXPECT_EQ(elem_type->type_id(), kNumberTypeFloat32); + EXPECT_EQ(shape_vec.size(), 2); + EXPECT_EQ(shape_vec[0], 1); + EXPECT_EQ(shape_vec[1], 3); +} + +} // namespace ops +} // namespace mindspore diff --git a/tests/ut/cpp/ops/test_ops_neg.cc b/tests/ut/cpp/ops/test_ops_neg.cc new file mode 100644 index 00000000000..4f875baaef2 --- /dev/null +++ b/tests/ut/cpp/ops/test_ops_neg.cc @@ -0,0 +1,62 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#include +#include +#include "common/common_test.h" +#include "ops/neg.h" +#include "ir/dtype/type.h" +#include "ir/value.h" +#include "abstract/dshape.h" +#include "utils/tensor_construct_utils.h" + +namespace mindspore { +namespace ops { +class TestNeg : public UT::Common { + public: + TestNeg() {} + void SetUp() {} + void TearDown() {} +}; + +TEST_F(TestNeg, test_ops_neg1) { + auto neg = std::make_shared(); + auto tensor_x = TensorConstructUtils::CreateOnesTensor(kNumberTypeFloat32, std::vector{2, 3, 4, 5}); + MS_EXCEPTION_IF_NULL(tensor_x); + auto abstract = neg->Infer({tensor_x->ToAbstract()}); + MS_EXCEPTION_IF_NULL(abstract); + EXPECT_EQ(abstract->isa(), true); + auto shape_ptr = abstract->BuildShape(); + MS_EXCEPTION_IF_NULL(shape_ptr); + EXPECT_EQ(shape_ptr->isa(), true); + auto shape = shape_ptr->cast(); + MS_EXCEPTION_IF_NULL(shape); + auto shape_vec = shape->shape(); + auto type = abstract->BuildType(); + MS_EXCEPTION_IF_NULL(type); + EXPECT_EQ(type->isa(), true); + auto tensor_type = type->cast(); + MS_EXCEPTION_IF_NULL(tensor_type); + auto data_type = tensor_type->element(); + MS_EXCEPTION_IF_NULL(data_type); + EXPECT_EQ(data_type->type_id(), kNumberTypeFloat32); + EXPECT_EQ(shape_vec.size(), 4); + EXPECT_EQ(shape_vec[0], 2); + EXPECT_EQ(shape_vec[1], 3); + EXPECT_EQ(shape_vec[2], 4); + EXPECT_EQ(shape_vec[3], 5); +} +} // namespace ops +} // namespace mindspore diff --git a/tests/ut/cpp/ops/test_ops_non_max_suppression.cc b/tests/ut/cpp/ops/test_ops_non_max_suppression.cc new file mode 100644 index 00000000000..fd35c776f40 --- /dev/null +++ b/tests/ut/cpp/ops/test_ops_non_max_suppression.cc @@ -0,0 +1,62 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#include +#include +#include "common/common_test.h" +#include "ops/non_max_suppression.h" +#include "ir/dtype/type.h" +#include "ir/value.h" +#include "abstract/dshape.h" +#include "utils/tensor_construct_utils.h" + +namespace mindspore { +namespace ops { + +class TestNonMaxSuppression : public UT::Common { + public: + TestNonMaxSuppression() {} + void SetUp() {} + void TearDown() {} +}; + +TEST_F(TestNonMaxSuppression, test_ops_non_max_suppression1) { + auto non_max_suppression = std::make_shared(); + non_max_suppression->Init(); + EXPECT_EQ(non_max_suppression->get_center_point_box(), 0); + auto input0 = TensorConstructUtils::CreateOnesTensor(kNumberTypeInt32, std::vector{1}); + MS_EXCEPTION_IF_NULL(input0); + auto abstract = non_max_suppression->Infer({input0->ToAbstract()}); + MS_EXCEPTION_IF_NULL(abstract); + EXPECT_EQ(abstract->isa(), true); + auto shape_ptr = abstract->BuildShape(); + MS_EXCEPTION_IF_NULL(shape_ptr); + EXPECT_EQ(shape_ptr->isa(), true); + auto shape = shape_ptr->cast(); + MS_EXCEPTION_IF_NULL(shape); + auto shape_vec = shape->shape(); + EXPECT_EQ(shape_vec.size(), 0); + auto type = abstract->BuildType(); + MS_EXCEPTION_IF_NULL(type); + EXPECT_EQ(type->isa(), true); + auto tensor_type = type->cast(); + MS_EXCEPTION_IF_NULL(tensor_type); + auto data_type = tensor_type->element(); + MS_EXCEPTION_IF_NULL(data_type); + EXPECT_EQ(data_type->type_id(), kNumberTypeInt32); +} + +} // namespace ops +} // namespace mindspore diff --git a/tests/ut/cpp/ops/test_ops_one_hot.cc b/tests/ut/cpp/ops/test_ops_one_hot.cc new file mode 100644 index 00000000000..15421c14b23 --- /dev/null +++ b/tests/ut/cpp/ops/test_ops_one_hot.cc @@ -0,0 +1,70 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#include +#include +#include "common/common_test.h" +#include "ops/one_hot.h" +#include "ir/dtype/type.h" +#include "ir/value.h" +#include "abstract/dshape.h" +#include "utils/tensor_construct_utils.h" + +namespace mindspore { +namespace ops { + +class TestOneHot : public UT::Common { + public: + TestOneHot() {} + void SetUp() {} + void TearDown() {} +}; + +TEST_F(TestOneHot, test_ops_one_hot1) { + auto one_hot = std::make_shared(); + one_hot->Init(-1); + EXPECT_EQ(one_hot->get_axis(), -1); + auto indice = TensorConstructUtils::CreateOnesTensor(kNumberTypeInt32, std::vector{3}); + auto depth = MakeValue(3); + auto on_value = TensorConstructUtils::CreateOnesTensor(kNumberTypeFloat32, std::vector{1}); + auto off_value = TensorConstructUtils::CreateZerosTensor(kNumberTypeFloat32, std::vector{1}); + MS_EXCEPTION_IF_NULL(indice); + MS_EXCEPTION_IF_NULL(depth); + MS_EXCEPTION_IF_NULL(on_value); + MS_EXCEPTION_IF_NULL(off_value); + auto abstract = + one_hot->Infer({indice->ToAbstract(), depth->ToAbstract(), on_value->ToAbstract(), off_value->ToAbstract()}); + MS_EXCEPTION_IF_NULL(abstract); + EXPECT_EQ(abstract->isa(), true); + auto shape_ptr = abstract->BuildShape(); + MS_EXCEPTION_IF_NULL(shape_ptr); + EXPECT_EQ(shape_ptr->isa(), true); + auto shape = shape_ptr->cast(); + MS_EXCEPTION_IF_NULL(shape); + auto shape_vec = shape->shape(); + auto type = abstract->BuildType(); + MS_EXCEPTION_IF_NULL(type); + EXPECT_EQ(type->isa(), true); + auto tensor_type = type->cast(); + MS_EXCEPTION_IF_NULL(tensor_type); + auto data_type = tensor_type->element(); + MS_EXCEPTION_IF_NULL(data_type); + EXPECT_EQ(data_type->type_id(), kNumberTypeFloat32); + EXPECT_EQ(shape_vec.size(), 2); + EXPECT_EQ(shape_vec[0], 3); +} + +} // namespace ops +} // namespace mindspore diff --git a/tests/ut/cpp/ops/test_ops_pack.cc b/tests/ut/cpp/ops/test_ops_pack.cc new file mode 100644 index 00000000000..721389e2ae3 --- /dev/null +++ b/tests/ut/cpp/ops/test_ops_pack.cc @@ -0,0 +1,67 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#include +#include +#include "common/common_test.h" +#include "ops/pack.h" +#include "ir/dtype/type.h" +#include "ir/value.h" +#include "abstract/dshape.h" +#include "utils/tensor_construct_utils.h" + +namespace mindspore { +namespace ops { + +class TestPack : public UT::Common { + public: + TestPack() {} + void SetUp() {} + void TearDown() {} +}; + +TEST_F(TestPack, test_ops_pack1) { + auto pack = std::make_shared(); + pack->Init(); + EXPECT_EQ(pack->get_axis(), 0); + auto input0 = TensorConstructUtils::CreateOnesTensor(kNumberTypeFloat32, std::vector{2}); + auto input1 = TensorConstructUtils::CreateOnesTensor(kNumberTypeFloat32, std::vector{2}); + MS_EXCEPTION_IF_NULL(input0); + MS_EXCEPTION_IF_NULL(input1); + auto input_ = std::make_shared(std::vector{input0, input1}); + auto abstract = pack->Infer({input_->ToAbstract()}); + MS_EXCEPTION_IF_NULL(abstract); + EXPECT_EQ(abstract->isa(), true); + auto shape_ptr = abstract->BuildShape(); + MS_EXCEPTION_IF_NULL(shape_ptr); + EXPECT_EQ(shape_ptr->isa(), true); + auto shape = shape_ptr->cast(); + MS_EXCEPTION_IF_NULL(shape); + auto shape_vec = shape->shape(); + EXPECT_EQ(shape_vec.size(), 2); + EXPECT_EQ(shape_vec[0], 2); + EXPECT_EQ(shape_vec[1], 2); + auto type = abstract->BuildType(); + MS_EXCEPTION_IF_NULL(type); + EXPECT_EQ(type->isa(), true); + auto tensor_type = type->cast(); + MS_EXCEPTION_IF_NULL(tensor_type); + auto data_type = tensor_type->element(); + MS_EXCEPTION_IF_NULL(data_type); + EXPECT_EQ(data_type->type_id(), kNumberTypeFloat32); +} + +} // namespace ops +} // namespace mindspore diff --git a/tests/ut/cpp/ops/test_ops_pooling_grad.cc b/tests/ut/cpp/ops/test_ops_pooling_grad.cc new file mode 100644 index 00000000000..6cffdd8da3b --- /dev/null +++ b/tests/ut/cpp/ops/test_ops_pooling_grad.cc @@ -0,0 +1,74 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#include +#include +#include "common/common_test.h" +#include "ops/grad/pooling_grad.h" +#include "ir/dtype/type.h" +#include "ir/value.h" +#include "abstract/dshape.h" +#include "utils/tensor_construct_utils.h" + +namespace mindspore { +namespace ops { +class TestPoolingGrad : public UT::Common { + public: + TestPoolingGrad() {} + void SetUp() {} + void TearDown() {} +}; + +TEST_F(TestPoolingGrad, test_ops_pooling_grad1) { + auto pooling_grad = std::make_shared(); + pooling_grad->Init(MAX_POOLING, std::vector{1, 1}, std::vector{1, 1}, VALID, + std::vector{1, 1, 1, 1}, FLOOR, NCHW, false); + EXPECT_EQ(pooling_grad->get_pool_mode(), MAX_POOLING); + // EXPECT_EQ(pooling_grad->get_window(), std::vector{1, 1}); + EXPECT_EQ(pooling_grad->get_pad_mode(), VALID); + // EXPECT_EQ(pooling_grad->get_stride(), std::vector{1, 1}); + // EXPECT_EQ(pooling_grad->get_pad_list(), std::vector{1, 1, 1, 1}); + EXPECT_EQ(pooling_grad->get_round_mode(), FLOOR); + EXPECT_EQ(pooling_grad->get_format(), NCHW); + EXPECT_EQ(pooling_grad->get_global(), false); + auto input0 = TensorConstructUtils::CreateOnesTensor(kNumberTypeFloat32, std::vector{1}); + auto input1 = TensorConstructUtils::CreateOnesTensor(kNumberTypeFloat32, std::vector{1}); + auto input2 = TensorConstructUtils::CreateOnesTensor(kNumberTypeFloat32, std::vector{3, 3}); + MS_EXCEPTION_IF_NULL(input0); + MS_EXCEPTION_IF_NULL(input1); + MS_EXCEPTION_IF_NULL(input2); + auto abstract = pooling_grad->Infer({input0->ToAbstract(), input1->ToAbstract(), input2->ToAbstract()}); + MS_EXCEPTION_IF_NULL(abstract); + EXPECT_EQ(abstract->isa(), true); + auto shape_ptr = abstract->BuildShape(); + MS_EXCEPTION_IF_NULL(shape_ptr); + EXPECT_EQ(shape_ptr->isa(), true); + auto shape = shape_ptr->cast(); + MS_EXCEPTION_IF_NULL(shape); + auto shape_vec = shape->shape(); + EXPECT_EQ(shape_vec.size(), 2); + EXPECT_EQ(shape_vec[0], 3); + EXPECT_EQ(shape_vec[1], 3); + auto type = abstract->BuildType(); + MS_EXCEPTION_IF_NULL(type); + EXPECT_EQ(type->isa(), true); + auto tensor_type = type->cast(); + MS_EXCEPTION_IF_NULL(tensor_type); + auto data_type = tensor_type->element(); + MS_EXCEPTION_IF_NULL(data_type); + EXPECT_EQ(data_type->type_id(), kNumberTypeFloat32); +} +} // namespace ops +} // namespace mindspore diff --git a/tests/ut/cpp/ops/test_ops_pow.cc b/tests/ut/cpp/ops/test_ops_pow.cc new file mode 100644 index 00000000000..940159ffa0c --- /dev/null +++ b/tests/ut/cpp/ops/test_ops_pow.cc @@ -0,0 +1,63 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#include +#include +#include "common/common_test.h" +#include "ops/pow.h" +#include "ir/dtype/type.h" +#include "abstract/dshape.h" +#include "utils/tensor_construct_utils.h" +namespace mindspore { +namespace ops { +class TestPow : public UT::Common { + public: + TestPow() {} + void SetUp() {} + void TearDown() {} +}; + +TEST_F(TestPow, test_ops_pow) { + auto pow = std::make_shared(); + // pow->Init(); + auto tensor_x = TensorConstructUtils::CreateOnesTensor(kNumberTypeFloat32, std::vector{1, 3, 4, 5}); + auto tensor_y = TensorConstructUtils::CreateOnesTensor(kNumberTypeFloat32, std::vector{1, 3, 4, 5}); + MS_EXCEPTION_IF_NULL(tensor_x); + MS_EXCEPTION_IF_NULL(tensor_y); + auto pow_abstract = pow->Infer({tensor_x->ToAbstract(), tensor_y->ToAbstract()}); + MS_EXCEPTION_IF_NULL(pow_abstract); + EXPECT_EQ(pow_abstract->isa(), true); + auto shape_ptr = pow_abstract->BuildShape(); + MS_EXCEPTION_IF_NULL(shape_ptr); + EXPECT_EQ(shape_ptr->isa(), true); + auto pow_shape = shape_ptr->cast(); + MS_EXCEPTION_IF_NULL(pow_shape); + auto shape_vec = pow_shape->shape(); + auto type = pow_abstract->BuildType(); + MS_EXCEPTION_IF_NULL(type); + EXPECT_EQ(type->isa(), true); + auto tensor_type = type->cast(); + MS_EXCEPTION_IF_NULL(tensor_type); + auto elem_type = tensor_type->element(); + EXPECT_EQ(elem_type->type_id(), kNumberTypeFloat32); + EXPECT_EQ(shape_vec.size(), 4); + EXPECT_EQ(shape_vec[0], 1); + EXPECT_EQ(shape_vec[1], 3); + EXPECT_EQ(shape_vec[2], 4); + EXPECT_EQ(shape_vec[3], 5); +} + +} // namespace ops +} // namespace mindspore diff --git a/tests/ut/cpp/ops/test_ops_prelu.cc b/tests/ut/cpp/ops/test_ops_prelu.cc new file mode 100644 index 00000000000..2dd3116e669 --- /dev/null +++ b/tests/ut/cpp/ops/test_ops_prelu.cc @@ -0,0 +1,93 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#include +#include +#include "common/common_test.h" +#include "ops/prelu.h" +#include "ir/dtype/type.h" +#include "ir/value.h" +#include "abstract/dshape.h" +#include "utils/tensor_construct_utils.h" + +namespace mindspore { +namespace ops { +class TestPReLU : public UT::Common { + public: + TestPReLU() {} + void SetUp() {} + void TearDown() {} +}; + +TEST_F(TestPReLU, test_ops_prelu1) { + auto prelu = std::make_shared(); + auto tensor_x = TensorConstructUtils::CreateOnesTensor(kNumberTypeFloat32, std::vector{2, 3, 4}); + auto tensor_w = TensorConstructUtils::CreateOnesTensor(kNumberTypeFloat32, std::vector{3}); + MS_EXCEPTION_IF_NULL(tensor_x); + MS_EXCEPTION_IF_NULL(tensor_w); + auto abstract = prelu->Infer({tensor_x->ToAbstract(), tensor_w->ToAbstract()}); + MS_EXCEPTION_IF_NULL(abstract); + EXPECT_EQ(abstract->isa(), true); + auto shape_ptr = abstract->BuildShape(); + MS_EXCEPTION_IF_NULL(shape_ptr); + EXPECT_EQ(shape_ptr->isa(), true); + auto shape = shape_ptr->cast(); + MS_EXCEPTION_IF_NULL(shape); + auto shape_vec = shape->shape(); + auto type = abstract->BuildType(); + MS_EXCEPTION_IF_NULL(type); + EXPECT_EQ(type->isa(), true); + auto tensor_type = type->cast(); + MS_EXCEPTION_IF_NULL(tensor_type); + auto data_type = tensor_type->element(); + MS_EXCEPTION_IF_NULL(data_type); + EXPECT_EQ(data_type->type_id(), kNumberTypeFloat32); + EXPECT_EQ(shape_vec.size(), 3); + EXPECT_EQ(shape_vec[0], 2); + EXPECT_EQ(shape_vec[1], 3); + EXPECT_EQ(shape_vec[2], 4); +} + +TEST_F(TestPReLU, test_ops_prelu2) { + auto prelu = std::make_shared(); + auto tensor_x = TensorConstructUtils::CreateOnesTensor(kNumberTypeFloat16, std::vector{5, 6, 7, 8}); + auto tensor_w = TensorConstructUtils::CreateOnesTensor(kNumberTypeFloat16, std::vector{1}); + MS_EXCEPTION_IF_NULL(tensor_x); + MS_EXCEPTION_IF_NULL(tensor_w); + auto abstract = prelu->Infer({tensor_x->ToAbstract(), tensor_w->ToAbstract()}); + MS_EXCEPTION_IF_NULL(abstract); + EXPECT_EQ(abstract->isa(), true); + auto shape_ptr = abstract->BuildShape(); + MS_EXCEPTION_IF_NULL(shape_ptr); + EXPECT_EQ(shape_ptr->isa(), true); + auto shape = shape_ptr->cast(); + MS_EXCEPTION_IF_NULL(shape); + auto shape_vec = shape->shape(); + auto type = abstract->BuildType(); + MS_EXCEPTION_IF_NULL(type); + EXPECT_EQ(type->isa(), true); + auto tensor_type = type->cast(); + MS_EXCEPTION_IF_NULL(tensor_type); + auto data_type = tensor_type->element(); + MS_EXCEPTION_IF_NULL(data_type); + EXPECT_EQ(data_type->type_id(), kNumberTypeFloat16); + EXPECT_EQ(shape_vec.size(), 4); + EXPECT_EQ(shape_vec[0], 5); + EXPECT_EQ(shape_vec[1], 6); + EXPECT_EQ(shape_vec[2], 7); + EXPECT_EQ(shape_vec[3], 8); +} +} // namespace ops +} // namespace mindspore diff --git a/tests/ut/cpp/ops/test_ops_prior_box.cc b/tests/ut/cpp/ops/test_ops_prior_box.cc new file mode 100644 index 00000000000..425e23e4f4a --- /dev/null +++ b/tests/ut/cpp/ops/test_ops_prior_box.cc @@ -0,0 +1,76 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#include +#include +#include "common/common_test.h" +#include "ops/prior_box.h" +#include "ir/dtype/type.h" +#include "ir/value.h" +#include "abstract/dshape.h" +#include "utils/tensor_construct_utils.h" + +namespace mindspore { +namespace ops { + +class TestPriorBox : public UT::Common { + public: + TestPriorBox() {} + void SetUp() {} + void TearDown() {} +}; + +TEST_F(TestPriorBox, test_ops_prior_box1) { + auto prior_box = std::make_shared(); + prior_box->Init({1}, {2}, {0.1}, {0.1}, 1, 1, 0.1, 0.1, false, false, 0.1); + EXPECT_EQ(prior_box->get_min_sizes(), std::vector{1}); + EXPECT_EQ(prior_box->get_max_sizes(), std::vector{2}); + EXPECT_EQ(prior_box->get_aspect_ratios(), std::vector{0.1}); + EXPECT_EQ(prior_box->get_variances(), std::vector{0.1}); + EXPECT_EQ(prior_box->get_image_size_w(), 1); + EXPECT_EQ(prior_box->get_image_size_h(), 1); + EXPECT_EQ((int64_t)(prior_box->get_step_w() - 0.1), 0); + EXPECT_EQ((int64_t)(prior_box->get_step_h() - 0.1), 0); + EXPECT_EQ(prior_box->get_clip(), false); + EXPECT_EQ(prior_box->get_flip(), false); + EXPECT_EQ((int64_t)(prior_box->get_offset() - 0.1), 0); + auto input0 = TensorConstructUtils::CreateOnesTensor(kNumberTypeFloat32, std::vector{1, 1}); + MS_EXCEPTION_IF_NULL(input0); + auto abstract = prior_box->Infer({input0->ToAbstract()}); + MS_EXCEPTION_IF_NULL(abstract); + EXPECT_EQ(abstract->isa(), true); + auto shape_ptr = abstract->BuildShape(); + MS_EXCEPTION_IF_NULL(shape_ptr); + EXPECT_EQ(shape_ptr->isa(), true); + auto shape = shape_ptr->cast(); + MS_EXCEPTION_IF_NULL(shape); + auto shape_vec = shape->shape(); + EXPECT_EQ(shape_vec.size(), 4); + EXPECT_EQ(shape_vec[0], 1); + EXPECT_EQ(shape_vec[1], 12); + EXPECT_EQ(shape_vec[2], 1); + EXPECT_EQ(shape_vec[3], 2); + auto type = abstract->BuildType(); + MS_EXCEPTION_IF_NULL(type); + EXPECT_EQ(type->isa(), true); + auto tensor_type = type->cast(); + MS_EXCEPTION_IF_NULL(tensor_type); + auto data_type = tensor_type->element(); + MS_EXCEPTION_IF_NULL(data_type); + EXPECT_EQ(data_type->type_id(), kNumberTypeFloat32); +} + +} // namespace ops +} // namespace mindspore diff --git a/tests/ut/cpp/ops/test_ops_quantd_type_cast.cc b/tests/ut/cpp/ops/test_ops_quantd_type_cast.cc new file mode 100644 index 00000000000..91fb13dcf2f --- /dev/null +++ b/tests/ut/cpp/ops/test_ops_quantd_type_cast.cc @@ -0,0 +1,65 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#include +#include +#include "common/common_test.h" +#include "ops/quant_dtype_cast.h" +#include "ir/dtype/type.h" +#include "ir/value.h" +#include "abstract/dshape.h" +#include "utils/tensor_construct_utils.h" + +namespace mindspore { +namespace ops { + +class TestQuantDTypeCast : public UT::Common { + public: + TestQuantDTypeCast() {} + void SetUp() {} + void TearDown() {} +}; + +TEST_F(TestQuantDTypeCast, test_ops_quantd_type_cast1) { + auto quantd_type_cast = std::make_shared(); + quantd_type_cast->Init(1, 35); + EXPECT_EQ(quantd_type_cast->get_src_t(), 1); + EXPECT_EQ(quantd_type_cast->get_dst_t(), 35); + auto input0 = TensorConstructUtils::CreateOnesTensor(kNumberTypeInt64, std::vector{4, 3}); + MS_EXCEPTION_IF_NULL(input0); + auto abstract = quantd_type_cast->Infer({input0->ToAbstract()}); + MS_EXCEPTION_IF_NULL(abstract); + EXPECT_EQ(abstract->isa(), true); + auto shape_ptr = abstract->BuildShape(); + MS_EXCEPTION_IF_NULL(shape_ptr); + EXPECT_EQ(shape_ptr->isa(), true); + auto shape = shape_ptr->cast(); + MS_EXCEPTION_IF_NULL(shape); + auto shape_vec = shape->shape(); + EXPECT_EQ(shape_vec.size(), 2); + EXPECT_EQ(shape_vec[0], 4); + EXPECT_EQ(shape_vec[1], 3); + auto type = abstract->BuildType(); + MS_EXCEPTION_IF_NULL(type); + EXPECT_EQ(type->isa(), true); + auto tensor_type = type->cast(); + MS_EXCEPTION_IF_NULL(tensor_type); + auto data_type = tensor_type->element(); + MS_EXCEPTION_IF_NULL(data_type); + EXPECT_EQ(data_type->type_id(), kNumberTypeInt64); +} + +} // namespace ops +} // namespace mindspore diff --git a/tests/ut/cpp/ops/test_ops_range.cc b/tests/ut/cpp/ops/test_ops_range.cc new file mode 100644 index 00000000000..dd583c88093 --- /dev/null +++ b/tests/ut/cpp/ops/test_ops_range.cc @@ -0,0 +1,124 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#include +#include +#include "common/common_test.h" +#include "ops/range.h" +#include "ir/dtype/type.h" +#include "ir/value.h" +#include "abstract/dshape.h" +#include "utils/tensor_construct_utils.h" + +namespace mindspore { +namespace ops { +class TestRange : public UT::Common { + public: + TestRange() {} + void SetUp() {} + void TearDown() {} +}; + +TEST_F(TestRange, test_ops_range1) { + auto range = std::make_shared(); + range->Init(1, 3, 34, 4); + EXPECT_EQ(range->get_d_type(), 1); + EXPECT_EQ(range->get_start(), 3); + EXPECT_EQ(range->get_limit(), 34); + EXPECT_EQ(range->get_delta(), 4); + range->set_d_type(1); + range->set_start(3); + range->set_limit(34); + range->set_delta(4); + auto abstract = range->Infer({}); + MS_EXCEPTION_IF_NULL(abstract); + EXPECT_EQ(abstract->isa(), true); + auto shape_ptr = abstract->BuildShape(); + MS_EXCEPTION_IF_NULL(shape_ptr); + EXPECT_EQ(shape_ptr->isa(), true); + auto shape = shape_ptr->cast(); + MS_EXCEPTION_IF_NULL(shape); + auto shape_vec = shape->shape(); + auto type = abstract->BuildType(); + MS_EXCEPTION_IF_NULL(type); + EXPECT_EQ(type->isa(), true); + auto tensor_type = type->cast(); + MS_EXCEPTION_IF_NULL(tensor_type); + auto data_type = tensor_type->element(); + MS_EXCEPTION_IF_NULL(data_type); + EXPECT_EQ(data_type->type_id(), kNumberTypeInt32); + EXPECT_EQ(shape_vec.size(), 1); + EXPECT_EQ(shape_vec[0], 8); + EXPECT_EQ(range->get_d_type(), 1); + EXPECT_EQ(range->get_start(), 3); + EXPECT_EQ(range->get_limit(), 34); + EXPECT_EQ(range->get_delta(), 4); +} + +TEST_F(TestRange, test_ops_range2) { + auto range = std::make_shared(); + range->Init(1, 1, 1, 1); + EXPECT_EQ(range->get_d_type(), 1); + EXPECT_EQ(range->get_start(), 1); + EXPECT_EQ(range->get_limit(), 1); + EXPECT_EQ(range->get_delta(), 1); + range->set_d_type(1); + range->set_start(1); + range->set_limit(1); + range->set_delta(1); + auto tensor_x1 = std::make_shared(kNumberTypeFloat32, std::vector{1}); + auto tensor_x2 = std::make_shared(kNumberTypeFloat32, std::vector{1}); + auto tensor_x3 = std::make_shared(kNumberTypeFloat32, std::vector{1}); + MS_EXCEPTION_IF_NULL(tensor_x1); + MS_EXCEPTION_IF_NULL(tensor_x2); + MS_EXCEPTION_IF_NULL(tensor_x3); + auto data_x1 = tensor_x1->data_c(); + MS_EXCEPTION_IF_NULL(data_x1); + auto val_x1 = reinterpret_cast(data_x1); + *val_x1 = 1.0; + auto data_x2 = tensor_x2->data_c(); + MS_EXCEPTION_IF_NULL(data_x2); + auto val_x2 = reinterpret_cast(data_x2); + *val_x2 = 42.0; + auto data_x3 = tensor_x3->data_c(); + MS_EXCEPTION_IF_NULL(data_x3); + auto val_x3 = reinterpret_cast(data_x3); + *val_x3 = 3.0; + auto abstract = range->Infer({tensor_x1->ToAbstract(), tensor_x2->ToAbstract(), tensor_x3->ToAbstract()}); + MS_EXCEPTION_IF_NULL(abstract); + EXPECT_EQ(abstract->isa(), true); + auto shape_ptr = abstract->BuildShape(); + MS_EXCEPTION_IF_NULL(shape_ptr); + EXPECT_EQ(shape_ptr->isa(), true); + auto shape = shape_ptr->cast(); + MS_EXCEPTION_IF_NULL(shape); + auto shape_vec = shape->shape(); + auto type = abstract->BuildType(); + MS_EXCEPTION_IF_NULL(type); + EXPECT_EQ(type->isa(), true); + auto tensor_type = type->cast(); + MS_EXCEPTION_IF_NULL(tensor_type); + auto data_type = tensor_type->element(); + MS_EXCEPTION_IF_NULL(data_type); + EXPECT_EQ(data_type->type_id(), kNumberTypeFloat32); + EXPECT_EQ(shape_vec.size(), 1); + EXPECT_EQ(shape_vec[0], 14); + EXPECT_EQ(range->get_d_type(), 1); + EXPECT_EQ(range->get_start(), 1); + EXPECT_EQ(range->get_limit(), 1); + EXPECT_EQ(range->get_delta(), 1); +} +} // namespace ops +} // namespace mindspore 
diff --git a/tests/ut/cpp/ops/test_ops_rank.cc b/tests/ut/cpp/ops/test_ops_rank.cc
new file mode 100644
index 00000000000..6e598f6c0f5
--- /dev/null
+++ b/tests/ut/cpp/ops/test_ops_rank.cc
@@ -0,0 +1,61 @@
+/**
+ * Copyright 2020 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include <vector>
+#include <memory>
+#include "common/common_test.h"
+#include "ops/rank.h"
+#include "ir/dtype/type.h"
+#include "ir/value.h"
+#include "abstract/dshape.h"
+#include "utils/tensor_construct_utils.h"
+
+namespace mindspore {
+namespace ops {
+
+class TestRank : public UT::Common {
+ public:
+  TestRank() {}
+  void SetUp() {}
+  void TearDown() {}
+};
+
+TEST_F(TestRank, test_ops_rank1) {
+  auto rank = std::make_shared<Rank>();
+  rank->Init();
+  auto x = TensorConstructUtils::CreateOnesTensor(kNumberTypeFloat32, std::vector<int64_t>{2, 2});
+  MS_EXCEPTION_IF_NULL(x);
+  auto abstract = rank->Infer({x->ToAbstract()});
+  MS_EXCEPTION_IF_NULL(abstract);
+  EXPECT_EQ(abstract->isa<abstract::AbstractTensor>(), true);
+  auto shape_ptr = abstract->BuildShape();
+  MS_EXCEPTION_IF_NULL(shape_ptr);
+  EXPECT_EQ(shape_ptr->isa<abstract::Shape>(), true);
+  auto shape = shape_ptr->cast<abstract::ShapePtr>();
+  MS_EXCEPTION_IF_NULL(shape);
+  auto shape_vec = shape->shape();
+  EXPECT_EQ(shape_vec.size(), 0);
+  auto type = abstract->BuildType();
+  MS_EXCEPTION_IF_NULL(type);
+  EXPECT_EQ(type->isa<TensorType>(), true);
+  auto tensor_type = type->cast<TensorTypePtr>();
+  MS_EXCEPTION_IF_NULL(tensor_type);
+  auto data_type = tensor_type->element();
+  MS_EXCEPTION_IF_NULL(data_type);
+  EXPECT_EQ(data_type->type_id(), kMetaTypeNone);
+}
+
+}  // namespace ops
+}  // namespace mindspore
diff --git a/tests/ut/cpp/ops/test_ops_realdiv.cc b/tests/ut/cpp/ops/test_ops_realdiv.cc
new file mode 100644
index 00000000000..03223f57afe
--- /dev/null
+++ b/tests/ut/cpp/ops/test_ops_realdiv.cc
@@ -0,0 +1,64 @@
+/**
+ * Copyright 2020 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ +#include +#include +#include "common/common_test.h" +#include "ops/real_div.h" +#include "ir/dtype/type.h" +#include "ir/value.h" +#include "abstract/dshape.h" +#include "utils/tensor_construct_utils.h" + +namespace mindspore { +namespace ops { +class TestRealDiv : public UT::Common { + public: + TestRealDiv() {} + void SetUp() {} + void TearDown() {} +}; + +TEST_F(TestRealDiv, test_ops_realdiv1) { + auto realdiv = std::make_shared(); + auto tensor_x1 = TensorConstructUtils::CreateOnesTensor(kNumberTypeFloat32, std::vector{2, 3, 4, 5}); + auto tensor_x2 = TensorConstructUtils::CreateOnesTensor(kNumberTypeFloat32, std::vector{2, 3, 4, 5}); + MS_EXCEPTION_IF_NULL(tensor_x1); + MS_EXCEPTION_IF_NULL(tensor_x2); + auto abstract = realdiv->Infer({tensor_x1->ToAbstract(), tensor_x2->ToAbstract()}); + MS_EXCEPTION_IF_NULL(abstract); + EXPECT_EQ(abstract->isa(), true); + auto shape_ptr = abstract->BuildShape(); + MS_EXCEPTION_IF_NULL(shape_ptr); + EXPECT_EQ(shape_ptr->isa(), true); + auto shape = shape_ptr->cast(); + MS_EXCEPTION_IF_NULL(shape); + auto shape_vec = shape->shape(); + auto type = abstract->BuildType(); + MS_EXCEPTION_IF_NULL(type); + EXPECT_EQ(type->isa(), true); + auto tensor_type = type->cast(); + MS_EXCEPTION_IF_NULL(tensor_type); + auto data_type = tensor_type->element(); + MS_EXCEPTION_IF_NULL(data_type); + EXPECT_EQ(data_type->type_id(), kNumberTypeFloat32); + EXPECT_EQ(shape_vec.size(), 4); + EXPECT_EQ(shape_vec[0], 2); + EXPECT_EQ(shape_vec[1], 3); + EXPECT_EQ(shape_vec[2], 4); + EXPECT_EQ(shape_vec[3], 5); +} +} // namespace ops +} // namespace mindspore diff --git a/tests/ut/cpp/ops/test_ops_reshape.cc b/tests/ut/cpp/ops/test_ops_reshape.cc new file mode 100644 index 00000000000..3bb08be183a --- /dev/null +++ b/tests/ut/cpp/ops/test_ops_reshape.cc @@ -0,0 +1,90 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#include +#include +#include "common/common_test.h" +#include "ops/reshape.h" +#include "ir/dtype/type.h" +#include "ir/value.h" +#include "abstract/dshape.h" +#include "utils/tensor_construct_utils.h" + +namespace mindspore { +namespace ops { +class TestReshape : public UT::Common { + public: + TestReshape() {} + void SetUp() {} + void TearDown() {} +}; + +TEST_F(TestReshape, test_ops_reshape1) { + auto reshape = std::make_shared(); + auto tensor_x = TensorConstructUtils::CreateOnesTensor(kNumberTypeFloat32, std::vector{2, 3, 4, 5}); + auto input_shape = MakeValue(std::vector{6, 2, 10}); + MS_EXCEPTION_IF_NULL(tensor_x); + auto abstract = reshape->Infer({tensor_x->ToAbstract(), input_shape->ToAbstract()}); + MS_EXCEPTION_IF_NULL(abstract); + EXPECT_EQ(abstract->isa(), true); + auto shape_ptr = abstract->BuildShape(); + MS_EXCEPTION_IF_NULL(shape_ptr); + EXPECT_EQ(shape_ptr->isa(), true); + auto shape = shape_ptr->cast(); + MS_EXCEPTION_IF_NULL(shape); + auto shape_vec = shape->shape(); + auto type = abstract->BuildType(); + MS_EXCEPTION_IF_NULL(type); + EXPECT_EQ(type->isa(), true); + auto tensor_type = type->cast(); + MS_EXCEPTION_IF_NULL(tensor_type); + auto data_type = tensor_type->element(); + MS_EXCEPTION_IF_NULL(data_type); + EXPECT_EQ(data_type->type_id(), kNumberTypeFloat32); + EXPECT_EQ(shape_vec.size(), 3); + EXPECT_EQ(shape_vec[0], 6); + EXPECT_EQ(shape_vec[1], 2); + EXPECT_EQ(shape_vec[2], 10); +} + +TEST_F(TestReshape, test_ops_reshape2) { + auto reshape = std::make_shared(); + auto tensor_x = TensorConstructUtils::CreateOnesTensor(kNumberTypeFloat16, std::vector{2, 3, 4, 5}); + auto input_shape = MakeValue(std::vector{6, 2, -1}); + MS_EXCEPTION_IF_NULL(tensor_x); + auto abstract = reshape->Infer({tensor_x->ToAbstract(), input_shape->ToAbstract()}); + MS_EXCEPTION_IF_NULL(abstract); + EXPECT_EQ(abstract->isa(), true); + auto shape_ptr = abstract->BuildShape(); + MS_EXCEPTION_IF_NULL(shape_ptr); + EXPECT_EQ(shape_ptr->isa(), true); + auto shape = shape_ptr->cast(); + MS_EXCEPTION_IF_NULL(shape); + auto shape_vec = shape->shape(); + auto type = abstract->BuildType(); + MS_EXCEPTION_IF_NULL(type); + EXPECT_EQ(type->isa(), true); + auto tensor_type = type->cast(); + MS_EXCEPTION_IF_NULL(tensor_type); + auto data_type = tensor_type->element(); + MS_EXCEPTION_IF_NULL(data_type); + EXPECT_EQ(data_type->type_id(), kNumberTypeFloat16); + EXPECT_EQ(shape_vec.size(), 3); + EXPECT_EQ(shape_vec[0], 6); + EXPECT_EQ(shape_vec[1], 2); + EXPECT_EQ(shape_vec[2], 10); +} +} // namespace ops +} // namespace mindspore diff --git a/tests/ut/cpp/ops/test_ops_reverse_v2.cc b/tests/ut/cpp/ops/test_ops_reverse_v2.cc new file mode 100644 index 00000000000..64c5a52100d --- /dev/null +++ b/tests/ut/cpp/ops/test_ops_reverse_v2.cc @@ -0,0 +1,64 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#include +#include +#include "common/common_test.h" +#include "ops/reverse_v2.h" +#include "ir/dtype/type.h" +#include "ir/value.h" +#include "abstract/dshape.h" +#include "utils/tensor_construct_utils.h" + +namespace mindspore { +namespace ops { + +class TestReverseV2 : public UT::Common { + public: + TestReverseV2() {} + void SetUp() {} + void TearDown() {} +}; + +TEST_F(TestReverseV2, test_ops_reverse_v2_1) { + auto reverse_v2 = std::make_shared(); + reverse_v2->Init(std::vector{2}); + EXPECT_EQ(reverse_v2->get_axis(), std::vector{2}); + auto input0 = TensorConstructUtils::CreateOnesTensor(kNumberTypeInt32, std::vector{2, 4}); + MS_EXCEPTION_IF_NULL(input0); + auto abstract = reverse_v2->Infer({input0->ToAbstract()}); + MS_EXCEPTION_IF_NULL(abstract); + EXPECT_EQ(abstract->isa(), true); + auto shape_ptr = abstract->BuildShape(); + MS_EXCEPTION_IF_NULL(shape_ptr); + EXPECT_EQ(shape_ptr->isa(), true); + auto shape = shape_ptr->cast(); + MS_EXCEPTION_IF_NULL(shape); + auto shape_vec = shape->shape(); + EXPECT_EQ(shape_vec.size(), 2); + EXPECT_EQ(shape_vec[0], 2); + EXPECT_EQ(shape_vec[1], 4); + auto type = abstract->BuildType(); + MS_EXCEPTION_IF_NULL(type); + EXPECT_EQ(type->isa(), true); + auto tensor_type = type->cast(); + MS_EXCEPTION_IF_NULL(tensor_type); + auto data_type = tensor_type->element(); + MS_EXCEPTION_IF_NULL(data_type); + EXPECT_EQ(data_type->type_id(), kNumberTypeInt32); +} + +} // namespace ops +} // namespace mindspore diff --git a/tests/ut/cpp/ops/test_ops_rfft.cc b/tests/ut/cpp/ops/test_ops_rfft.cc new file mode 100644 index 00000000000..63eb6d03f83 --- /dev/null +++ b/tests/ut/cpp/ops/test_ops_rfft.cc @@ -0,0 +1,63 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#include +#include +#include "common/common_test.h" +#include "ops/rfft.h" +#include "ir/dtype/type.h" +#include "abstract/dshape.h" +#include "utils/tensor_construct_utils.h" +namespace mindspore { +namespace ops { +class TestRfft : public UT::Common { + public: + TestRfft() {} + void SetUp() {} + void TearDown() {} +}; + +TEST_F(TestRfft, test_ops_rfft) { + auto rfft = std::make_shared(); + rfft->Init(2); + EXPECT_EQ(rfft->get_fft_length(), 2); + rfft->set_fft_length(2); + auto tensor_x = TensorConstructUtils::CreateOnesTensor(kNumberTypeFloat32, std::vector{1, 3}); + MS_EXCEPTION_IF_NULL(tensor_x); + auto rfft_abstract = rfft->Infer({tensor_x->ToAbstract()}); + MS_EXCEPTION_IF_NULL(rfft_abstract); + EXPECT_EQ(rfft_abstract->isa(), true); + auto shape_ptr = rfft_abstract->BuildShape(); + MS_EXCEPTION_IF_NULL(shape_ptr); + EXPECT_EQ(shape_ptr->isa(), true); + auto rfft_shape = shape_ptr->cast(); + MS_EXCEPTION_IF_NULL(rfft_shape); + auto shape_vec = rfft_shape->shape(); + auto type = rfft_abstract->BuildType(); + MS_EXCEPTION_IF_NULL(type); + EXPECT_EQ(type->isa(), true); + auto tensor_type = type->cast(); + MS_EXCEPTION_IF_NULL(tensor_type); + auto elem_type = tensor_type->element(); + EXPECT_EQ(elem_type->type_id(), kNumberTypeComplex64); + EXPECT_EQ(shape_vec.size(), 3); + EXPECT_EQ(shape_vec[0], 1); + EXPECT_EQ(shape_vec[1], 2); + EXPECT_EQ(shape_vec[2], 2); + EXPECT_EQ(rfft->get_fft_length(), 2); +} + +} // namespace ops +} // namespace mindspore diff --git a/tests/ut/cpp/ops/test_ops_round.cc b/tests/ut/cpp/ops/test_ops_round.cc new file mode 100644 index 00000000000..1d92d6075f3 --- /dev/null +++ b/tests/ut/cpp/ops/test_ops_round.cc @@ -0,0 +1,61 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+#include <vector>
+#include <memory>
+#include "common/common_test.h"
+#include "ops/round.h"
+#include "ir/dtype/type.h"
+#include "ir/value.h"
+#include "abstract/dshape.h"
+#include "utils/tensor_construct_utils.h"
+
+namespace mindspore {
+namespace ops {
+
+class TestRound : public UT::Common {
+ public:
+  TestRound() {}
+  void SetUp() {}
+  void TearDown() {}
+};
+
+TEST_F(TestRound, test_ops_round1) {
+  auto round = std::make_shared<Round>();
+  auto input0 = TensorConstructUtils::CreateOnesTensor(kNumberTypeFloat32, std::vector<int64_t>{5});
+  MS_EXCEPTION_IF_NULL(input0);
+  auto abstract = round->Infer({input0->ToAbstract()});
+  MS_EXCEPTION_IF_NULL(abstract);
+  EXPECT_EQ(abstract->isa<abstract::AbstractTensor>(), true);
+  auto shape_ptr = abstract->BuildShape();
+  MS_EXCEPTION_IF_NULL(shape_ptr);
+  EXPECT_EQ(shape_ptr->isa<abstract::Shape>(), true);
+  auto shape = shape_ptr->cast<abstract::ShapePtr>();
+  MS_EXCEPTION_IF_NULL(shape);
+  auto shape_vec = shape->shape();
+  EXPECT_EQ(shape_vec.size(), 1);
+  EXPECT_EQ(shape_vec[0], 5);
+  auto type = abstract->BuildType();
+  MS_EXCEPTION_IF_NULL(type);
+  EXPECT_EQ(type->isa<TensorType>(), true);
+  auto tensor_type = type->cast<TensorTypePtr>();
+  MS_EXCEPTION_IF_NULL(tensor_type);
+  auto data_type = tensor_type->element();
+  MS_EXCEPTION_IF_NULL(data_type);
+  EXPECT_EQ(data_type->type_id(), kNumberTypeFloat32);
+}
+
+}  // namespace ops
+}  // namespace mindspore
diff --git a/tests/ut/cpp/ops/test_ops_sin.cc b/tests/ut/cpp/ops/test_ops_sin.cc
new file mode 100644
index 00000000000..a4bb3f08d56
--- /dev/null
+++ b/tests/ut/cpp/ops/test_ops_sin.cc
@@ -0,0 +1,59 @@
+/**
+ * Copyright 2020 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ +#include +#include +#include "common/common_test.h" +#include "ops/sin.h" +#include "ir/dtype/type.h" +#include "abstract/dshape.h" +#include "utils/tensor_construct_utils.h" +namespace mindspore { +namespace ops { +class TestSin : public UT::Common { + public: + TestSin() {} + void SetUp() {} + void TearDown() {} +}; + +TEST_F(TestSin, test_ops_sin) { + auto sin = std::make_shared(); + sin->Init(); + auto tensor_x = TensorConstructUtils::CreateOnesTensor(kNumberTypeFloat32, std::vector{1, 4}); + MS_EXCEPTION_IF_NULL(tensor_x); + auto sin_abstract = sin->Infer({tensor_x->ToAbstract()}); + MS_EXCEPTION_IF_NULL(sin_abstract); + EXPECT_EQ(sin_abstract->isa(), true); + auto shape_ptr = sin_abstract->BuildShape(); + MS_EXCEPTION_IF_NULL(shape_ptr); + EXPECT_EQ(shape_ptr->isa(), true); + auto sin_shape = shape_ptr->cast(); + MS_EXCEPTION_IF_NULL(sin_shape); + auto shape_vec = sin_shape->shape(); + auto type = sin_abstract->BuildType(); + MS_EXCEPTION_IF_NULL(type); + EXPECT_EQ(type->isa(), true); + auto tensor_type = type->cast(); + MS_EXCEPTION_IF_NULL(tensor_type); + auto elem_type = tensor_type->element(); + EXPECT_EQ(elem_type->type_id(), kNumberTypeFloat32); + EXPECT_EQ(shape_vec.size(), 2); + EXPECT_EQ(shape_vec[0], 1); + EXPECT_EQ(shape_vec[1], 4); +} + +} // namespace ops +} // namespace mindspore diff --git a/tests/ut/cpp/ops/test_ops_slice_fusion.cc b/tests/ut/cpp/ops/test_ops_slice_fusion.cc new file mode 100644 index 00000000000..ae66891f397 --- /dev/null +++ b/tests/ut/cpp/ops/test_ops_slice_fusion.cc @@ -0,0 +1,69 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#include +#include +#include "common/common_test.h" +#include "ops/fusion/slice_fusion.h" +#include "ir/dtype/type.h" +#include "ir/value.h" +#include "abstract/dshape.h" +#include "utils/tensor_construct_utils.h" + +namespace mindspore { +namespace ops { + +class TestSliceFusion : public UT::Common { + public: + TestSliceFusion() {} + void SetUp() {} + void TearDown() {} +}; + +TEST_F(TestSliceFusion, test_ops_slice_fusion1) { + auto slice_fusion = std::make_shared(); + slice_fusion->Init(std::vector{1}); + EXPECT_EQ(slice_fusion->get_axes(), std::vector{1}); + auto input0 = TensorConstructUtils::CreateOnesTensor(kNumberTypeInt32, std::vector{3, 3, 3}); + auto input1 = MakeValue(std::vector{1, 0, 0}); + auto input2 = MakeValue(std::vector{1, 1, 3}); + MS_EXCEPTION_IF_NULL(input0); + MS_EXCEPTION_IF_NULL(input1); + MS_EXCEPTION_IF_NULL(input2); + auto abstract = slice_fusion->Infer({input0->ToAbstract(), input1->ToAbstract(), input2->ToAbstract()}); + MS_EXCEPTION_IF_NULL(abstract); + EXPECT_EQ(abstract->isa(), true); + auto shape_ptr = abstract->BuildShape(); + MS_EXCEPTION_IF_NULL(shape_ptr); + EXPECT_EQ(shape_ptr->isa(), true); + auto shape = shape_ptr->cast(); + MS_EXCEPTION_IF_NULL(shape); + auto shape_vec = shape->shape(); + EXPECT_EQ(shape_vec.size(), 3); + EXPECT_EQ(shape_vec[0], 1); + EXPECT_EQ(shape_vec[1], 1); + EXPECT_EQ(shape_vec[2], 3); + auto type = abstract->BuildType(); + MS_EXCEPTION_IF_NULL(type); + EXPECT_EQ(type->isa(), true); + auto tensor_type = type->cast(); + MS_EXCEPTION_IF_NULL(tensor_type); + auto data_type = tensor_type->element(); + MS_EXCEPTION_IF_NULL(data_type); + EXPECT_EQ(data_type->type_id(), kNumberTypeInt32); +} + +} // namespace ops +} // namespace mindspore diff --git a/tests/ut/cpp/ops/test_ops_softmax.cc b/tests/ut/cpp/ops/test_ops_softmax.cc new file mode 100644 index 00000000000..be7b81d3131 --- /dev/null +++ b/tests/ut/cpp/ops/test_ops_softmax.cc @@ -0,0 +1,124 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#include +#include +#include "common/common_test.h" +#include "ops/softmax.h" +#include "ir/dtype/type.h" +#include "ir/value.h" +#include "abstract/dshape.h" +#include "utils/tensor_construct_utils.h" + +namespace mindspore { +namespace ops { +class TestSoftMax : public UT::Common { + public: + TestSoftMax() {} + void SetUp() {} + void TearDown() {} +}; + +TEST_F(TestSoftMax, test_ops_softmax1) { + auto softmax = std::make_shared(); + std::vector init_data = {-1}; + softmax->Init(-1); + EXPECT_EQ(softmax->get_axis(), init_data); + softmax->set_axis(init_data); + EXPECT_EQ(softmax->get_axis(), init_data); + auto input1 = TensorConstructUtils::CreateOnesTensor(kNumberTypeFloat16, std::vector{1, 2, 3, 4, 5}); + MS_EXCEPTION_IF_NULL(input1); + auto abstract = softmax->Infer({input1->ToAbstract()}); + MS_EXCEPTION_IF_NULL(abstract); + EXPECT_EQ(abstract->isa(), true); + auto shape_ptr = abstract->BuildShape(); + MS_EXCEPTION_IF_NULL(shape_ptr); + EXPECT_EQ(shape_ptr->isa(), true); + auto shape = shape_ptr->cast(); + MS_EXCEPTION_IF_NULL(shape); + auto shape_vec = shape->shape(); + auto type = abstract->BuildType(); + MS_EXCEPTION_IF_NULL(type); + EXPECT_EQ(type->isa(), true); + auto tensor_type = type->cast(); + MS_EXCEPTION_IF_NULL(tensor_type); + auto data_type = tensor_type->element(); + MS_EXCEPTION_IF_NULL(data_type); + EXPECT_EQ(data_type->type_id(), kNumberTypeFloat16); + EXPECT_EQ(shape_vec.size(), 5); + EXPECT_EQ(shape_vec[0], 1); +} + +TEST_F(TestSoftMax, test_ops_softmax2) { + auto softmax = std::make_shared(); + std::vector init_data = {-1}; + softmax->Init(-1); + EXPECT_EQ(softmax->get_axis(), init_data); + softmax->set_axis(init_data); + EXPECT_EQ(softmax->get_axis(), init_data); + auto input1 = TensorConstructUtils::CreateOnesTensor(kNumberTypeFloat32, std::vector{1, 2, 3, 4, 5}); + MS_EXCEPTION_IF_NULL(input1); + auto abstract = softmax->Infer({input1->ToAbstract()}); + MS_EXCEPTION_IF_NULL(abstract); + EXPECT_EQ(abstract->isa(), true); + auto shape_ptr = abstract->BuildShape(); + MS_EXCEPTION_IF_NULL(shape_ptr); + EXPECT_EQ(shape_ptr->isa(), true); + auto shape = shape_ptr->cast(); + MS_EXCEPTION_IF_NULL(shape); + auto shape_vec = shape->shape(); + auto type = abstract->BuildType(); + MS_EXCEPTION_IF_NULL(type); + EXPECT_EQ(type->isa(), true); + auto tensor_type = type->cast(); + MS_EXCEPTION_IF_NULL(tensor_type); + auto data_type = tensor_type->element(); + MS_EXCEPTION_IF_NULL(data_type); + EXPECT_EQ(data_type->type_id(), kNumberTypeFloat32); + EXPECT_EQ(shape_vec.size(), 5); + EXPECT_EQ(shape_vec[0], 1); +} + +TEST_F(TestSoftMax, test_ops_softmax3) { + auto softmax = std::make_shared(); + std::vector init_data = {-1}; + softmax->Init(-1); + EXPECT_EQ(softmax->get_axis(), init_data); + softmax->set_axis(init_data); + EXPECT_EQ(softmax->get_axis(), init_data); + auto input1 = TensorConstructUtils::CreateOnesTensor(kNumberTypeFloat64, std::vector{1, 2, 3, 4, 5}); + MS_EXCEPTION_IF_NULL(input1); + auto abstract = softmax->Infer({input1->ToAbstract()}); + MS_EXCEPTION_IF_NULL(abstract); + EXPECT_EQ(abstract->isa(), true); + auto shape_ptr = abstract->BuildShape(); + MS_EXCEPTION_IF_NULL(shape_ptr); + EXPECT_EQ(shape_ptr->isa(), true); + auto shape = shape_ptr->cast(); + MS_EXCEPTION_IF_NULL(shape); + auto shape_vec = shape->shape(); + auto type = abstract->BuildType(); + MS_EXCEPTION_IF_NULL(type); + EXPECT_EQ(type->isa(), true); + auto tensor_type = type->cast(); + MS_EXCEPTION_IF_NULL(tensor_type); + auto data_type = tensor_type->element(); + 
MS_EXCEPTION_IF_NULL(data_type); + EXPECT_EQ(data_type->type_id(), kNumberTypeFloat64); + EXPECT_EQ(shape_vec.size(), 5); + EXPECT_EQ(shape_vec[0], 1); +} +} // namespace ops +} // namespace mindspore diff --git a/tests/ut/cpp/ops/test_ops_sqrt.cc b/tests/ut/cpp/ops/test_ops_sqrt.cc new file mode 100644 index 00000000000..db9dceaf160 --- /dev/null +++ b/tests/ut/cpp/ops/test_ops_sqrt.cc @@ -0,0 +1,59 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#include +#include +#include "common/common_test.h" +#include "ops/sqrt.h" +#include "ir/dtype/type.h" +#include "abstract/dshape.h" +#include "utils/tensor_construct_utils.h" +namespace mindspore { +namespace ops { +class TestSqrt : public UT::Common { + public: + TestSqrt() {} + void SetUp() {} + void TearDown() {} +}; + +TEST_F(TestSqrt, test_ops_sqrt) { + auto sqrt = std::make_shared(); + sqrt->Init(); + auto tensor_x = TensorConstructUtils::CreateOnesTensor(kNumberTypeFloat32, std::vector{1, 4}); + MS_EXCEPTION_IF_NULL(tensor_x); + auto sqrt_abstract = sqrt->Infer({tensor_x->ToAbstract()}); + MS_EXCEPTION_IF_NULL(sqrt_abstract); + EXPECT_EQ(sqrt_abstract->isa(), true); + auto shape_ptr = sqrt_abstract->BuildShape(); + MS_EXCEPTION_IF_NULL(shape_ptr); + EXPECT_EQ(shape_ptr->isa(), true); + auto sqrt_shape = shape_ptr->cast(); + MS_EXCEPTION_IF_NULL(sqrt_shape); + auto shape_vec = sqrt_shape->shape(); + auto type = sqrt_abstract->BuildType(); + MS_EXCEPTION_IF_NULL(type); + EXPECT_EQ(type->isa(), true); + auto tensor_type = type->cast(); + MS_EXCEPTION_IF_NULL(tensor_type); + auto elem_type = tensor_type->element(); + EXPECT_EQ(elem_type->type_id(), kNumberTypeFloat32); + EXPECT_EQ(shape_vec.size(), 2); + EXPECT_EQ(shape_vec[0], 1); + EXPECT_EQ(shape_vec[1], 4); +} + +} // namespace ops +} // namespace mindspore diff --git a/tests/ut/cpp/ops/test_ops_square.cc b/tests/ut/cpp/ops/test_ops_square.cc new file mode 100644 index 00000000000..36eb17362db --- /dev/null +++ b/tests/ut/cpp/ops/test_ops_square.cc @@ -0,0 +1,60 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#include +#include +#include "common/common_test.h" +#include "ops/square.h" +#include "ir/dtype/type.h" +#include "ir/value.h" +#include "abstract/dshape.h" +#include "utils/tensor_construct_utils.h" + +namespace mindspore { +namespace ops { +class TestSquare : public UT::Common { + public: + TestSquare() {} + void SetUp() {} + void TearDown() {} +}; + +TEST_F(TestSquare, test_ops_square1) { + auto square = std::make_shared(); + square->Init(); + auto input0 = TensorConstructUtils::CreateOnesTensor(kNumberTypeFloat32, std::vector{3}); + MS_EXCEPTION_IF_NULL(input0); + auto abstract = square->Infer({input0->ToAbstract()}); + MS_EXCEPTION_IF_NULL(abstract); + EXPECT_EQ(abstract->isa(), true); + auto shape_ptr = abstract->BuildShape(); + MS_EXCEPTION_IF_NULL(shape_ptr); + EXPECT_EQ(shape_ptr->isa(), true); + auto shape = shape_ptr->cast(); + MS_EXCEPTION_IF_NULL(shape); + auto shape_vec = shape->shape(); + EXPECT_EQ(shape_vec.size(), 1); + EXPECT_EQ(shape_vec[0], 3); + auto type = abstract->BuildType(); + MS_EXCEPTION_IF_NULL(type); + EXPECT_EQ(type->isa(), true); + auto tensor_type = type->cast(); + MS_EXCEPTION_IF_NULL(tensor_type); + auto data_type = tensor_type->element(); + MS_EXCEPTION_IF_NULL(data_type); + EXPECT_EQ(data_type->type_id(), kNumberTypeFloat32); +} +} // namespace ops +} // namespace mindspore diff --git a/tests/ut/cpp/ops/test_ops_squareddifference.cc b/tests/ut/cpp/ops/test_ops_squareddifference.cc new file mode 100644 index 00000000000..e6eb4e228fc --- /dev/null +++ b/tests/ut/cpp/ops/test_ops_squareddifference.cc @@ -0,0 +1,61 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#include +#include +#include "common/common_test.h" +#include "ops/squared_difference.h" +#include "ir/dtype/type.h" +#include "abstract/dshape.h" +#include "utils/tensor_construct_utils.h" +namespace mindspore { +namespace ops { +class TestSquaredDifference : public UT::Common { + public: + TestSquaredDifference() {} + void SetUp() {} + void TearDown() {} +}; + +TEST_F(TestSquaredDifference, test_ops_squareddifference) { + auto squareddifference = std::make_shared(); + squareddifference->Init(); + auto tensor_x = TensorConstructUtils::CreateOnesTensor(kNumberTypeFloat32, std::vector{1, 3}); + auto tensor_y = TensorConstructUtils::CreateOnesTensor(kNumberTypeFloat32, std::vector{1, 3}); + MS_EXCEPTION_IF_NULL(tensor_x); + MS_EXCEPTION_IF_NULL(tensor_y); + auto squareddifference_abstract = squareddifference->Infer({tensor_x->ToAbstract(), tensor_y->ToAbstract()}); + MS_EXCEPTION_IF_NULL(squareddifference_abstract); + EXPECT_EQ(squareddifference_abstract->isa(), true); + auto shape_ptr = squareddifference_abstract->BuildShape(); + MS_EXCEPTION_IF_NULL(shape_ptr); + EXPECT_EQ(shape_ptr->isa(), true); + auto squareddifference_shape = shape_ptr->cast(); + MS_EXCEPTION_IF_NULL(squareddifference_shape); + auto shape_vec = squareddifference_shape->shape(); + auto type = squareddifference_abstract->BuildType(); + MS_EXCEPTION_IF_NULL(type); + EXPECT_EQ(type->isa(), true); + auto tensor_type = type->cast(); + MS_EXCEPTION_IF_NULL(tensor_type); + auto elem_type = tensor_type->element(); + EXPECT_EQ(elem_type->type_id(), kNumberTypeFloat32); + EXPECT_EQ(shape_vec.size(), 2); + EXPECT_EQ(shape_vec[0], 1); + EXPECT_EQ(shape_vec[1], 3); +} + +} // namespace ops +} // namespace mindspore diff --git a/tests/ut/cpp/ops/test_ops_strided_slice.cc b/tests/ut/cpp/ops/test_ops_strided_slice.cc new file mode 100644 index 00000000000..00190bb80bb --- /dev/null +++ b/tests/ut/cpp/ops/test_ops_strided_slice.cc @@ -0,0 +1,204 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+#include <vector>
+#include <memory>
+#include "common/common_test.h"
+#include "ops/strided_slice.h"
+#include "ir/dtype/type.h"
+#include "ir/value.h"
+#include "abstract/dshape.h"
+#include "utils/tensor_construct_utils.h"
+
+namespace mindspore {
+namespace ops {
+namespace {
+template <typename T>
+void SetTensorData(void *data, std::vector<T> num) {
+  MS_EXCEPTION_IF_NULL(data);
+  auto tensor_data = reinterpret_cast<T *>(data);
+  MS_EXCEPTION_IF_NULL(tensor_data);
+  for (size_t index = 0; index < num.size(); ++index) {
+    tensor_data[index] = num[index];
+  }
+}
+}  // namespace
+class TestStridedSlice : public UT::Common {
+ public:
+  TestStridedSlice() {}
+  void SetUp() {}
+  void TearDown() {}
+};
+
+TEST_F(TestStridedSlice, test_ops_stridedslice1) {
+  auto stridedslice = std::make_shared<StridedSlice>();
+  stridedslice->Init(0, 0, 0, 0, 0);
+  EXPECT_EQ(stridedslice->get_begin_mask(), 0);
+  EXPECT_EQ(stridedslice->get_end_mask(), 0);
+  EXPECT_EQ(stridedslice->get_ellipsis_mask(), 0);
+  EXPECT_EQ(stridedslice->get_new_axis_mask(), 0);
+  EXPECT_EQ(stridedslice->get_shrink_axis_mask(), 0);
+  auto tensor_x = TensorConstructUtils::CreateOnesTensor(kNumberTypeFloat32, std::vector<int64_t>{3, 3, 3});
+  auto begin = MakeValue(std::vector<int64_t>{1, 0, 0});
+  auto end = MakeValue(std::vector<int64_t>{2, 1, 3});
+  auto strides = MakeValue(std::vector<int64_t>{1, 1, 1});
+  MS_EXCEPTION_IF_NULL(tensor_x);
+  MS_EXCEPTION_IF_NULL(begin);
+  MS_EXCEPTION_IF_NULL(end);
+  MS_EXCEPTION_IF_NULL(strides);
+  auto abstract =
+    stridedslice->Infer({tensor_x->ToAbstract(), begin->ToAbstract(), end->ToAbstract(), strides->ToAbstract()});
+  MS_EXCEPTION_IF_NULL(abstract);
+  EXPECT_EQ(abstract->isa<abstract::AbstractTensor>(), true);
+  auto shape_ptr = abstract->BuildShape();
+  MS_EXCEPTION_IF_NULL(shape_ptr);
+  EXPECT_EQ(shape_ptr->isa<abstract::Shape>(), true);
+  auto shape = shape_ptr->cast<abstract::ShapePtr>();
+  MS_EXCEPTION_IF_NULL(shape);
+  auto shape_vec = shape->shape();
+  auto type = abstract->BuildType();
+  MS_EXCEPTION_IF_NULL(type);
+  EXPECT_EQ(type->isa<TensorType>(), true);
+  auto tensor_type = type->cast<TensorTypePtr>();
+  MS_EXCEPTION_IF_NULL(tensor_type);
+  auto data_type = tensor_type->element();
+  MS_EXCEPTION_IF_NULL(data_type);
+  EXPECT_EQ(data_type->type_id(), kNumberTypeFloat32);
+  EXPECT_EQ(shape_vec.size(), 3);
+  EXPECT_EQ(shape_vec[0], 1);
+  EXPECT_EQ(shape_vec[1], 1);
+  EXPECT_EQ(shape_vec[2], 3);
+}
+/*
+TEST_F(TestStridedSlice, test_ops_stridedslice2) {
+auto stridedslice = std::make_shared<StridedSlice>();
+stridedslice->Init(0, 0, 0, 0, 0);
+EXPECT_EQ(stridedslice->get_begin_mask(), 0);
+EXPECT_EQ(stridedslice->get_end_mask(), 0);
+EXPECT_EQ(stridedslice->get_ellipsis_mask(), 0);
+EXPECT_EQ(stridedslice->get_new_axis_mask(), 0);
+EXPECT_EQ(stridedslice->get_shrink_axis_mask(), 0);
+auto tensor_x = TensorConstructUtils::CreateOnesTensor(kNumberTypeFloat32, std::vector<int64_t>{3,3,3});
+auto begin = MakeValue(std::vector<int64_t>{1,0,0});
+auto end = MakeValue(std::vector<int64_t>{2,2,3});
+auto strides =MakeValue(std::vector<int64_t>{1,1,1});
+MS_EXCEPTION_IF_NULL(tensor_x);
+MS_EXCEPTION_IF_NULL(begin);
+MS_EXCEPTION_IF_NULL(end);
+MS_EXCEPTION_IF_NULL(strides);
+auto abstract =
+stridedslice->Infer({tensor_x->ToAbstract(),begin->ToAbstract(),end->ToAbstract(),strides->ToAbstract()});
+MS_EXCEPTION_IF_NULL(abstract);
+EXPECT_EQ(abstract->isa<abstract::AbstractTensor>(), true);
+auto shape_ptr = abstract->BuildShape();
+MS_EXCEPTION_IF_NULL(shape_ptr);
+EXPECT_EQ(shape_ptr->isa<abstract::Shape>(), true);
+auto shape = shape_ptr->cast<abstract::ShapePtr>();
+MS_EXCEPTION_IF_NULL(shape);
+auto shape_vec = shape->shape();
+auto type = abstract->BuildType();
+MS_EXCEPTION_IF_NULL(type);
+EXPECT_EQ(type->isa<TensorType>(), true);
+auto tensor_type = type->cast<TensorTypePtr>();
+MS_EXCEPTION_IF_NULL(tensor_type); +auto data_type = tensor_type->element(); +MS_EXCEPTION_IF_NULL(data_type); +EXPECT_EQ(data_type->type_id(), kNumberTypeFloat32); +EXPECT_EQ(shape_vec.size(), 3); +EXPECT_EQ(shape_vec[0], 1); +EXPECT_EQ(shape_vec[1], 2); +EXPECT_EQ(shape_vec[2], 3); +} + +TEST_F(TestStridedSlice, test_ops_stridedslice3) { +auto stridedslice = std::make_shared(); +stridedslice->Init(0, 0, 0, 0, 0); +EXPECT_EQ(stridedslice->get_begin_mask(), 0); +EXPECT_EQ(stridedslice->get_end_mask(), 0); +EXPECT_EQ(stridedslice->get_ellipsis_mask(), 0); +EXPECT_EQ(stridedslice->get_new_axis_mask(), 0); +EXPECT_EQ(stridedslice->get_shrink_axis_mask(), 0); +auto tensor_x = TensorConstructUtils::CreateOnesTensor(kNumberTypeFloat32, std::vector{3,3,3}); +auto begin = MakeValue(std::vector{1,0,0}); +auto end = MakeValue(std::vector{2,-3,3}); +auto strides =MakeValue(std::vector{1,-1,1}); +MS_EXCEPTION_IF_NULL(tensor_x); +MS_EXCEPTION_IF_NULL(begin); +MS_EXCEPTION_IF_NULL(end); +MS_EXCEPTION_IF_NULL(strides); +auto abstract = +stridedslice->Infer({tensor_x->ToAbstract(),begin->ToAbstract(),end->ToAbstract(),strides->ToAbstract()}); +MS_EXCEPTION_IF_NULL(abstract); +EXPECT_EQ(abstract->isa(), true); +auto shape_ptr = abstract->BuildShape(); +MS_EXCEPTION_IF_NULL(shape_ptr); +EXPECT_EQ(shape_ptr->isa(), true); +auto shape = shape_ptr->cast(); +MS_EXCEPTION_IF_NULL(shape); +auto shape_vec = shape->shape(); +auto type = abstract->BuildType(); +MS_EXCEPTION_IF_NULL(type); +EXPECT_EQ(type->isa(), true); +auto tensor_type = type->cast(); +MS_EXCEPTION_IF_NULL(tensor_type); +auto data_type = tensor_type->element(); +MS_EXCEPTION_IF_NULL(data_type); +EXPECT_EQ(data_type->type_id(), kNumberTypeFloat32); +EXPECT_EQ(shape_vec.size(), 3); +EXPECT_EQ(shape_vec[0], 1); +EXPECT_EQ(shape_vec[1], 2); +EXPECT_EQ(shape_vec[2], 3); +} + +TEST_F(TestStridedSlice, test_ops_stridedslice4) { +auto stridedslice = std::make_shared(); +stridedslice->Init(0, 0, 0, 0, 0); +EXPECT_EQ(stridedslice->get_begin_mask(), 0); +EXPECT_EQ(stridedslice->get_end_mask(), 0); +EXPECT_EQ(stridedslice->get_ellipsis_mask(), 0); +EXPECT_EQ(stridedslice->get_new_axis_mask(), 0); +EXPECT_EQ(stridedslice->get_shrink_axis_mask(), 0); + +auto tensor_x = TensorConstructUtils::CreateOnesTensor(kNumberTypeFloat32, std::vector{5}); +auto begin = MakeValue(std::vector{1}); +auto end = MakeValue(std::vector{-2}); +auto strides =MakeValue(std::vector{1}); +MS_EXCEPTION_IF_NULL(tensor_x); +MS_EXCEPTION_IF_NULL(begin); +MS_EXCEPTION_IF_NULL(end); +MS_EXCEPTION_IF_NULL(strides); +auto abstract = +stridedslice->Infer({tensor_x->ToAbstract(),begin->ToAbstract(),end->ToAbstract(),strides->ToAbstract()}); +MS_EXCEPTION_IF_NULL(abstract); +EXPECT_EQ(abstract->isa(), true); +auto shape_ptr = abstract->BuildShape(); +MS_EXCEPTION_IF_NULL(shape_ptr); +EXPECT_EQ(shape_ptr->isa(), true); +auto shape = shape_ptr->cast(); +MS_EXCEPTION_IF_NULL(shape); +auto shape_vec = shape->shape(); +auto type = abstract->BuildType(); +MS_EXCEPTION_IF_NULL(type); +EXPECT_EQ(type->isa(), true); +auto tensor_type = type->cast(); +MS_EXCEPTION_IF_NULL(tensor_type); +auto data_type = tensor_type->element(); +MS_EXCEPTION_IF_NULL(data_type); +EXPECT_EQ(data_type->type_id(), kNumberTypeFloat32); +EXPECT_EQ(shape_vec.size(), 1); +EXPECT_EQ(shape_vec[0], 2); +}*/ +} // namespace ops +} // namespace mindspore \ No newline at end of file diff --git a/tests/ut/cpp/ops/test_ops_sub.cc b/tests/ut/cpp/ops/test_ops_sub.cc new file mode 100644 index 00000000000..628f793cd95 --- /dev/null +++ 
b/tests/ut/cpp/ops/test_ops_sub.cc @@ -0,0 +1,61 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#include +#include +#include "common/common_test.h" +#include "ops/sub.h" +#include "ir/dtype/type.h" +#include "abstract/dshape.h" +#include "utils/tensor_construct_utils.h" +namespace mindspore { +namespace ops { +class TestSub : public UT::Common { + public: + TestSub() {} + void SetUp() {} + void TearDown() {} +}; + +TEST_F(TestSub, test_ops_sub) { + auto sub = std::make_shared(); + sub->Init(); + auto tensor_x = TensorConstructUtils::CreateOnesTensor(kNumberTypeFloat32, std::vector{1, 3}); + auto tensor_y = TensorConstructUtils::CreateOnesTensor(kNumberTypeFloat32, std::vector{1, 3}); + MS_EXCEPTION_IF_NULL(tensor_x); + MS_EXCEPTION_IF_NULL(tensor_y); + auto sub_abstract = sub->Infer({tensor_x->ToAbstract(), tensor_y->ToAbstract()}); + MS_EXCEPTION_IF_NULL(sub_abstract); + EXPECT_EQ(sub_abstract->isa(), true); + auto shape_ptr = sub_abstract->BuildShape(); + MS_EXCEPTION_IF_NULL(shape_ptr); + EXPECT_EQ(shape_ptr->isa(), true); + auto sub_shape = shape_ptr->cast(); + MS_EXCEPTION_IF_NULL(sub_shape); + auto shape_vec = sub_shape->shape(); + auto type = sub_abstract->BuildType(); + MS_EXCEPTION_IF_NULL(type); + EXPECT_EQ(type->isa(), true); + auto tensor_type = type->cast(); + MS_EXCEPTION_IF_NULL(tensor_type); + auto elem_type = tensor_type->element(); + EXPECT_EQ(elem_type->type_id(), kNumberTypeFloat32); + EXPECT_EQ(shape_vec.size(), 2); + EXPECT_EQ(shape_vec[0], 1); + EXPECT_EQ(shape_vec[1], 3); +} + +} // namespace ops +} // namespace mindspore diff --git a/tests/ut/cpp/ops/test_ops_topk.cc b/tests/ut/cpp/ops/test_ops_topk.cc new file mode 100644 index 00000000000..955ae007dee --- /dev/null +++ b/tests/ut/cpp/ops/test_ops_topk.cc @@ -0,0 +1,84 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#include +#include +#include "common/common_test.h" +#include "ops/topk.h" +#include "ir/dtype/type.h" +#include "ir/value.h" +#include "abstract/dshape.h" +#include "utils/tensor_construct_utils.h" + +namespace mindspore { +namespace ops { +class TestTopK : public UT::Common { + public: + TestTopK() {} + void SetUp() {} + void TearDown() {} +}; + +TEST_F(TestTopK, test_topk) { + auto topk = std::make_shared(); + bool sorted = true; + topk->Init(sorted); + EXPECT_EQ(topk->get_sorted(), true); + auto tensor_x = std::make_shared(kNumberTypeFloat16, std::vector{5}); + MS_EXCEPTION_IF_NULL(tensor_x); + auto tensor_x_data = reinterpret_cast(tensor_x->data_c()); + *tensor_x_data = 1; + tensor_x_data++; + *tensor_x_data = 2; + tensor_x_data++; + *tensor_x_data = 3; + tensor_x_data++; + *tensor_x_data = 4; + tensor_x_data++; + *tensor_x_data = 5; + tensor_x_data++; + auto k = MakeValue(3); + MS_EXCEPTION_IF_NULL(k); + auto abstract = topk->Infer({tensor_x->ToAbstract(), k->ToAbstract()}); + MS_EXCEPTION_IF_NULL(abstract); + EXPECT_EQ(abstract->isa(), true); + auto shape_ptr = abstract->BuildShape(); + MS_EXCEPTION_IF_NULL(shape_ptr); + EXPECT_EQ(shape_ptr->isa(), true); + auto shape = shape_ptr->cast(); + MS_EXCEPTION_IF_NULL(shape); + auto shape_vec = shape->shape(); + EXPECT_EQ(shape_vec.size(), 2); + auto shape1 = shape_vec[0]->cast()->shape(); + EXPECT_EQ(shape1.size(), 1); + EXPECT_EQ(shape1[0], 3); + auto shape2 = shape_vec[1]->cast()->shape(); + EXPECT_EQ(shape2.size(), 1); + EXPECT_EQ(shape2[0], 3); + auto type_ptr = abstract->BuildType(); + MS_EXCEPTION_IF_NULL(type_ptr); + auto type = type_ptr->cast(); + auto type_vec = type->elements(); + EXPECT_EQ(type_vec.size(), 2); + MS_EXCEPTION_IF_NULL(type_vec[0]); + auto data_type1 = type_vec[0]->cast()->element(); + MS_EXCEPTION_IF_NULL(data_type1); + EXPECT_EQ(data_type1->type_id(), kNumberTypeFloat16); + auto data_type2 = type_vec[1]->cast()->element(); + MS_EXCEPTION_IF_NULL(data_type2); + EXPECT_EQ(data_type2->type_id(), kNumberTypeInt32); +} +} // namespace ops +} // namespace mindspore diff --git a/tests/ut/cpp/ops/test_ops_unpack.cc b/tests/ut/cpp/ops/test_ops_unpack.cc new file mode 100644 index 00000000000..d653daaff8a --- /dev/null +++ b/tests/ut/cpp/ops/test_ops_unpack.cc @@ -0,0 +1,73 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#include +#include +#include "common/common_test.h" +#include "ops/unpack.h" +#include "ir/dtype/type.h" +#include "ir/value.h" +#include "abstract/dshape.h" +#include "utils/tensor_construct_utils.h" + +namespace mindspore { +namespace ops { + +class TestUnpack : public UT::Common { + public: + TestUnpack() {} + void SetUp() {} + void TearDown() {} +}; + +TEST_F(TestUnpack, test_ops_unpack1) { + auto unpack = std::make_shared(); + unpack->Init(); + EXPECT_EQ(unpack->get_axis(), 0); + auto input = TensorConstructUtils::CreateOnesTensor(kNumberTypeInt32, std::vector{2, 4}); + MS_EXCEPTION_IF_NULL(input); + auto abstract = unpack->Infer({input->ToAbstract()}); + MS_EXCEPTION_IF_NULL(abstract); + EXPECT_EQ(abstract->isa(), true); + auto shape_ptr = abstract->BuildShape(); + MS_EXCEPTION_IF_NULL(shape_ptr); + EXPECT_EQ(shape_ptr->isa(), true); + auto shape = shape_ptr->cast(); + MS_EXCEPTION_IF_NULL(shape); + auto shape_vec = shape->shape(); + EXPECT_EQ(shape_vec.size(), 2); + auto shape1 = shape_vec[0]->cast()->shape(); + EXPECT_EQ(shape1.size(), 1); + EXPECT_EQ(shape1[0], 4); + auto shape2 = shape_vec[1]->cast()->shape(); + EXPECT_EQ(shape2.size(), 1); + EXPECT_EQ(shape2[0], 4); + auto type_ptr = abstract->BuildType(); + MS_EXCEPTION_IF_NULL(type_ptr); + auto type = type_ptr->cast(); + MS_EXCEPTION_IF_NULL(type); + auto type_vec = type->elements(); + MS_EXCEPTION_IF_NULL(type_vec[0]); + auto data_type = type_vec[0]->cast()->element(); + MS_EXCEPTION_IF_NULL(data_type); + EXPECT_EQ(data_type->type_id(), kNumberTypeInt32); + MS_EXCEPTION_IF_NULL(type_vec[1]); + auto data1_type = type_vec[1]->cast()->element(); + MS_EXCEPTION_IF_NULL(data1_type); + EXPECT_EQ(data1_type->type_id(), kNumberTypeInt32); +} + +} // namespace ops +} // namespace mindspore diff --git a/tests/ut/cpp/ops/test_ops_unsorted_segment_sum.cc b/tests/ut/cpp/ops/test_ops_unsorted_segment_sum.cc new file mode 100644 index 00000000000..795e4d1e72b --- /dev/null +++ b/tests/ut/cpp/ops/test_ops_unsorted_segment_sum.cc @@ -0,0 +1,65 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#include +#include +#include "common/common_test.h" +#include "ops/unsorted_segment_sum.h" +#include "ir/dtype/type.h" +#include "ir/value.h" +#include "abstract/dshape.h" +#include "utils/tensor_construct_utils.h" + +namespace mindspore { +namespace ops { +class TestUnsortedSegmentSum : public UT::Common { + public: + TestUnsortedSegmentSum() {} + void SetUp() {} + void TearDown() {} +}; + +TEST_F(TestUnsortedSegmentSum, test_cops_unsortedsegmentsum) { + auto unsortedsegmentsum = std::make_shared(); + auto tensor_x = TensorConstructUtils::CreateOnesTensor(kNumberTypeFloat32, std::vector{1, 4}); + auto segment_ids = TensorConstructUtils::CreateOnesTensor(kNumberTypeInt32, std::vector{1, 4}); + auto num_segments = MakeValue(4); + MS_EXCEPTION_IF_NULL(tensor_x); + MS_EXCEPTION_IF_NULL(segment_ids); + MS_EXCEPTION_IF_NULL(num_segments); + auto abstract = + unsortedsegmentsum->Infer({tensor_x->ToAbstract(), segment_ids->ToAbstract(), num_segments->ToAbstract()}); + MS_EXCEPTION_IF_NULL(abstract); + EXPECT_EQ(abstract->isa(), true); + auto shape_ptr = abstract->BuildShape(); + MS_EXCEPTION_IF_NULL(shape_ptr); + EXPECT_EQ(shape_ptr->isa(), true); + auto shape = shape_ptr->cast(); + MS_EXCEPTION_IF_NULL(shape); + auto shape_vec = shape->shape(); + auto type = abstract->BuildType(); + MS_EXCEPTION_IF_NULL(type); + EXPECT_EQ(type->isa(), true); + auto tensor_type = type->cast(); + MS_EXCEPTION_IF_NULL(tensor_type); + auto data_type = tensor_type->element(); + MS_EXCEPTION_IF_NULL(data_type); + EXPECT_EQ(data_type->type_id(), kNumberTypeFloat32); + EXPECT_EQ(shape_vec.size(), 1); + EXPECT_EQ(shape_vec[0], 4); +} + +} // namespace ops +} // namespace mindspore diff --git a/tests/ut/cpp/ops/test_ops_unsqueeze.cc b/tests/ut/cpp/ops/test_ops_unsqueeze.cc new file mode 100644 index 00000000000..7fcf6829e0d --- /dev/null +++ b/tests/ut/cpp/ops/test_ops_unsqueeze.cc @@ -0,0 +1,112 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#include +#include +#include "common/common_test.h" +#include "ops/unsqueeze.h" +#include "ir/dtype/type.h" +#include "ir/value.h" +#include "abstract/dshape.h" +#include "utils/tensor_construct_utils.h" + +namespace mindspore { +namespace ops { +class TestUnsqueeze : public UT::Common { + public: + TestUnsqueeze() {} + void SetUp() {} + void TearDown() {} +}; + +/*TEST_F(TestUnsqueeze, test_unsqueeze_1) { + auto unsqueeze = std::make_shared(); + std::vector axis = {}; + unsqueeze->Init(axis); + auto tensor_x = std::make_shared(kNumberTypeFloat16, std::vector{1, 3, 2}); + MS_EXCEPTION_IF_NULL(tensor_x); + auto tensor_x_data = reinterpret_cast(tensor_x->data_c()); + *tensor_x_data = 1; + tensor_x_data++; + *tensor_x_data = 2; + tensor_x_data++; + *tensor_x_data = 3; + tensor_x_data++; + *tensor_x_data = 4; + tensor_x_data++; + *tensor_x_data = 5; + tensor_x_data++; + *tensor_x_data = 6; + tensor_x_data++; + auto abstract = unsqueeze->Infer({tensor_x->ToAbstract()}); + MS_EXCEPTION_IF_NULL(abstract); + EXPECT_EQ(abstract->isa(), true); + auto shape_ptr = abstract->BuildShape(); + MS_EXCEPTION_IF_NULL(shape_ptr); + EXPECT_EQ(shape_ptr->isa(), true); + auto shape = shape_ptr->cast(); + MS_EXCEPTION_IF_NULL(shape); + auto shape_vec = shape->shape(); + EXPECT_EQ(shape_vec.size(), 2); + EXPECT_EQ(shape_vec[0], 3); + EXPECT_EQ(shape_vec[1], 2); + auto type = abstract->BuildType(); + MS_EXCEPTION_IF_NULL(type); + EXPECT_EQ(type->isa(), true); + auto tensor_type = type->cast(); + MS_EXCEPTION_IF_NULL(tensor_type); + auto data_type = tensor_type->element(); + MS_EXCEPTION_IF_NULL(data_type); + EXPECT_EQ(data_type->type_id(), kNumberTypeFloat16); +} +*/ +TEST_F(TestUnsqueeze, test_unsqueeze_1) { + auto unsqueeze = std::make_shared(); + std::vector axis = {1, 2}; + unsqueeze->Init(axis); + auto tensor_x = std::make_shared(kNumberTypeFloat32, std::vector{1, 3}); + MS_EXCEPTION_IF_NULL(tensor_x); + auto tensor_x_data = reinterpret_cast(tensor_x->data_c()); + *tensor_x_data = 1; + tensor_x_data++; + *tensor_x_data = 2; + tensor_x_data++; + *tensor_x_data = 3; + tensor_x_data++; + auto abstract = unsqueeze->Infer({tensor_x->ToAbstract()}); + MS_EXCEPTION_IF_NULL(abstract); + EXPECT_EQ(abstract->isa(), true); + auto shape_ptr = abstract->BuildShape(); + MS_EXCEPTION_IF_NULL(shape_ptr); + EXPECT_EQ(shape_ptr->isa(), true); + auto shape = shape_ptr->cast(); + MS_EXCEPTION_IF_NULL(shape); + auto shape_vec = shape->shape(); + EXPECT_EQ(shape_vec.size(), 4); + EXPECT_EQ(shape_vec[0], 1); + EXPECT_EQ(shape_vec[1], 1); + EXPECT_EQ(shape_vec[2], 1); + EXPECT_EQ(shape_vec[3], 3); + auto type = abstract->BuildType(); + MS_EXCEPTION_IF_NULL(type); + EXPECT_EQ(type->isa(), true); + auto tensor_type = type->cast(); + MS_EXCEPTION_IF_NULL(tensor_type); + auto data_type = tensor_type->element(); + MS_EXCEPTION_IF_NULL(data_type); + EXPECT_EQ(data_type->type_id(), kNumberTypeFloat32); +} +} // namespace ops +} // namespace mindspore diff --git a/tests/ut/cpp/ops/test_ops_unstack.cc b/tests/ut/cpp/ops/test_ops_unstack.cc new file mode 100644 index 00000000000..ae2171e34a2 --- /dev/null +++ b/tests/ut/cpp/ops/test_ops_unstack.cc @@ -0,0 +1,73 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#include +#include +#include "common/common_test.h" +#include "ops/unstack.h" +#include "ir/dtype/type.h" +#include "ir/value.h" +#include "abstract/dshape.h" +#include "utils/tensor_construct_utils.h" + +namespace mindspore { +namespace ops { + +class TestUnstack : public UT::Common { + public: + TestUnstack() {} + void SetUp() {} + void TearDown() {} +}; + +TEST_F(TestUnstack, test_ops_unstack1) { + auto unstack = std::make_shared(); + unstack->Init(); + EXPECT_EQ(unstack->get_axis(), 0); + auto input = TensorConstructUtils::CreateOnesTensor(kNumberTypeInt32, std::vector{2, 4}); + MS_EXCEPTION_IF_NULL(input); + auto abstract = unstack->Infer({input->ToAbstract()}); + MS_EXCEPTION_IF_NULL(abstract); + EXPECT_EQ(abstract->isa(), true); + auto shape_ptr = abstract->BuildShape(); + MS_EXCEPTION_IF_NULL(shape_ptr); + EXPECT_EQ(shape_ptr->isa(), true); + auto shape = shape_ptr->cast(); + MS_EXCEPTION_IF_NULL(shape); + auto shape_vec = shape->shape(); + EXPECT_EQ(shape_vec.size(), 2); + auto shape1 = shape_vec[0]->cast()->shape(); + EXPECT_EQ(shape1.size(), 1); + EXPECT_EQ(shape1[0], 4); + auto shape2 = shape_vec[1]->cast()->shape(); + EXPECT_EQ(shape2.size(), 1); + EXPECT_EQ(shape2[0], 4); + auto type_ptr = abstract->BuildType(); + MS_EXCEPTION_IF_NULL(type_ptr); + auto type = type_ptr->cast(); + MS_EXCEPTION_IF_NULL(type); + auto type_vec = type->elements(); + MS_EXCEPTION_IF_NULL(type_vec[0]); + auto data_type = type_vec[0]->cast()->element(); + MS_EXCEPTION_IF_NULL(data_type); + EXPECT_EQ(data_type->type_id(), kNumberTypeInt32); + MS_EXCEPTION_IF_NULL(type_vec[1]); + auto data1_type = type_vec[1]->cast()->element(); + MS_EXCEPTION_IF_NULL(data1_type); + EXPECT_EQ(data1_type->type_id(), kNumberTypeInt32); +} + +} // namespace ops +} // namespace mindspore diff --git a/tests/ut/cpp/ops/test_ops_where.cc b/tests/ut/cpp/ops/test_ops_where.cc new file mode 100644 index 00000000000..629e780cee6 --- /dev/null +++ b/tests/ut/cpp/ops/test_ops_where.cc @@ -0,0 +1,95 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#include +#include +#include "common/common_test.h" +#include "ops/where.h" +#include "ir/dtype/type.h" +#include "ir/value.h" +#include "abstract/dshape.h" +#include "utils/tensor_construct_utils.h" + +namespace mindspore { +namespace ops { + +class TestWhere : public UT::Common { + public: + TestWhere() {} + void SetUp() {} + void TearDown() {} +}; + +TEST_F(TestWhere, test_ops_where1) { + // auto where = std::make_shared(); + // where->Init(); + // auto inputs0 = TensorConstructUtils::CreateOnesTensor(kNumberTypeInt64, std::vector{2, 3}); + // auto inputs1 = TensorConstructUtils::CreateOnesTensor(kNumberTypeInt64, std::vector{2, 3}); + // MS_EXCEPTION_IF_NULL(inputs0); + // MS_EXCEPTION_IF_NULL(inputs1); + // auto abstract = where->Infer({inputs0->ToAbstract(), inputs1->ToAbstract()}); + // MS_EXCEPTION_IF_NULL(abstract); + // EXPECT_EQ(abstract->isa(), true); + // auto shape_ptr = abstract->BuildShape(); + // MS_EXCEPTION_IF_NULL(shape_ptr); + // EXPECT_EQ(shape_ptr->isa(), true); + // auto shape = shape_ptr->cast(); + // MS_EXCEPTION_IF_NULL(shape); + // auto shape_vec = shape->shape(); + // EXPECT_EQ(shape_vec.size(), 2); + // EXPECT_EQ(shape_vec[0], 2); + // EXPECT_EQ(shape_vec[1], 3); + // auto type = abstract->BuildType(); + // MS_EXCEPTION_IF_NULL(type); + // EXPECT_EQ(type->isa(), true); + // auto tensor_type = type->cast(); + // MS_EXCEPTION_IF_NULL(tensor_type); + // auto data_type = tensor_type->element(); + // MS_EXCEPTION_IF_NULL(data_type); + // EXPECT_EQ(data_type->type_id(), kNumberTypeInt64); +} + +TEST_F(TestWhere, test_ops_where2) { + auto where = std::make_shared(); + where->Init(); + auto inputs0 = TensorConstructUtils::CreateOnesTensor(kNumberTypeInt64, std::vector{1}); + auto inputs1 = TensorConstructUtils::CreateOnesTensor(kNumberTypeInt64, std::vector{4}); + auto inputs2 = TensorConstructUtils::CreateOnesTensor(kNumberTypeInt64, std::vector{1}); + MS_EXCEPTION_IF_NULL(inputs0); + MS_EXCEPTION_IF_NULL(inputs1); + MS_EXCEPTION_IF_NULL(inputs2); + auto abstract = where->Infer({inputs0->ToAbstract(), inputs1->ToAbstract(), inputs2->ToAbstract()}); + MS_EXCEPTION_IF_NULL(abstract); + EXPECT_EQ(abstract->isa(), true); + auto shape_ptr = abstract->BuildShape(); + MS_EXCEPTION_IF_NULL(shape_ptr); + EXPECT_EQ(shape_ptr->isa(), true); + auto shape = shape_ptr->cast(); + MS_EXCEPTION_IF_NULL(shape); + auto shape_vec = shape->shape(); + EXPECT_EQ(shape_vec.size(), 1); + EXPECT_EQ(shape_vec[0], 4); + auto type = abstract->BuildType(); + MS_EXCEPTION_IF_NULL(type); + EXPECT_EQ(type->isa(), true); + auto tensor_type = type->cast(); + MS_EXCEPTION_IF_NULL(tensor_type); + auto data_type = tensor_type->element(); + MS_EXCEPTION_IF_NULL(data_type); + EXPECT_EQ(data_type->type_id(), kNumberTypeInt64); +} + +} // namespace ops +} // namespace mindspore