Update Pool ops' attrs: rename ksize to kernel_size and padding to pad_mode

This commit is contained in:
yuchaojie 2021-01-15 17:16:11 +08:00
parent dfa6daaa57
commit b51b3a6764
57 changed files with 216 additions and 214 deletions

View File

@ -151,10 +151,10 @@
{"op_name": "GeluGrad", "inputs": [{"index": 0, "name": "dy", "need_compile": false, "param_type": "required", "shape": "all"}, {"index": 1, "name": "x", "need_compile": false, "param_type": "required", "shape": "all"}, {"index": 2, "name": "y", "need_compile": false, "param_type": "required", "shape": "all"}], "outputs": [{"index": 0, "name": "z", "need_compile": true, "param_type": "required", "shape": "all"}], "attr": [], "fusion_type": "ELEMWISE", "dtype_format": [[["float16", "DefaultFormat"], ["float16", "DefaultFormat"], ["float16", "DefaultFormat"], ["float16", "DefaultFormat"]], [["float16", "NC1HWC0"], ["float16", "NC1HWC0"], ["float16", "NC1HWC0"], ["float16", "NC1HWC0"]], [["float16", "FRACTAL_NZ"], ["float16", "FRACTAL_NZ"], ["float16", "FRACTAL_NZ"], ["float16", "FRACTAL_NZ"]], [["float32", "DefaultFormat"], ["float32", "DefaultFormat"], ["float32", "DefaultFormat"], ["float32", "DefaultFormat"]], [["float32", "NC1HWC0"], ["float32", "NC1HWC0"], ["float32", "NC1HWC0"], ["float32", "NC1HWC0"]], [["float32", "FRACTAL_NZ"], ["float32", "FRACTAL_NZ"], ["float32", "FRACTAL_NZ"], ["float32", "FRACTAL_NZ"]]], "imply_type": "TBE", "async_flag": false, "binfile_name": "gelu_grad.so", "compute_cost": 10, "kernel_name": "gelu_grad", "partial_flag": true, "reshape_type": "", "dynamic_format": false, "dynamic_shape": false, "op_pattern": ""}
{"op_name": "FastGelu", "inputs": [{"index": 0, "name": "x", "need_compile": false, "param_type": "required", "shape": "all"}], "outputs": [{"index": 0, "name": "y", "need_compile": false, "param_type": "required", "shape": "all"}], "attr": [], "fusion_type": "ELEMWISE", "dtype_format": [[["float16", ""], ["float16", ""]], [["float32", ""], ["float32", ""]]], "imply_type": "TBE", "async_flag": false, "binfile_name": "fast_gelu.so", "compute_cost": 10, "kernel_name": "fast_gelu", "partial_flag": true, "reshape_type": "", "dynamic_format": false, "dynamic_shape": false, "op_pattern": "formatAgnostic"}
{"op_name": "FastGeluGrad", "inputs": [{"index": 0, "name": "dy", "need_compile": false, "param_type": "required", "shape": "all"}, {"index": 1, "name": "x", "need_compile": false, "param_type": "required", "shape": "all"}], "outputs": [{"index": 0, "name": "z", "need_compile": true, "param_type": "required", "shape": "all"}], "attr": [], "fusion_type": "ELEMWISE", "dtype_format": [[["float16", "DefaultFormat"], ["float16", "DefaultFormat"], ["float16", "DefaultFormat"]], [["float16", "NC1HWC0"], ["float16", "NC1HWC0"], ["float16", "NC1HWC0"]], [["float16", "FRACTAL_NZ"], ["float16", "FRACTAL_NZ"], ["float16", "FRACTAL_NZ"]], [["float32", "DefaultFormat"], ["float32", "DefaultFormat"], ["float32", "DefaultFormat"]], [["float32", "NC1HWC0"], ["float32", "NC1HWC0"], ["float32", "NC1HWC0"]], [["float32", "FRACTAL_NZ"], ["float32", "FRACTAL_NZ"], ["float32", "FRACTAL_NZ"]]], "imply_type": "TBE", "async_flag": false, "binfile_name": "fast_gelu_grad.so", "compute_cost": 10, "kernel_name": "fast_gelu_grad", "partial_flag": true, "reshape_type": "", "dynamic_format": false, "dynamic_shape": false, "op_pattern": ""}
{"op_name": "MaxPool", "inputs": [{"index": 0, "name": "input_data", "need_compile": false, "param_type": "required", "shape": "all"}], "outputs": [{"index": 0, "name": "output_data", "need_compile": false, "param_type": "required", "shape": "all"}], "attr": [{"name": "ksize", "param_type": "required", "type": "listInt", "value": "all"}, {"name": "strides", "param_type": "required", "type": "listInt", "value": "all"}, {"name": "padding", "param_type": "required", "type": "str", "value": "all"}, {"name": "data_format", "param_type": "required", "type": "str", "value": "all"}], "fusion_type": "OPAQUE", "dtype_format": [[["float16", "NC1HWC0"], ["float16", "NC1HWC0"]]], "imply_type": "TBE", "async_flag": false, "binfile_name": "max_pool.so", "compute_cost": 10, "kernel_name": "max_pool", "partial_flag": true, "reshape_type": "", "dynamic_format": false, "dynamic_shape": false, "op_pattern": ""}
{"op_name": "MaxPoolGrad", "inputs": [{"index": 0, "name": "x1", "need_compile": false, "param_type": "required", "shape": "all"}, {"index": 1, "name": "x2", "need_compile": false, "param_type": "required", "shape": "all"}, {"index": 2, "name": "grad", "need_compile": false, "param_type": "required", "shape": "all"}], "outputs": [{"index": 0, "name": "y", "need_compile": false, "param_type": "required", "shape": "all"}], "attr": [{"name": "ksize", "param_type": "required", "type": "listInt", "value": "all"}, {"name": "strides", "param_type": "required", "type": "listInt", "value": "all"}, {"name": "padding", "param_type": "required", "type": "str", "value": "all"}], "fusion_type": "OPAQUE", "dtype_format": [[["float16", "NC1HWC0"], ["float16", "NC1HWC0"], ["float16", "NC1HWC0"], ["float16", "NC1HWC0"]]], "imply_type": "TBE", "async_flag": false, "binfile_name": "max_pool_grad.so", "compute_cost": 10, "kernel_name": "max_pool_grad", "partial_flag": true, "reshape_type": "", "dynamic_format": false, "dynamic_shape": false, "op_pattern": ""}
{"op_name": "MaxPoolGradWithArgmax", "inputs": [{"index": 0, "name": "x", "need_compile": false, "param_type": "required", "shape": "all"}, {"index": 1, "name": "grad", "need_compile": false, "param_type": "required", "shape": "all"}, {"index": 2, "name": "argmax", "need_compile": false, "param_type": "optional", "shape": "all"}], "outputs": [{"index": 0, "name": "y", "need_compile": false, "param_type": "required", "shape": "all"}], "attr": [{"name": "ksize", "param_type": "required", "type": "listInt", "value": "all"}, {"name": "strides", "param_type": "required", "type": "listInt", "value": "all"}, {"name": "padding", "param_type": "required", "type": "str", "value": "all"}], "fusion_type": "OPAQUE", "dtype_format": [[["float16", "NC1HWC0"], ["float16", "NC1HWC0"], ["uint16", "NC1HWC0"], ["float16", "NC1HWC0"]], [["float16", "NC1HWC0"], ["float16", "NC1HWC0"], ["int64", "NC1HWC0"], ["float16", "NC1HWC0"]]], "imply_type": "TBE", "async_flag": false, "binfile_name": "max_pool_grad_with_argmax.so", "compute_cost": 10, "kernel_name": "max_pool_grad_with_argmax", "partial_flag": true, "reshape_type": "", "dynamic_format": false, "dynamic_shape": false, "op_pattern": ""}
{"op_name": "MaxPoolWithArgmax", "inputs": [{"index": 0, "name": "x", "need_compile": false, "param_type": "required", "shape": "all"}], "outputs": [{"index": 0, "name": "y", "need_compile": false, "param_type": "required", "shape": "all"}, {"index": 1, "name": "argmax", "need_compile": false, "param_type": "required", "shape": "all"}], "attr": [{"name": "ksize", "param_type": "required", "type": "listInt", "value": "all"}, {"name": "strides", "param_type": "required", "type": "listInt", "value": "all"}, {"name": "padding", "param_type": "required", "type": "str", "value": "all"}], "fusion_type": "CONVLUTION", "dtype_format": [[["float16", "NC1HWC0"], ["float16", "NC1HWC0"], ["uint16", "NC1HWC0"]]], "imply_type": "TBE", "async_flag": false, "binfile_name": "max_pool_with_argmax.so", "compute_cost": 10, "kernel_name": "max_pool_with_argmax", "partial_flag": true, "reshape_type": "", "dynamic_format": false, "dynamic_shape": false, "op_pattern": ""}
{"op_name": "MaxPool", "inputs": [{"index": 0, "name": "input_data", "need_compile": false, "param_type": "required", "shape": "all"}], "outputs": [{"index": 0, "name": "output_data", "need_compile": false, "param_type": "required", "shape": "all"}], "attr": [{"name": "kernel_size", "param_type": "required", "type": "listInt", "value": "all"}, {"name": "strides", "param_type": "required", "type": "listInt", "value": "all"}, {"name": "pad_mode", "param_type": "required", "type": "str", "value": "all"}, {"name": "data_format", "param_type": "required", "type": "str", "value": "all"}], "fusion_type": "OPAQUE", "dtype_format": [[["float16", "NC1HWC0"], ["float16", "NC1HWC0"]]], "imply_type": "TBE", "async_flag": false, "binfile_name": "max_pool.so", "compute_cost": 10, "kernel_name": "max_pool", "partial_flag": true, "reshape_type": "", "dynamic_format": false, "dynamic_shape": false, "op_pattern": ""}
{"op_name": "MaxPoolGrad", "inputs": [{"index": 0, "name": "x1", "need_compile": false, "param_type": "required", "shape": "all"}, {"index": 1, "name": "x2", "need_compile": false, "param_type": "required", "shape": "all"}, {"index": 2, "name": "grad", "need_compile": false, "param_type": "required", "shape": "all"}], "outputs": [{"index": 0, "name": "y", "need_compile": false, "param_type": "required", "shape": "all"}], "attr": [{"name": "kernel_size", "param_type": "required", "type": "listInt", "value": "all"}, {"name": "strides", "param_type": "required", "type": "listInt", "value": "all"}, {"name": "pad_mode", "param_type": "required", "type": "str", "value": "all"}], "fusion_type": "OPAQUE", "dtype_format": [[["float16", "NC1HWC0"], ["float16", "NC1HWC0"], ["float16", "NC1HWC0"], ["float16", "NC1HWC0"]]], "imply_type": "TBE", "async_flag": false, "binfile_name": "max_pool_grad.so", "compute_cost": 10, "kernel_name": "max_pool_grad", "partial_flag": true, "reshape_type": "", "dynamic_format": false, "dynamic_shape": false, "op_pattern": ""}
{"op_name": "MaxPoolGradWithArgmax", "inputs": [{"index": 0, "name": "x", "need_compile": false, "param_type": "required", "shape": "all"}, {"index": 1, "name": "grad", "need_compile": false, "param_type": "required", "shape": "all"}, {"index": 2, "name": "argmax", "need_compile": false, "param_type": "optional", "shape": "all"}], "outputs": [{"index": 0, "name": "y", "need_compile": false, "param_type": "required", "shape": "all"}], "attr": [{"name": "kernel_size", "param_type": "required", "type": "listInt", "value": "all"}, {"name": "strides", "param_type": "required", "type": "listInt", "value": "all"}, {"name": "pad_mode", "param_type": "required", "type": "str", "value": "all"}], "fusion_type": "OPAQUE", "dtype_format": [[["float16", "NC1HWC0"], ["float16", "NC1HWC0"], ["uint16", "NC1HWC0"], ["float16", "NC1HWC0"]], [["float16", "NC1HWC0"], ["float16", "NC1HWC0"], ["int64", "NC1HWC0"], ["float16", "NC1HWC0"]]], "imply_type": "TBE", "async_flag": false, "binfile_name": "max_pool_grad_with_argmax.so", "compute_cost": 10, "kernel_name": "max_pool_grad_with_argmax", "partial_flag": true, "reshape_type": "", "dynamic_format": false, "dynamic_shape": false, "op_pattern": ""}
{"op_name": "MaxPoolWithArgmax", "inputs": [{"index": 0, "name": "x", "need_compile": false, "param_type": "required", "shape": "all"}], "outputs": [{"index": 0, "name": "y", "need_compile": false, "param_type": "required", "shape": "all"}, {"index": 1, "name": "argmax", "need_compile": false, "param_type": "required", "shape": "all"}], "attr": [{"name": "kernel_size", "param_type": "required", "type": "listInt", "value": "all"}, {"name": "strides", "param_type": "required", "type": "listInt", "value": "all"}, {"name": "pad_mode", "param_type": "required", "type": "str", "value": "all"}], "fusion_type": "CONVLUTION", "dtype_format": [[["float16", "NC1HWC0"], ["float16", "NC1HWC0"], ["uint16", "NC1HWC0"]]], "imply_type": "TBE", "async_flag": false, "binfile_name": "max_pool_with_argmax.so", "compute_cost": 10, "kernel_name": "max_pool_with_argmax", "partial_flag": true, "reshape_type": "", "dynamic_format": false, "dynamic_shape": false, "op_pattern": ""}
{"op_name": "Mul", "inputs": [{"index": 0, "name": "x", "need_compile": false, "param_type": "required", "shape": "all"}, {"index": 1, "name": "y", "need_compile": false, "param_type": "required", "shape": "all"}], "outputs": [{"index": 0, "name": "output", "need_compile": false, "param_type": "required", "shape": "all"}], "attr": [], "fusion_type": "ELEMWISE", "dtype_format": [[["", ""], ["", ""], ["", ""]]], "imply_type": "TBE", "async_flag": false, "binfile_name": "mul.so", "compute_cost": 10, "kernel_name": "mul", "partial_flag": true, "reshape_type": "", "dynamic_format": false, "dynamic_shape": false, "op_pattern": "dynamicFormat"}
{"op_name": "Mul", "inputs": [{"index": 0, "name": "x", "need_compile": false, "param_type": "required", "shape": "all"}, {"index": 1, "name": "y", "need_compile": false, "param_type": "required", "shape": "all"}], "outputs": [{"index": 0, "name": "output", "need_compile": false, "param_type": "required", "shape": "all"}], "attr": [], "fusion_type": "ELEMWISE", "dtype_format": [[["", ""], ["", ""], ["", ""]]], "imply_type": "TBE", "async_flag": false, "binfile_name": "mul.so", "compute_cost": 10, "kernel_name": "mul", "partial_flag": true, "reshape_type": "", "dynamic_format": false, "dynamic_shape": true, "op_pattern": "dynamicFormat"}
{"op_name": "RealDiv", "inputs": [{"index": 0, "name": "x", "need_compile": false, "param_type": "required", "shape": "all"}, {"index": 1, "name": "y", "need_compile": false, "param_type": "required", "shape": "all"}], "outputs": [{"index": 0, "name": "z", "need_compile": false, "param_type": "required", "shape": "all"}], "attr": [], "fusion_type": "OPAQUE", "dtype_format": [[["float16", ""], ["float16", ""], ["float16", ""]], [["float32", ""], ["float32", ""], ["float32", ""]]], "imply_type": "TBE", "async_flag": false, "binfile_name": "realdiv.so", "compute_cost": 10, "kernel_name": "real_div", "partial_flag": true, "reshape_type": "", "dynamic_format": false, "dynamic_shape": false, "op_pattern": "broadcast"}
@ -305,9 +305,9 @@
{"op_name": "NotEqual", "inputs": [{"index": 0, "name": "x1", "need_compile": false, "param_type": "required", "shape": "all"}, {"index": 1, "name": "x2", "need_compile": false, "param_type": "required", "shape": "all"}], "outputs": [{"index": 0, "name": "y", "need_compile": false, "param_type": "required", "shape": "all"}], "attr": [], "fusion_type": "ELEMWISE", "dtype_format": [[["int8", ""], ["int8", ""], ["bool", ""]], [["uint8", ""], ["uint8", ""], ["bool", ""]], [["int32", ""], ["int32", ""], ["bool", ""]], [["float16", ""], ["float16", ""], ["bool", ""]], [["float32", ""], ["float32", ""], ["bool", ""]]], "imply_type": "TBE", "async_flag": false, "binfile_name": "not_equal.so", "compute_cost": 10, "kernel_name": "not_equal", "partial_flag": true, "reshape_type": "", "dynamic_format": false, "dynamic_shape": false, "op_pattern": "broadcast"}
{"op_name": "FloorMod", "inputs": [{"index": 0, "name": "x1", "need_compile": false, "param_type": "required", "shape": "all"}, {"index": 1, "name": "x2", "need_compile": false, "param_type": "required", "shape": "all"}], "outputs": [{"index": 0, "name": "y", "need_compile": false, "param_type": "required", "shape": "all"}], "attr": [], "fusion_type": "ELEMWISE", "dtype_format": [[["float16", ""], ["float16", ""], ["float16", ""]], [["float32", ""], ["float32", ""], ["float32", ""]], [["int32", ""], ["int32", ""], ["int32", ""]]], "imply_type": "TBE", "async_flag": false, "binfile_name": "floor_mod.so", "compute_cost": 10, "kernel_name": "floor_mod", "partial_flag": true, "reshape_type": "", "dynamic_format": false, "dynamic_shape": false, "op_pattern": "broadcast"}
{"op_name": "ScatterNdUpdate", "inputs": [{"index": 0, "name": "var", "need_compile": false, "param_type": "required", "shape": "all"}, {"index": 1, "name": "indices", "need_compile": false, "param_type": "required", "shape": "all"}, {"index": 2, "name": "updates", "need_compile": false, "param_type": "required", "shape": "all"}], "outputs": [{"index": 0, "name": "var", "need_compile": false, "param_type": "required", "shape": "all"}], "attr": [{"name": "use_locking", "param_type": "optional", "type": "bool", "value": "all"}], "fusion_type": "ELEMWISE", "dtype_format": [[["float16", "DefaultFormat"], ["int32", "DefaultFormat"], ["float16", "DefaultFormat"], ["float16", "DefaultFormat"]], [["float32", "DefaultFormat"], ["int32", "DefaultFormat"], ["float32", "DefaultFormat"], ["float32", "DefaultFormat"]], [["int8", "DefaultFormat"], ["int32", "DefaultFormat"], ["int8", "DefaultFormat"], ["int8", "DefaultFormat"]], [["uint8", "DefaultFormat"], ["int32", "DefaultFormat"], ["uint8", "DefaultFormat"], ["uint8", "DefaultFormat"]], [["bool", "DefaultFormat"], ["int32", "DefaultFormat"], ["bool", "DefaultFormat"], ["bool", "DefaultFormat"]]], "imply_type": "TBE", "async_flag": false, "binfile_name": "scatter_nd_update.so", "compute_cost": 10, "kernel_name": "scatter_nd_update", "partial_flag": true, "reshape_type": "", "dynamic_format": false, "dynamic_shape": false, "op_pattern": ""}
{"op_name": "AvgPool", "inputs": [{"index": 0, "name": "x", "need_compile": false, "param_type": "required", "shape": "all"}, {"index": 1, "name": "filter", "need_compile": false, "param_type": "optional", "shape": "all"}, {"index": 2, "name": "bias", "need_compile": false, "param_type": "optional", "shape": "all"}], "outputs": [{"index": 0, "name": "y", "need_compile": false, "param_type": "required", "shape": "all"}], "attr": [{"name": "ksize", "param_type": "required", "type": "listInt", "value": "all"}, {"name": "strides", "param_type": "required", "type": "listInt", "value": "all"}, {"name": "padding", "param_type": "required", "type": "str", "value": "all"}, {"name": "data_format", "param_type": "optional", "type": "str", "value": "all"}], "fusion_type": "OPAQUE", "dtype_format": [[["float16", "NC1HWC0"], ["float16", "FracZ"], ["float16", "DefaultFormat"], ["float16", "NC1HWC0"]], [["int8", "NC1HWC0"], ["int8", "C1HWNCoC0"], ["int32", "DefaultFormat"], ["int32", "NC1HWC0"]]], "imply_type": "TBE", "async_flag": false, "binfile_name": "avg_pool.so", "compute_cost": 10, "kernel_name": "avg_pool", "partial_flag": true, "reshape_type": "", "dynamic_format": false, "dynamic_shape": false, "op_pattern": ""}
{"op_name": "AvgPoolGrad", "inputs": [{"index": 0, "name": "input_grad", "need_compile": false, "param_type": "required", "shape": "all"}, {"index": 1, "name": "mean_matrix", "need_compile": false, "param_type": "optional", "shape": "all"}, {"index": 2, "name": "kernel_matrix", "need_compile": false, "param_type": "optional", "shape": "all"}], "outputs": [{"index": 0, "name": "out_grad", "need_compile": true, "param_type": "required", "shape": "all"}], "attr": [{"name": "x_origin", "param_type": "required", "type": "listInt", "value": "all"}, {"name": "ksize", "param_type": "required", "type": "listInt", "value": "all"}, {"name": "strides", "param_type": "required", "type": "listInt", "value": "all"}, {"name": "padding", "param_type": "required", "type": "str", "value": "all"}, {"name": "data_format", "param_type": "optional", "type": "str", "value": "all"}], "fusion_type": "OPAQUE", "dtype_format": [[["float16", "NC1HWC0"], ["float16", "NC1HWC0"], ["float16", "C1HWNCoC0"], ["float16", "NC1HWC0"]]], "imply_type": "TBE", "async_flag": false, "binfile_name": "avg_pool_grad_d.so", "compute_cost": 10, "kernel_name": "avg_pool_grad_d", "partial_flag": true, "reshape_type": "", "dynamic_format": false, "dynamic_shape": false, "op_pattern": ""}
{"op_name": "AvgPoolGradVm", "inputs": [{"index": 0, "name": "input_grad", "need_compile": false, "param_type": "required", "shape": "all"}, {"index": 1, "name": "mean_matrix", "need_compile": false, "param_type": "optional", "shape": "all"}, {"index": 2, "name": "kernel_matrix", "need_compile": false, "param_type": "optional", "shape": "all"}], "outputs": [{"index": 0, "name": "out_grad", "need_compile": true, "param_type": "required", "shape": "all"}], "attr": [{"name": "x_origin", "param_type": "required", "type": "listInt", "value": "all"}, {"name": "ksize", "param_type": "required", "type": "listInt", "value": "all"}, {"name": "strides", "param_type": "required", "type": "listInt", "value": "all"}, {"name": "padding", "param_type": "required", "type": "str", "value": "all"}, {"name": "data_format", "param_type": "optional", "type": "str", "value": "all"}], "fusion_type": "OPAQUE", "dtype_format": [[["float16", "NC1HWC0"], ["float16", "NC1HWC0"], ["float16", "C1HWNCoC0"], ["float16", "NC1HWC0"]]], "imply_type": "TBE", "async_flag": false, "binfile_name": "avg_pool_grad_d.so", "compute_cost": 10, "kernel_name": "avg_pool_grad_d", "partial_flag": true, "reshape_type": "", "dynamic_format": false, "dynamic_shape": false, "op_pattern": ""}
{"op_name": "AvgPool", "inputs": [{"index": 0, "name": "x", "need_compile": false, "param_type": "required", "shape": "all"}, {"index": 1, "name": "filter", "need_compile": false, "param_type": "optional", "shape": "all"}, {"index": 2, "name": "bias", "need_compile": false, "param_type": "optional", "shape": "all"}], "outputs": [{"index": 0, "name": "y", "need_compile": false, "param_type": "required", "shape": "all"}], "attr": [{"name": "kernel_size", "param_type": "required", "type": "listInt", "value": "all"}, {"name": "strides", "param_type": "required", "type": "listInt", "value": "all"}, {"name": "pad_mode", "param_type": "required", "type": "str", "value": "all"}, {"name": "data_format", "param_type": "optional", "type": "str", "value": "all"}], "fusion_type": "OPAQUE", "dtype_format": [[["float16", "NC1HWC0"], ["float16", "FracZ"], ["float16", "DefaultFormat"], ["float16", "NC1HWC0"]], [["int8", "NC1HWC0"], ["int8", "C1HWNCoC0"], ["int32", "DefaultFormat"], ["int32", "NC1HWC0"]]], "imply_type": "TBE", "async_flag": false, "binfile_name": "avg_pool.so", "compute_cost": 10, "kernel_name": "avg_pool", "partial_flag": true, "reshape_type": "", "dynamic_format": false, "dynamic_shape": false, "op_pattern": ""}
{"op_name": "AvgPoolGrad", "inputs": [{"index": 0, "name": "input_grad", "need_compile": false, "param_type": "required", "shape": "all"}, {"index": 1, "name": "mean_matrix", "need_compile": false, "param_type": "optional", "shape": "all"}, {"index": 2, "name": "kernel_matrix", "need_compile": false, "param_type": "optional", "shape": "all"}], "outputs": [{"index": 0, "name": "out_grad", "need_compile": true, "param_type": "required", "shape": "all"}], "attr": [{"name": "x_origin", "param_type": "required", "type": "listInt", "value": "all"}, {"name": "kernel_size", "param_type": "required", "type": "listInt", "value": "all"}, {"name": "strides", "param_type": "required", "type": "listInt", "value": "all"}, {"name": "pad_mode", "param_type": "required", "type": "str", "value": "all"}, {"name": "data_format", "param_type": "optional", "type": "str", "value": "all"}], "fusion_type": "OPAQUE", "dtype_format": [[["float16", "NC1HWC0"], ["float16", "NC1HWC0"], ["float16", "C1HWNCoC0"], ["float16", "NC1HWC0"]]], "imply_type": "TBE", "async_flag": false, "binfile_name": "avg_pool_grad_d.so", "compute_cost": 10, "kernel_name": "avg_pool_grad_d", "partial_flag": true, "reshape_type": "", "dynamic_format": false, "dynamic_shape": false, "op_pattern": ""}
{"op_name": "AvgPoolGradVm", "inputs": [{"index": 0, "name": "input_grad", "need_compile": false, "param_type": "required", "shape": "all"}, {"index": 1, "name": "mean_matrix", "need_compile": false, "param_type": "optional", "shape": "all"}, {"index": 2, "name": "kernel_matrix", "need_compile": false, "param_type": "optional", "shape": "all"}], "outputs": [{"index": 0, "name": "out_grad", "need_compile": true, "param_type": "required", "shape": "all"}], "attr": [{"name": "x_origin", "param_type": "required", "type": "listInt", "value": "all"}, {"name": "kernel_size", "param_type": "required", "type": "listInt", "value": "all"}, {"name": "strides", "param_type": "required", "type": "listInt", "value": "all"}, {"name": "pad_mode", "param_type": "required", "type": "str", "value": "all"}, {"name": "data_format", "param_type": "optional", "type": "str", "value": "all"}], "fusion_type": "OPAQUE", "dtype_format": [[["float16", "NC1HWC0"], ["float16", "NC1HWC0"], ["float16", "C1HWNCoC0"], ["float16", "NC1HWC0"]]], "imply_type": "TBE", "async_flag": false, "binfile_name": "avg_pool_grad_d.so", "compute_cost": 10, "kernel_name": "avg_pool_grad_d", "partial_flag": true, "reshape_type": "", "dynamic_format": false, "dynamic_shape": false, "op_pattern": ""}
{"op_name": "OnesLike", "inputs": [{"index": 0, "name": "x", "need_compile": false, "param_type": "required", "shape": "all"}], "outputs": [{"index": 0, "name": "y", "need_compile": false, "param_type": "required", "shape": "all"}], "attr": [], "fusion_type": "OPAQUE", "dtype_format": [[["uint8", ""], ["uint8", ""]], [["int8", ""], ["int8", ""]], [["int32", ""], ["int32", ""]], [["float16", ""], ["float16", ""]], [["float32", ""], ["float32", ""]]], "imply_type": "TBE", "async_flag": false, "binfile_name": "ones_like.so", "compute_cost": 10, "kernel_name": "ones_like", "partial_flag": true, "reshape_type": "", "dynamic_format": false, "dynamic_shape": false, "op_pattern": "formatAgnostic"}
{"op_name": "BatchToSpace", "inputs": [{"index": 0, "name": "x", "need_compile": false, "param_type": "required", "shape": "all"}], "outputs": [{"index": 0, "name": "y", "need_compile": false, "param_type": "required", "shape": "all"}], "attr": [{"name": "block_size", "param_type": "required", "type": "int", "value": "all"}, {"name": "crops", "param_type": "required", "type": "listListInt", "value": "all"}], "fusion_type": "OPAQUE", "dtype_format": [[["float16", "NC1HWC0"], ["float16", "NC1HWC0"]], [["float32", "NC1HWC0"], ["float32", "NC1HWC0"]]], "imply_type": "TBE", "async_flag": false, "binfile_name": "batch_to_space_d.so", "compute_cost": 10, "kernel_name": "batch_to_space_d", "partial_flag": true, "reshape_type": "", "dynamic_format": false, "dynamic_shape": false, "op_pattern": ""}
{"op_name": "SpaceToBatch", "inputs": [{"index": 0, "name": "x", "need_compile": false, "param_type": "required", "shape": "all"}], "outputs": [{"index": 0, "name": "y", "need_compile": false, "param_type": "required", "shape": "all"}], "attr": [{"name": "block_size", "param_type": "required", "type": "int", "value": "all"}, {"name": "paddings", "param_type": "required", "type": "listListInt", "value": "all"}], "fusion_type": "OPAQUE", "dtype_format": [[["float16", "NC1HWC0"], ["float16", "NC1HWC0"]], [["float32", "NC1HWC0"], ["float32", "NC1HWC0"]]], "imply_type": "TBE", "async_flag": false, "binfile_name": "space_to_batch_d.so", "compute_cost": 10, "kernel_name": "space_to_batch_d", "partial_flag": true, "reshape_type": "", "dynamic_format": false, "dynamic_shape": false, "op_pattern": ""}
@@ -420,8 +420,8 @@
{"op_name": "ScatterMul", "inputs": [{"index": 0, "name": "var", "need_compile": false, "param_type": "required", "shape": "all"}, {"index": 1, "name": "indices", "need_compile": false, "param_type": "required", "shape": "all"}, {"index": 2, "name": "updates", "need_compile": false, "param_type": "required", "shape": "all"}], "outputs": [{"index": 0, "name": "var", "need_compile": false, "param_type": "required", "shape": "all"}], "attr": [{"name": "use_locking", "param_type": "optional", "type": "bool", "value": "all"}], "fusion_type": "ELEMWISE", "dtype_format": [[["float16", "DefaultFormat"], ["int32", "DefaultFormat"], ["float16", "DefaultFormat"], ["float16", "DefaultFormat"]], [["float32", "DefaultFormat"], ["int32", "DefaultFormat"], ["float32", "DefaultFormat"], ["float32", "DefaultFormat"]], [["int32", "DefaultFormat"], ["int32", "DefaultFormat"], ["int32", "DefaultFormat"], ["int32", "DefaultFormat"]], [["int8", "DefaultFormat"], ["int32", "DefaultFormat"], ["int8", "DefaultFormat"], ["int8", "DefaultFormat"]], [["uint8", "DefaultFormat"], ["int32", "DefaultFormat"], ["uint8", "DefaultFormat"], ["uint8", "DefaultFormat"]]], "imply_type": "TBE", "async_flag": false, "binfile_name": "scatter_mul.so", "compute_cost": 10, "kernel_name": "scatter_mul", "partial_flag": true, "reshape_type": "", "dynamic_format": false, "dynamic_shape": false, "op_pattern": ""}
{"op_name": "ScatterDiv", "inputs": [{"index": 0, "name": "var", "need_compile": false, "param_type": "required", "shape": "all"}, {"index": 1, "name": "indices", "need_compile": false, "param_type": "required", "shape": "all"}, {"index": 2, "name": "updates", "need_compile": false, "param_type": "required", "shape": "all"}], "outputs": [{"index": 0, "name": "var", "need_compile": false, "param_type": "required", "shape": "all"}], "attr": [{"name": "use_locking", "param_type": "optional", "type": "bool", "value": "all"}], "fusion_type": "ELEMWISE", "dtype_format": [[["float16", "DefaultFormat"], ["int32", "DefaultFormat"], ["float16", "DefaultFormat"], ["float16", "DefaultFormat"]], [["float32", "DefaultFormat"], ["int32", "DefaultFormat"], ["float32", "DefaultFormat"], ["float32", "DefaultFormat"]], [["int32", "DefaultFormat"], ["int32", "DefaultFormat"], ["int32", "DefaultFormat"], ["int32", "DefaultFormat"]], [["int8", "DefaultFormat"], ["int32", "DefaultFormat"], ["int8", "DefaultFormat"], ["int8", "DefaultFormat"]], [["uint8", "DefaultFormat"], ["int32", "DefaultFormat"], ["uint8", "DefaultFormat"], ["uint8", "DefaultFormat"]]], "imply_type": "TBE", "async_flag": false, "binfile_name": "scatter_div.so", "compute_cost": 10, "kernel_name": "scatter_div", "partial_flag": true, "reshape_type": "", "dynamic_format": false, "dynamic_shape": false, "op_pattern": ""}
{"op_name": "Mod", "inputs": [{"index": 0, "name": "x1", "need_compile": false, "param_type": "required", "shape": "all"}, {"index": 1, "name": "x2", "need_compile": false, "param_type": "required", "shape": "all"}], "outputs": [{"index": 0, "name": "y", "need_compile": false, "param_type": "required", "shape": "all"}], "attr": [], "fusion_type": "ELEMWISE", "dtype_format": [[["int8", ""], ["int8", ""], ["int8", ""]], [["uint8", ""], ["uint8", ""], ["uint8", ""]], [["int32", ""], ["int32", ""], ["int32", ""]], [["float16", ""], ["float16", ""], ["float16", ""]], [["float32", ""], ["float32", ""], ["float32", ""]]], "imply_type": "TBE", "async_flag": false, "binfile_name": "mod.so", "compute_cost": 10, "kernel_name": "mod", "partial_flag": true, "reshape_type": "", "dynamic_format": false, "dynamic_shape": false, "op_pattern": "broadcast"}
{"op_name": "MaxPoolGradGrad", "inputs": [{"index": 0, "name": "x1", "need_compile": false, "param_type": "required", "shape": "all"}, {"index": 1, "name": "x2", "need_compile": false, "param_type": "required", "shape": "all"}, {"index": 2, "name": "grad", "need_compile": false, "param_type": "required", "shape": "all"}], "outputs": [{"index": 0, "name": "y", "need_compile": false, "param_type": "required", "shape": "all"}], "attr": [{"name": "ksize", "param_type": "required", "type": "listInt", "value": "all"}, {"name": "strides", "param_type": "required", "type": "listInt", "value": "all"}, {"name": "padding", "param_type": "required", "type": "str", "value": "all"}, {"name": "data_format", "param_type": "optional", "type": "str", "value": "all"}], "fusion_type": "OPAQUE", "dtype_format": [[["float16", "NC1HWC0"], ["float16", "NC1HWC0"], ["float16", "NC1HWC0"], ["float16", "NC1HWC0"]]], "imply_type": "TBE", "async_flag": false, "binfile_name": "max_pool_grad_grad.so", "compute_cost": 10, "kernel_name": "max_pool_grad_grad", "partial_flag": true, "reshape_type": "", "dynamic_format": false, "dynamic_shape": false, "op_pattern": ""}
{"op_name": "MaxPoolGradGradWithArgmax", "inputs": [{"index": 0, "name": "x", "need_compile": false, "param_type": "required", "shape": "all"}, {"index": 1, "name": "grad", "need_compile": false, "param_type": "required", "shape": "all"}, {"index": 2, "name": "argmax", "need_compile": false, "param_type": "optional", "shape": "all"}], "outputs": [{"index": 0, "name": "y", "need_compile": false, "param_type": "required", "shape": "all"}], "attr": [{"name": "ksize", "param_type": "required", "type": "listInt", "value": "all"}, {"name": "strides", "param_type": "required", "type": "listInt", "value": "all"}, {"name": "padding", "param_type": "required", "type": "str", "value": "all"}], "fusion_type": "OPAQUE", "dtype_format": [[["float16", "NC1HWC0"], ["float16", "NC1HWC0"], ["uint16", "NC1HWC0"], ["float16", "NC1HWC0"]], [["float16", "NC1HWC0"], ["float16", "NC1HWC0"], ["int64", "NC1HWC0"], ["float16", "NC1HWC0"]]], "imply_type": "TBE", "async_flag": false, "binfile_name": "max_pool_grad_grad_with_argmax.so", "compute_cost": 10, "kernel_name": "max_pool_grad_grad_with_argmax", "partial_flag": true, "reshape_type": "", "dynamic_format": false, "dynamic_shape": false, "op_pattern": ""}
{"op_name": "MaxPoolGradGrad", "inputs": [{"index": 0, "name": "x1", "need_compile": false, "param_type": "required", "shape": "all"}, {"index": 1, "name": "x2", "need_compile": false, "param_type": "required", "shape": "all"}, {"index": 2, "name": "grad", "need_compile": false, "param_type": "required", "shape": "all"}], "outputs": [{"index": 0, "name": "y", "need_compile": false, "param_type": "required", "shape": "all"}], "attr": [{"name": "kernel_size", "param_type": "required", "type": "listInt", "value": "all"}, {"name": "strides", "param_type": "required", "type": "listInt", "value": "all"}, {"name": "pad_mode", "param_type": "required", "type": "str", "value": "all"}, {"name": "data_format", "param_type": "optional", "type": "str", "value": "all"}], "fusion_type": "OPAQUE", "dtype_format": [[["float16", "NC1HWC0"], ["float16", "NC1HWC0"], ["float16", "NC1HWC0"], ["float16", "NC1HWC0"]]], "imply_type": "TBE", "async_flag": false, "binfile_name": "max_pool_grad_grad.so", "compute_cost": 10, "kernel_name": "max_pool_grad_grad", "partial_flag": true, "reshape_type": "", "dynamic_format": false, "dynamic_shape": false, "op_pattern": ""}
{"op_name": "MaxPoolGradGradWithArgmax", "inputs": [{"index": 0, "name": "x", "need_compile": false, "param_type": "required", "shape": "all"}, {"index": 1, "name": "grad", "need_compile": false, "param_type": "required", "shape": "all"}, {"index": 2, "name": "argmax", "need_compile": false, "param_type": "optional", "shape": "all"}], "outputs": [{"index": 0, "name": "y", "need_compile": false, "param_type": "required", "shape": "all"}], "attr": [{"name": "kernel_size", "param_type": "required", "type": "listInt", "value": "all"}, {"name": "strides", "param_type": "required", "type": "listInt", "value": "all"}, {"name": "pad_mode", "param_type": "required", "type": "str", "value": "all"}], "fusion_type": "OPAQUE", "dtype_format": [[["float16", "NC1HWC0"], ["float16", "NC1HWC0"], ["uint16", "NC1HWC0"], ["float16", "NC1HWC0"]], [["float16", "NC1HWC0"], ["float16", "NC1HWC0"], ["int64", "NC1HWC0"], ["float16", "NC1HWC0"]]], "imply_type": "TBE", "async_flag": false, "binfile_name": "max_pool_grad_grad_with_argmax.so", "compute_cost": 10, "kernel_name": "max_pool_grad_grad_with_argmax", "partial_flag": true, "reshape_type": "", "dynamic_format": false, "dynamic_shape": false, "op_pattern": ""}
{"op_name": "TensorMove", "inputs": [{"index": 0, "name": "x", "need_compile": false, "param_type": "required", "shape": "all"}], "outputs": [{"index": 0, "name": "y", "need_compile": false, "param_type": "required", "shape": "all"}], "attr": [], "fusion_type": "OPAQUE", "dtype_format": [[["int32", ""], ["int32", ""]], [["float16", ""], ["float16", ""]], [["float32", ""], ["float32", ""]], [["int8", ""], ["int8", ""]], [["uint8", ""], ["uint8", ""]], [["bool", ""], ["bool", ""]]], "imply_type": "TBE", "async_flag": false, "binfile_name": "tensor_move.so", "compute_cost": 10, "kernel_name": "tensor_move", "partial_flag": true, "reshape_type": "", "dynamic_format": false, "dynamic_shape": false, "op_pattern": "formatAgnostic"}
{"op_name": "PopulationCount", "inputs": [{"index": 0, "name": "x", "need_compile": false, "param_type": "required", "shape": "all"}], "outputs": [{"index": 0, "name": "y", "need_compile": false, "param_type": "required", "shape": "all"}], "attr": [], "fusion_type": "OPAQUE", "dtype_format": [[["int16", "NC1HWC0"], ["uint8", "NC1HWC0"]], [["int16", "DefaultFormat"], ["uint8", "DefaultFormat"]], [["uint16", "NC1HWC0"], ["uint8", "NC1HWC0"]], [["uint16", "DefaultFormat"], ["uint8", "DefaultFormat"]]], "imply_type": "TBE", "async_flag": false, "binfile_name": "population_count.so", "compute_cost": 10, "kernel_name": "population_count", "partial_flag": true, "reshape_type": "", "dynamic_format": false, "dynamic_shape": false, "op_pattern": ""}
{"op_name": "ParallelConcat", "inputs": [{"index": 0, "name": "values", "need_compile": false, "param_type": "dynamic", "shape": "all"}], "outputs": [{"index": 0, "name": "output_data", "need_compile": false, "param_type": "required", "shape": "all"}], "attr": [{"name": "shape", "param_type": "required", "type": "listInt", "value": "all"}, {"name": "N", "param_type": "required", "type": "int", "value": "all"}], "fusion_type": "OPAQUE", "dtype_format": [[["bool", "DefaultFormat"], ["bool", "DefaultFormat"]], [["bool", "NC1HWC0"], ["bool", "NC1HWC0"]], [["int8", "DefaultFormat"], ["int8", "DefaultFormat"]], [["int8", "NC1HWC0"], ["int8", "NC1HWC0"]], [["uint8", "DefaultFormat"], ["uint8", "DefaultFormat"]], [["uint8", "NC1HWC0"], ["uint8", "NC1HWC0"]], [["int16", "DefaultFormat"], ["int16", "DefaultFormat"]], [["int16", "NC1HWC0"], ["int16", "NC1HWC0"]], [["uint16", "DefaultFormat"], ["uint16", "DefaultFormat"]], [["uint16", "NC1HWC0"], ["uint16", "NC1HWC0"]], [["int32", "DefaultFormat"], ["int32", "DefaultFormat"]], [["int32", "NC1HWC0"], ["int32", "NC1HWC0"]], [["uint32", "DefaultFormat"], ["uint32", "DefaultFormat"]], [["uint32", "NC1HWC0"], ["uint32", "NC1HWC0"]], [["int64", "DefaultFormat"], ["int64", "DefaultFormat"]], [["int64", "NC1HWC0"], ["int64", "NC1HWC0"]], [["uint64", "DefaultFormat"], ["uint64", "DefaultFormat"]], [["uint64", "NC1HWC0"], ["uint64", "NC1HWC0"]], [["float16", "DefaultFormat"], ["float16", "DefaultFormat"]], [["float16", "NC1HWC0"], ["float16", "NC1HWC0"]], [["float32", "DefaultFormat"], ["float32", "DefaultFormat"]], [["float32", "NC1HWC0"], ["float32", "NC1HWC0"]], [["bool", "NHWC"], ["bool", "NHWC"]], [["bool", "NCHW"], ["bool", "NCHW"]], [["int8", "NHWC"], ["int8", "NHWC"]], [["int8", "NCHW"], ["int8", "NCHW"]], [["uint8", "NHWC"], ["uint8", "NHWC"]], [["uint8", "NCHW"], ["uint8", "NCHW"]], [["int16", "NHWC"], ["int16", "NHWC"]], [["int16", "NCHW"], ["int16", "NCHW"]], [["uint16", "NHWC"], ["uint16", "NHWC"]], [["uint16", "NCHW"], ["uint16", "NCHW"]], [["int32", "NHWC"], ["int32", "NHWC"]], [["int32", "NCHW"], ["int32", "NCHW"]], [["uint32", "NHWC"], ["uint32", "NHWC"]], [["uint32", "NCHW"], ["uint32", "NCHW"]], [["int64", "NHWC"], ["int64", "NHWC"]], [["int64", "NCHW"], ["int64", "NCHW"]], [["uint64", "NHWC"], ["uint64", "NHWC"]], [["uint64", "NCHW"], ["uint64", "NCHW"]], [["float16", "NHWC"], ["float16", "NHWC"]], [["float16", "NCHW"], ["float16", "NCHW"]], [["float32", "NHWC"], ["float32", "NHWC"]], [["float32", "NCHW"], ["float32", "NCHW"]]], "imply_type": "TBE", "async_flag": false, "binfile_name": "parallel_concat.so", "compute_cost": 10, "kernel_name": "parallel_concat", "partial_flag": true, "reshape_type": "", "dynamic_format": false, "dynamic_shape": false, "op_pattern": ""}

View File

@@ -30,14 +30,13 @@ using mindspore::kernel::AddressPtr;
using CTask = std::function<void(size_t, size_t)>;
namespace mindspore {
namespace kernel {
const char KSIZE[] = "ksize";
const char KERNEL_SIZE[] = "kernel_size";
const char STRIDE[] = "stride";
const char STRIDES[] = "strides";
const char DILATION[] = "dilation";
const char PAD[] = "pad";
const char PAD_LIST[] = "pad_list";
const char PAD_MODE[] = "pad_mode";
const char PADDING[] = "padding";
const char PAD_MODE_LOWER_SAME[] = "same";
const char PAD_MODE_LOWER_VALID[] = "valid";
const char PAD_MODE_UPPER_SAME[] = "SAME";

View File

@@ -31,7 +31,7 @@ void AvgPoolingGradCPUKernel::InitKernel(const CNodePtr &kernel_node) {
dnnl::memory::desc dst_desc = GetDefaultMemDesc(dst_shape);
std::vector<int> origin_kernel_sizes;
std::vector<int> strides;
std::vector<int64_t> kernel_sizes_me = AnfAlgo::GetNodeAttr<std::vector<int64_t>>(kernel_node, KSIZE);
std::vector<int64_t> kernel_sizes_me = AnfAlgo::GetNodeAttr<std::vector<int64_t>>(kernel_node, KERNEL_SIZE);
std::vector<int64_t> strides_me = AnfAlgo::GetNodeAttr<std::vector<int64_t>>(kernel_node, STRIDES);
(void)std::transform(kernel_sizes_me.begin(), kernel_sizes_me.end(), std::back_inserter(origin_kernel_sizes),
[](const int64_t &value) { return static_cast<int>(value); });
@@ -42,7 +42,7 @@ void AvgPoolingGradCPUKernel::InitKernel(const CNodePtr &kernel_node) {
}
dnnl::memory::dims strides_dims{strides[2], strides[3]};
dnnl::memory::dims kernels_dims{origin_kernel_sizes[2], origin_kernel_sizes[3]};
const std::string pad_mode = AnfAlgo::GetNodeAttr<std::string>(kernel_node, PADDING);
const std::string pad_mode = AnfAlgo::GetNodeAttr<std::string>(kernel_node, PAD_MODE);
std::vector<int> int_padding_l;
std::vector<int> int_padding_r;
std::vector<size_t> kernel_size({IntToSize(origin_kernel_sizes[2]), IntToSize(origin_kernel_sizes[3])});

View File

@@ -30,7 +30,7 @@ void PoolingCPUKernel::InitKernel(const CNodePtr &kernel_node) {
dnnl::memory::desc dst_desc = GetDefaultMemDesc(dst_shape);
std::vector<int> origin_kernel_sizes;
std::vector<int> strides;
std::vector<int64_t> kernel_sizes_me = AnfAlgo::GetNodeAttr<std::vector<int64_t>>(kernel_node, KSIZE);
std::vector<int64_t> kernel_sizes_me = AnfAlgo::GetNodeAttr<std::vector<int64_t>>(kernel_node, KERNEL_SIZE);
std::vector<int64_t> strides_me = AnfAlgo::GetNodeAttr<std::vector<int64_t>>(kernel_node, STRIDES);
(void)std::transform(kernel_sizes_me.begin(), kernel_sizes_me.end(), std::back_inserter(origin_kernel_sizes),
[](const int64_t &value) { return static_cast<int>(value); });
@@ -41,7 +41,7 @@ void PoolingCPUKernel::InitKernel(const CNodePtr &kernel_node) {
}
dnnl::memory::dims strides_dims{strides[2], strides[3]};
dnnl::memory::dims kernels_dims{origin_kernel_sizes[2], origin_kernel_sizes[3]};
const std::string pad_mode = AnfAlgo::GetNodeAttr<std::string>(kernel_node, PADDING);
const std::string pad_mode = AnfAlgo::GetNodeAttr<std::string>(kernel_node, PAD_MODE);
std::vector<int> int_padding_l;
std::vector<int> int_padding_r;
std::vector<size_t> kernel_size({IntToSize(origin_kernel_sizes[2]), IntToSize(origin_kernel_sizes[3])});

View File

@@ -29,7 +29,7 @@ void MaxPoolingGradCPUKernel::InitKernel(const CNodePtr &kernel_node) {
dst_shape_ = AnfAlgo::GetInputDeviceShape(kernel_node, 1);
std::vector<int> kernel_sizes;
std::vector<int> strides;
auto kernel_sizes_me = AnfAlgo::GetNodeAttr<std::vector<int64_t>>(kernel_node, KSIZE);
auto kernel_sizes_me = AnfAlgo::GetNodeAttr<std::vector<int64_t>>(kernel_node, KERNEL_SIZE);
auto strides_me = AnfAlgo::GetNodeAttr<std::vector<int64_t>>(kernel_node, STRIDES);
(void)std::transform(kernel_sizes_me.begin(), kernel_sizes_me.end(), std::back_inserter(kernel_sizes),
[](const int64_t &value) { return static_cast<int>(value); });
@@ -39,7 +39,7 @@ void MaxPoolingGradCPUKernel::InitKernel(const CNodePtr &kernel_node) {
MS_LOG(EXCEPTION) << "pooling grad invalid input size";
}
std::vector<int> padding_r;
const std::string pad_mode = AnfAlgo::GetNodeAttr<std::string>(kernel_node, PADDING);
const std::string pad_mode = AnfAlgo::GetNodeAttr<std::string>(kernel_node, PAD_MODE);
kernel_size_ = {IntToSize(kernel_sizes[2]), IntToSize(kernel_sizes[3])};
stride_ = strides[3];
GetPadding(kernel_node, pad_mode, src_shape_, kernel_size_, stride_, &padding_l_, &padding_r);

View File

@@ -92,7 +92,7 @@ class MaxPoolWithArgmaxGpuFwdKernel : public GpuKernel {
output_width_ = SizeToInt(output_shape[3]);
std::vector<int> window;
std::vector<int64_t> window_me =
GetValue<std::vector<int64_t>>(AnfAlgo::GetCNodePrimitive(kernel_node)->GetAttr("ksize"));
GetValue<std::vector<int64_t>>(AnfAlgo::GetCNodePrimitive(kernel_node)->GetAttr("kernel_size"));
(void)std::transform(window_me.begin(), window_me.end(), std::back_inserter(window),
[](const int64_t &value) { return static_cast<int>(value); });
window_height_ = window[1];
@@ -104,7 +104,7 @@ class MaxPoolWithArgmaxGpuFwdKernel : public GpuKernel {
[](const int64_t &value) { return static_cast<int>(value); });
stride_height_ = stride[1];
stride_width_ = stride[2];
pad_mode_ = GetValue<std::string>(AnfAlgo::GetCNodePrimitive(kernel_node)->GetAttr("padding"));
pad_mode_ = GetValue<std::string>(AnfAlgo::GetCNodePrimitive(kernel_node)->GetAttr("pad_mode"));
pad_top_ = 0;
pad_left_ = 0;
if (pad_mode_ == kSamePadModeUpperCase || pad_mode_ == kSamePadModeLowerCase) {

View File

@@ -169,10 +169,10 @@ class PoolingGpuFwdKernel : public GpuKernel {
}
}
void SetPad(const CNodePtr &kernel_node) {
pad_mode_ = GetValue<std::string>(AnfAlgo::GetCNodePrimitive(kernel_node)->GetAttr("padding"));
pad_mode_ = GetValue<std::string>(AnfAlgo::GetCNodePrimitive(kernel_node)->GetAttr("pad_mode"));
std::vector<int> window;
std::vector<int64_t> window_me =
GetValue<std::vector<int64_t>>(AnfAlgo::GetCNodePrimitive(kernel_node)->GetAttr("ksize"));
GetValue<std::vector<int64_t>>(AnfAlgo::GetCNodePrimitive(kernel_node)->GetAttr("kernel_size"));
(void)std::transform(window_me.begin(), window_me.end(), std::back_inserter(window),
[](const int64_t &value) { return static_cast<int>(value); });
int window_height = window[2];

View File

@@ -204,10 +204,10 @@ class PoolingGradGpuKernel : public GpuKernel {
return true;
}
void SetPad(const CNodePtr &kernel_node) {
pad_mode_ = GetAttr<std::string>(kernel_node, "padding");
pad_mode_ = GetAttr<std::string>(kernel_node, "pad_mode");
std::vector<int64_t> stride_me = GetAttr<std::vector<int64_t>>(kernel_node, "strides");
std::vector<int> window;
std::vector<int64_t> window_me = GetAttr<std::vector<int64_t>>(kernel_node, "ksize");
std::vector<int64_t> window_me = GetAttr<std::vector<int64_t>>(kernel_node, "kernel_size");
(void)std::transform(stride_me.begin(), stride_me.end(), std::back_inserter(stride_),
[](const int64_t &value) { return static_cast<int>(value); });
(void)std::transform(window_me.begin(), window_me.end(), std::back_inserter(window),

View File

@@ -89,7 +89,7 @@ CNodePtr CreateMaxPoolGradWithArgmax(const FuncGraphPtr &graph, const CNodePtr &
void SetNodeAttrs(const CNodePtr &maxpool, const CNodePtr &maxpool_grad, const CNodePtr &maxpool_argmax,
const CNodePtr &maxpool_grad_argmax) {
auto strides = AnfAlgo::GetNodeAttr<std::vector<int64_t>>(maxpool, kAttrStrides);
auto ksize = AnfAlgo::GetNodeAttr<std::vector<int64_t>>(maxpool, kAttrKsize);
auto ksize = AnfAlgo::GetNodeAttr<std::vector<int64_t>>(maxpool, kAttrKernelSize);
if (strides.size() != kMaxPoolAttrAxisNum) {
MS_LOG(EXCEPTION) << "MaxPool's attr strides has wrong axis number, should be " << kMaxPoolAttrAxisNum
<< ", but got " << strides.size();
@@ -110,8 +110,8 @@ void SetNodeAttrs(const CNodePtr &maxpool, const CNodePtr &maxpool_grad, const C
AnfAlgo::CopyNodeAttrs(maxpool_grad, maxpool_grad_argmax);
AnfAlgo::SetNodeAttr(kAttrStrides, MakeValue(strides), maxpool_argmax);
AnfAlgo::SetNodeAttr(kAttrStrides, MakeValue(strides), maxpool_grad_argmax);
AnfAlgo::SetNodeAttr(kAttrKsize, MakeValue(ksize), maxpool_argmax);
AnfAlgo::SetNodeAttr(kAttrKsize, MakeValue(ksize), maxpool_grad_argmax);
AnfAlgo::SetNodeAttr(kAttrKernelSize, MakeValue(ksize), maxpool_argmax);
AnfAlgo::SetNodeAttr(kAttrKernelSize, MakeValue(ksize), maxpool_grad_argmax);
}
} // namespace

View File

@@ -61,7 +61,7 @@ const AnfNodePtr MaxPoolWithArgmaxUnifyMindIR::Process(const FuncGraphPtr &graph
MS_EXCEPTION_IF_NULL(maxpool_with_argmax);
TypeId argmax_dtype = kNumberTypeUInt16;
auto ksize = AnfAlgo::GetNodeAttr<std::vector<int64_t>>(maxpool_with_argmax, kAttrKsize);
auto ksize = AnfAlgo::GetNodeAttr<std::vector<int64_t>>(maxpool_with_argmax, kAttrKernelSize);
auto output_shape = AnfAlgo::GetOutputInferShape(maxpool_with_argmax, 0);
auto argmax_shape = output_shape;
if (argmax_shape.size() != 4) {
@@ -96,7 +96,7 @@ const AnfNodePtr MaxPoolGradWithArgmaxUnifyMindIR::Process(const FuncGraphPtr &g
MS_EXCEPTION_IF_NULL(tuple_getitem0_anf);
TypeId argmax_dtype = kNumberTypeUInt16;
auto ksize = AnfAlgo::GetNodeAttr<std::vector<int64_t>>(maxpool_grad_with_argmax, kAttrKsize);
auto ksize = AnfAlgo::GetNodeAttr<std::vector<int64_t>>(maxpool_grad_with_argmax, kAttrKernelSize);
auto argmax_shape = AnfAlgo::GetOutputInferShape(tuple_getitem0_anf, 0);
if (argmax_shape.size() != 4) {
MS_LOG(DEBUG) << "argmax's infer shape size not equal 4";

View File

@@ -229,22 +229,22 @@ OPERATOR_ONNX_CONVERT_DEFINE(SimpleMean, AveragePool, OpNameInfo())
OPERATOR_ONNX_CONVERT_DEFINE(
MaxPool, MaxPool,
OpNameInfo()
.Attr("ksize", "kernel_shape", onnx::AttributeProto_AttributeType_INTS, SetAttrTupleValueToProto<2>)
.Attr("padding", "auto_pad", onnx::AttributeProto_AttributeType_STRING, SetPoolingPadMode)
.Attr("kernel_size", "kernel_shape", onnx::AttributeProto_AttributeType_INTS, SetAttrTupleValueToProto<2>)
.Attr("pad_mode", "auto_pad", onnx::AttributeProto_AttributeType_STRING, SetPoolingPadMode)
.Attr("strides", "strides", onnx::AttributeProto_AttributeType_INTS, SetAttrTupleValueToProto<2>))
OPERATOR_ONNX_CONVERT_DEFINE(
MaxPoolWithArgmax, MaxPool,
OpNameInfo()
.Attr("ksize", "kernel_shape", onnx::AttributeProto_AttributeType_INTS, SetAttrTupleValueToProto<2>)
.Attr("padding", "auto_pad", onnx::AttributeProto_AttributeType_STRING, SetPoolingPadMode)
.Attr("kernel_size", "kernel_shape", onnx::AttributeProto_AttributeType_INTS, SetAttrTupleValueToProto<2>)
.Attr("pad_mode", "auto_pad", onnx::AttributeProto_AttributeType_STRING, SetPoolingPadMode)
.Attr("strides", "strides", onnx::AttributeProto_AttributeType_INTS, SetAttrTupleValueToProto<2>))
OPERATOR_ONNX_CONVERT_DEFINE(
AvgPool, AveragePool,
OpNameInfo()
.Attr("ksize", "kernel_shape", onnx::AttributeProto_AttributeType_INTS, SetAttrTupleValueToProto<2>)
.Attr("padding", "auto_pad", onnx::AttributeProto_AttributeType_STRING, SetPoolingPadMode)
.Attr("kernel_size", "kernel_shape", onnx::AttributeProto_AttributeType_INTS, SetAttrTupleValueToProto<2>)
.Attr("pad_mode", "auto_pad", onnx::AttributeProto_AttributeType_STRING, SetPoolingPadMode)
.Attr("strides", "strides", onnx::AttributeProto_AttributeType_INTS, SetAttrTupleValueToProto<2>))
OPERATOR_ONNX_CONVERT_DEFINE(GatherV2, Gather, OpNameInfo())

View File

@@ -20,54 +20,55 @@
namespace mindspore::transform {
// MaxPool
INPUT_MAP(MaxPool) = {{1, INPUT_DESC(x)}};
ATTR_MAP(MaxPool) = {{"ksize", ATTR_DESC(ksize, AnyTraits<int64_t>(), AnyTraits<std::vector<int64_t>>())},
ATTR_MAP(MaxPool) = {{"kernel_size", ATTR_DESC(ksize, AnyTraits<int64_t>(), AnyTraits<std::vector<int64_t>>())},
{"strides", ATTR_DESC(strides, AnyTraits<int64_t>(), AnyTraits<std::vector<int64_t>>())},
{"padding", ATTR_DESC(padding, AnyTraits<std::string>())},
{"pad_mode", ATTR_DESC(padding, AnyTraits<std::string>())},
{"data_format", ATTR_DESC(data_format, AnyTraits<std::string>())}};
OUTPUT_MAP(MaxPool) = {{0, OUTPUT_DESC(y)}};
REG_ADPT_DESC(MaxPool, kNameMaxPool, ADPT_DESC(MaxPool))
// AvgPool
INPUT_MAP(AvgPool) = {{1, INPUT_DESC(x)}};
ATTR_MAP(AvgPool) = {{"ksize", ATTR_DESC(ksize, AnyTraits<int64_t>(), AnyTraits<std::vector<int64_t>>())},
ATTR_MAP(AvgPool) = {{"kernel_size", ATTR_DESC(ksize, AnyTraits<int64_t>(), AnyTraits<std::vector<int64_t>>())},
{"strides", ATTR_DESC(strides, AnyTraits<int64_t>(), AnyTraits<std::vector<int64_t>>())},
{"padding", ATTR_DESC(padding, AnyTraits<std::string>())},
{"pad_mode", ATTR_DESC(padding, AnyTraits<std::string>())},
{"data_format", ATTR_DESC(data_format, AnyTraits<std::string>())}};
OUTPUT_MAP(AvgPool) = {{0, OUTPUT_DESC(y)}};
REG_ADPT_DESC(AvgPool, kNameAvgPool, ADPT_DESC(AvgPool))
// MaxPoolGrad
INPUT_MAP(MaxPoolGrad) = {{1, INPUT_DESC(x1)}, {2, INPUT_DESC(x2)}, {3, INPUT_DESC(grad)}};
ATTR_MAP(MaxPoolGrad) = {{"ksize", ATTR_DESC(ksize, AnyTraits<int64_t>(), AnyTraits<std::vector<int64_t>>())},
ATTR_MAP(MaxPoolGrad) = {{"kernel_size", ATTR_DESC(ksize, AnyTraits<int64_t>(), AnyTraits<std::vector<int64_t>>())},
{"strides", ATTR_DESC(strides, AnyTraits<int64_t>(), AnyTraits<std::vector<int64_t>>())},
{"padding", ATTR_DESC(padding, AnyTraits<std::string>())},
{"pad_mode", ATTR_DESC(padding, AnyTraits<std::string>())},
{"data_format", ATTR_DESC(data_format, AnyTraits<std::string>())}};
OUTPUT_MAP(MaxPoolGrad) = {{0, OUTPUT_DESC(y)}};
REG_ADPT_DESC(MaxPoolGrad, kNameMaxPoolGrad, ADPT_DESC(MaxPoolGrad))
// avgpoolgrad
INPUT_MAP(AvgPoolGrad) = {{1, INPUT_DESC(orig_input_shape)}, {2, INPUT_DESC(input_grad)}};
ATTR_MAP(AvgPoolGrad) = {{"ksize", ATTR_DESC(ksize, AnyTraits<int64_t>(), AnyTraits<std::vector<int64_t>>())},
ATTR_MAP(AvgPoolGrad) = {{"kernel_size", ATTR_DESC(ksize, AnyTraits<int64_t>(), AnyTraits<std::vector<int64_t>>())},
{"strides", ATTR_DESC(strides, AnyTraits<int64_t>(), AnyTraits<std::vector<int64_t>>())},
{"padding", ATTR_DESC(padding, AnyTraits<std::string>())},
{"pad_mode", ATTR_DESC(padding, AnyTraits<std::string>())},
{"data_format", ATTR_DESC(data_format, AnyTraits<std::string>())}};
OUTPUT_MAP(AvgPoolGrad) = {{0, OUTPUT_DESC(out_grad)}};
REG_ADPT_DESC(AvgPoolGrad, kNameAvgPoolGrad, ADPT_DESC(AvgPoolGrad))
// MaxPoolWithArgmax
INPUT_MAP(MaxPoolWithArgmax) = {{1, INPUT_DESC(x)}};
ATTR_MAP(MaxPoolWithArgmax) = {{"ksize", ATTR_DESC(ksize, AnyTraits<int64_t>(), AnyTraits<std::vector<int64_t>>())},
{"strides", ATTR_DESC(strides, AnyTraits<int64_t>(), AnyTraits<std::vector<int64_t>>())},
{"padding", ATTR_DESC(padding, AnyTraits<std::string>())}};
ATTR_MAP(MaxPoolWithArgmax) = {
{"kernel_size", ATTR_DESC(ksize, AnyTraits<int64_t>(), AnyTraits<std::vector<int64_t>>())},
{"strides", ATTR_DESC(strides, AnyTraits<int64_t>(), AnyTraits<std::vector<int64_t>>())},
{"pad_mode", ATTR_DESC(padding, AnyTraits<std::string>())}};
OUTPUT_MAP(MaxPoolWithArgmax) = {{0, OUTPUT_DESC(y)}, {1, OUTPUT_DESC(argmax)}};
REG_ADPT_DESC(MaxPoolWithArgmax, kNameMaxPoolWithArgmax, ADPT_DESC(MaxPoolWithArgmax))
// MaxPoolGradWithArgmax
INPUT_MAP(MaxPoolGradWithArgmax) = {{1, INPUT_DESC(x)}, {2, INPUT_DESC(grad)}, {3, INPUT_DESC(argmax)}};
ATTR_MAP(MaxPoolGradWithArgmax) = {
{"ksize", ATTR_DESC(ksize, AnyTraits<int64_t>(), AnyTraits<std::vector<int64_t>>())},
{"kernel_size", ATTR_DESC(ksize, AnyTraits<int64_t>(), AnyTraits<std::vector<int64_t>>())},
{"strides", ATTR_DESC(strides, AnyTraits<int64_t>(), AnyTraits<std::vector<int64_t>>())},
{"padding", ATTR_DESC(padding, AnyTraits<std::string>())}};
{"pad_mode", ATTR_DESC(padding, AnyTraits<std::string>())}};
OUTPUT_MAP(MaxPoolGradWithArgmax) = {{0, OUTPUT_DESC(y)}};
REG_ADPT_DESC(MaxPoolGradWithArgmax, kNameMaxPoolGradWithArgmax, ADPT_DESC(MaxPoolGradWithArgmax))
} // namespace mindspore::transform

View File

@@ -372,7 +372,6 @@ constexpr auto kAttrCompileInfo = "compile_info";
constexpr auto kAttrFusionType = "fusion_type";
constexpr auto kAttrStride = "stride";
constexpr auto kAttrStrides = "strides";
constexpr auto kAttrKsize = "ksize";
constexpr auto kAttrKernelSize = "kernel_size";
constexpr auto kAttrDilation = "dilation";
constexpr auto kAttrPadMode = "pad_mode";

View File

@@ -117,9 +117,9 @@ class MaxPool2d(_PoolNd):
def __init__(self, kernel_size=1, stride=1, pad_mode="valid", data_format="NCHW"):
super(MaxPool2d, self).__init__(kernel_size, stride, pad_mode, data_format)
self.max_pool = P.MaxPool(ksize=self.kernel_size,
self.max_pool = P.MaxPool(kernel_size=self.kernel_size,
strides=self.stride,
padding=self.pad_mode,
pad_mode=self.pad_mode,
data_format=self.format)
def construct(self, x):
@@ -185,9 +185,9 @@ class MaxPool1d(_PoolNd):
validator.check_int(stride, 1, Rel.GE, "stride", self.cls_name)
self.kernel_size = (1, kernel_size)
self.stride = (1, stride)
self.max_pool = P.MaxPool(ksize=self.kernel_size,
self.max_pool = P.MaxPool(kernel_size=self.kernel_size,
strides=self.stride,
padding=self.pad_mode)
pad_mode=self.pad_mode)
self.shape = F.shape
self.reduce_mean = P.ReduceMean(keep_dims=True)
self.expand = P.ExpandDims()
@@ -263,9 +263,9 @@ class AvgPool2d(_PoolNd):
pad_mode="valid",
data_format="NCHW"):
super(AvgPool2d, self).__init__(kernel_size, stride, pad_mode, data_format)
self.avg_pool = P.AvgPool(ksize=self.kernel_size,
self.avg_pool = P.AvgPool(kernel_size=self.kernel_size,
strides=self.stride,
padding=self.pad_mode,
pad_mode=self.pad_mode,
data_format=self.format)
def construct(self, x):
@@ -335,9 +335,9 @@ class AvgPool1d(_PoolNd):
super(AvgPool1d, self).__init__(kernel_size, stride, pad_mode)
self.kernel_size = (1, kernel_size)
self.stride = (1, stride)
self.avg_pool = P.AvgPool(ksize=self.kernel_size,
self.avg_pool = P.AvgPool(kernel_size=self.kernel_size,
strides=self.stride,
padding=self.pad_mode)
pad_mode=self.pad_mode)
self.shape = F.shape
self.reduce_mean = P.ReduceMean(keep_dims=True)
self.slice = P.Slice()

View File

@@ -189,9 +189,9 @@ def get_bprop_depthwise_conv2d_native(self):
def get_bprop_max_pool_with_argmax(self):
"""Grad definition for `MaxPoolWithArgmax` operation."""
maxpool_grad = G.MaxPoolGradWithArgmax(
ksize=self.ksize,
kernel_size=self.kernel_size,
strides=self.strides,
padding=self.padding)
pad_mode=self.pad_mode)
def bprop(x, out, dout):
dx = maxpool_grad(x, dout[0], out[1])
@@ -204,9 +204,9 @@ def get_bprop_max_pool_with_argmax(self):
def get_bprop_max_pool_grad_grad(self):
"""Grad definition for `MaxPoolGrad` operation."""
maxpool_grad_grad = G.MaxPoolGradGrad(
ksize=self.ksize,
kernel_size=self.kernel_size,
strides=self.strides,
padding=self.padding)
pad_mode=self.pad_mode)
def bprop(x1, x2, grad, out, dout):
dx1 = zeros_like(x1)
@@ -221,9 +221,9 @@ def get_bprop_max_pool_grad_grad(self):
def get_bprop_max_pool_grad_grad_grad(self):
"""Grad definition for `MaxPoolGradGrad` operation."""
maxpool_grad = G.MaxPoolGrad(
ksize=self.ksize,
kernel_size=self.kernel_size,
strides=self.strides,
padding=self.padding)
pad_mode=self.pad_mode)
def bprop(x1, x2, grad, out, dout):
dx1 = zeros_like(x1)
@@ -238,9 +238,9 @@ def get_bprop_max_pool_grad_grad_grad(self):
def get_bprop_max_pool_grad(self):
"""Grad definition for `MaxPool` operation."""
maxpool_grad = G.MaxPoolGrad(
ksize=self.ksize,
kernel_size=self.kernel_size,
strides=self.strides,
padding=self.padding,
pad_mode=self.pad_mode,
data_format=self.format)
def bprop(x, out, dout):
@@ -250,7 +250,7 @@ def get_bprop_max_pool_grad(self):
return bprop
def _windowed_output_size(input_size, ksize, stride, padding):
def _windowed_output_size(input_size, ksize, stride, pad_mode):
"""
helper func for AvgPoolGrad
"""
@ -259,11 +259,11 @@ def _windowed_output_size(input_size, ksize, stride, padding):
tmp_pad_need = 0
tmp_pad_before = 0
tmp_pad_after = 0
if padding == 'VALID':
if pad_mode == 'VALID':
tmp_output = (input_size - ksize + stride) // stride
tmp_pad_before = 0
tmp_pad_after = 0
elif padding == 'SAME':
elif pad_mode == 'SAME':
tmp_output = (input_size + stride - 1) // stride
tmp_pad_need = max(0, (tmp_output - 1) * stride + ksize - input_size)
tmp_pad_before = tmp_pad_need // 2
@ -272,7 +272,7 @@ def _windowed_output_size(input_size, ksize, stride, padding):
@constexpr
def _get_mean_matrix(x_shape, ksize, stride, padding, x_dtype):
def _get_mean_matrix(x_shape, ksize, stride, pad_mode, x_dtype):
"""
helper func for AvgPoolGrad.
@ -291,9 +291,9 @@ def _get_mean_matrix(x_shape, ksize, stride, padding, x_dtype):
h_output, w_output = 0, 0
pad_top, pad_bottom, pad_left, pad_right = 0, 0, 0, 0
h_output, pad_top, pad_bottom = _windowed_output_size(h_input, h_ksize,
h_stride, padding)
h_stride, pad_mode)
w_output, pad_left, pad_right = _windowed_output_size(w_input, w_ksize,
w_stride, padding)
w_stride, pad_mode)
output_size = n_output * c_output * h_output * w_output
output_shape = (n_output, c_output, h_output, w_output)
@ -321,7 +321,7 @@ def _get_mean_matrix(x_shape, ksize, stride, padding, x_dtype):
@constexpr
def _get_kernel_matrix(x_shape_nchw, kernel_matrix_shape, padding, x_dtype):
def _get_kernel_matrix(x_shape_nchw, kernel_matrix_shape, pad_mode, x_dtype):
kernel_matrix = np.ones(kernel_matrix_shape)
return Tensor(kernel_matrix, x_dtype)
@ -333,9 +333,9 @@ def get_bprop_avg_pool_grad(self):
# the parameter of AvgPoolGrad in GPU and TBE/CPU is not same
if self.target == "GPU":
avgpool_grad_gpu = G.AvgPoolGradGpu(
ksize=self.ksize,
kernel_size=self.kernel_size,
strides=self.strides,
padding=self.padding,
pad_mode=self.pad_mode,
data_format=self.format)
def bprop_gpu(x, out, dout):
@ -346,9 +346,9 @@ def get_bprop_avg_pool_grad(self):
elif self.target == "CPU":
avgpool_grad_cpu = G.AvgPoolGradCpu(
ksize=self.ksize,
kernel_size=self.kernel_size,
strides=self.strides,
padding=self.padding,
pad_mode=self.pad_mode,
data_format=self.format)
def bprop_cpu(x, out, dout):
@ -359,9 +359,9 @@ def get_bprop_avg_pool_grad(self):
elif self.target == "GE":
avgpool_grad_ge = G.AvgPoolGrad(
ksize=self.ksize,
kernel_size=self.kernel_size,
strides=self.strides,
padding=self.padding)
pad_mode=self.pad_mode)
shape_op = P.Shape()
def bprop_ge(x, out, dout):
@ -372,12 +372,12 @@ def get_bprop_avg_pool_grad(self):
else:
avgpool_grad_vm = G.AvgPoolGradVm(
ksize=self.ksize,
kernel_size=self.kernel_size,
strides=self.strides,
padding=self.padding)
k_size_nchw = avgpool_grad_vm.ksize
pad_mode=self.pad_mode)
k_size_nchw = avgpool_grad_vm.kernel_size
stride_nchw = avgpool_grad_vm.strides
padding = self.padding
pad_mode = self.pad_mode
def bprop_vm(x, out, dout):
x_shape_nchw = F.shape(x)
@ -385,8 +385,8 @@ def get_bprop_avg_pool_grad(self):
kernel_matrix_shape = (1, x_shape_nchw[1],
k_size_nchw[2],
k_size_nchw[3])
mean_matrix = _get_mean_matrix(x_shape_nchw, k_size_nchw, stride_nchw, padding, x_dtype)
kernel_matrix = _get_kernel_matrix(x_shape_nchw, kernel_matrix_shape, padding, x_dtype)
mean_matrix = _get_mean_matrix(x_shape_nchw, k_size_nchw, stride_nchw, pad_mode, x_dtype)
kernel_matrix = _get_kernel_matrix(x_shape_nchw, kernel_matrix_shape, pad_mode, x_dtype)
dx = avgpool_grad_vm(x_shape_nchw, dout, mean_matrix, kernel_matrix)
return (dx,)

View File

@ -23,9 +23,9 @@ avg_pool_op_info = TBERegOp("AvgPool") \
.compute_cost(10) \
.kernel_name("avg_pool") \
.partial_flag(True) \
.attr("ksize", "required", "listInt", "all") \
.attr("kernel_size", "required", "listInt", "all") \
.attr("strides", "required", "listInt", "all") \
.attr("padding", "required", "str", "all") \
.attr("pad_mode", "required", "str", "all") \
.attr("data_format", "optional", "str", "all") \
.input(0, "x", False, "required", "all") \
.input(1, "filter", False, "optional", "all") \

View File

@ -24,9 +24,9 @@ avg_pool_grad_op_info = TBERegOp("AvgPoolGrad") \
.kernel_name("avg_pool_grad_d") \
.partial_flag(True) \
.attr("x_origin", "required", "listInt", "all") \
.attr("ksize", "required", "listInt", "all") \
.attr("kernel_size", "required", "listInt", "all") \
.attr("strides", "required", "listInt", "all") \
.attr("padding", "required", "str", "all") \
.attr("pad_mode", "required", "str", "all") \
.attr("data_format", "optional", "str", "all") \
.input(0, "input_grad", False, "required", "all") \
.input(1, "mean_matrix", False, "optional", "all") \

View File

@ -24,9 +24,9 @@ avg_pool_grad_vm_op_info = TBERegOp("AvgPoolGradVm") \
.kernel_name("avg_pool_grad_d") \
.partial_flag(True) \
.attr("x_origin", "required", "listInt", "all") \
.attr("ksize", "required", "listInt", "all") \
.attr("kernel_size", "required", "listInt", "all") \
.attr("strides", "required", "listInt", "all") \
.attr("padding", "required", "str", "all") \
.attr("pad_mode", "required", "str", "all") \
.attr("data_format", "optional", "str", "all") \
.input(0, "input_grad", False, "required", "all") \
.input(1, "mean_matrix", False, "optional", "all") \

View File

@ -23,9 +23,9 @@ max_pool_op_info = TBERegOp("MaxPool") \
.compute_cost(10) \
.kernel_name("max_pool") \
.partial_flag(True) \
.attr("ksize", "required", "listInt", "all") \
.attr("kernel_size", "required", "listInt", "all") \
.attr("strides", "required", "listInt", "all") \
.attr("padding", "required", "str", "all") \
.attr("pad_mode", "required", "str", "all") \
.attr("data_format", "required", "str", "all") \
.input(0, "input_data", False, "required", "all") \
.output(0, "output_data", False, "required", "all") \

View File

@ -23,9 +23,9 @@ max_pool_grad_op_info = TBERegOp("MaxPoolGrad") \
.compute_cost(10) \
.kernel_name("max_pool_grad") \
.partial_flag(True) \
.attr("ksize", "required", "listInt", "all") \
.attr("kernel_size", "required", "listInt", "all") \
.attr("strides", "required", "listInt", "all") \
.attr("padding", "required", "str", "all") \
.attr("pad_mode", "required", "str", "all") \
.input(0, "x1", False, "required", "all") \
.input(1, "x2", False, "required", "all") \
.input(2, "grad", False, "required", "all") \

View File

@ -23,9 +23,9 @@ max_pool_grad_grad_op_info = TBERegOp("MaxPoolGradGrad") \
.compute_cost(10) \
.kernel_name("max_pool_grad_grad") \
.partial_flag(True) \
.attr("ksize", "required", "listInt", "all") \
.attr("kernel_size", "required", "listInt", "all") \
.attr("strides", "required", "listInt", "all") \
.attr("padding", "required", "str", "all") \
.attr("pad_mode", "required", "str", "all") \
.attr("data_format", "optional", "str", "all") \
.input(0, "x1", False, "required", "all") \
.input(1, "x2", False, "required", "all") \

View File

@ -23,9 +23,9 @@ max_pool_grad_grad_with_argmax_op_info = TBERegOp("MaxPoolGradGradWithArgmax") \
.compute_cost(10) \
.kernel_name("max_pool_grad_grad_with_argmax") \
.partial_flag(True) \
.attr("ksize", "required", "listInt", "all") \
.attr("kernel_size", "required", "listInt", "all") \
.attr("strides", "required", "listInt", "all") \
.attr("padding", "required", "str", "all") \
.attr("pad_mode", "required", "str", "all") \
.input(0, "x", False, "required", "all") \
.input(1, "grad", False, "required", "all") \
.input(2, "argmax", False, "optional", "all") \

View File

@ -23,9 +23,9 @@ max_pool_grad_with_argmax_op_info = TBERegOp("MaxPoolGradWithArgmax") \
.compute_cost(10) \
.kernel_name("max_pool_grad_with_argmax") \
.partial_flag(True) \
.attr("ksize", "required", "listInt", "all") \
.attr("kernel_size", "required", "listInt", "all") \
.attr("strides", "required", "listInt", "all") \
.attr("padding", "required", "str", "all") \
.attr("pad_mode", "required", "str", "all") \
.input(0, "x", False, "required", "all") \
.input(1, "grad", False, "required", "all") \
.input(2, "argmax", False, "optional", "all") \

View File

@ -23,9 +23,9 @@ max_pool_with_argmax_op_info = TBERegOp("MaxPoolWithArgmax") \
.compute_cost(10) \
.kernel_name("max_pool_with_argmax") \
.partial_flag(True) \
.attr("ksize", "required", "listInt", "all") \
.attr("kernel_size", "required", "listInt", "all") \
.attr("strides", "required", "listInt", "all") \
.attr("padding", "required", "str", "all") \
.attr("pad_mode", "required", "str", "all") \
.input(0, "x", False, "required", "all") \
.output(0, "y", False, "required", "all") \
.output(1, "argmax", False, "required", "all") \

View File

@ -810,13 +810,13 @@ class _PoolGrad(PrimitiveWithInfer):
"""Gradients of the max/avg pool operation."""
@prim_attr_register
def __init__(self, ksize, strides, padding="VALID", data_format="NCHW"):
def __init__(self, kernel_size, strides, pad_mode="VALID", data_format="NCHW"):
self.init_prim_io_names(inputs=['x_origin', 'out_origin', 'grad'], outputs=['output'])
validator.check_value_type('ksize', ksize, [int, tuple], self.name)
validator.check_value_type('kernel_size', kernel_size, [int, tuple], self.name)
validator.check_value_type('strides', strides, [int, tuple], self.name)
self.padding = validator.check_string(padding.upper(), ['VALID', 'SAME'], 'padding', self.name)
self.add_prim_attr("padding", self.padding)
self.pad_mode = validator.check_string(pad_mode.upper(), ['VALID', 'SAME'], 'pad_mode', self.name)
self.add_prim_attr("pad_mode", self.pad_mode)
self.format = validator.check_string(data_format, ['NCHW', 'NHWC'], 'format', self.name)
if context.get_context("device_target") != "GPU" and self.format == "NHWC":
raise ValueError("NHWC format only support in GPU target.")
@ -842,9 +842,10 @@ class _PoolGrad(PrimitiveWithInfer):
raise error_msg
return ret
ksize = _grad_check_int_or_tuple("ksize", ksize, self.is_maxpoolgradwithargmax)
self.ksize = ksize if self.format == "NCHW" else [ksize[0], ksize[2], ksize[3], ksize[1]]
self.add_prim_attr("ksize", self.ksize)
kernel_size = _grad_check_int_or_tuple("kernel_size", kernel_size, self.is_maxpoolgradwithargmax)
self.kernel_size = kernel_size if self.format == "NCHW" else [kernel_size[0], kernel_size[2],
kernel_size[3], kernel_size[1]]
self.add_prim_attr("kernel_size", self.kernel_size)
strides = _grad_check_int_or_tuple("strides", strides, self.is_maxpoolgradwithargmax)
self.strides = strides if self.format == "NCHW" else [strides[0], strides[2], strides[3], strides[1]]
@ -855,8 +856,8 @@ class AvgPoolGrad(_PoolGrad):
"""Gradients of the avg pool operation for ge."""
@prim_attr_register
def __init__(self, ksize=1, strides=1, padding="VALID"):
super(AvgPoolGrad, self).__init__(ksize, strides, padding)
def __init__(self, kernel_size=1, strides=1, pad_mode="VALID"):
super(AvgPoolGrad, self).__init__(kernel_size, strides, pad_mode)
def __infer__(self, origin_input, dout):
out = {
@ -872,8 +873,8 @@ class AvgPoolGradVm(_PoolGrad):
"""Gradients of the avg pool operation for vm."""
@prim_attr_register
def __init__(self, ksize=1, strides=1, padding="VALID"):
super(AvgPoolGradVm, self).__init__(ksize, strides, padding)
def __init__(self, kernel_size=1, strides=1, pad_mode="VALID"):
super(AvgPoolGradVm, self).__init__(kernel_size, strides, pad_mode)
self.init_prim_io_names(inputs=['x_origin', 'grad', 'mean_matrix', 'kernel_matrix'], outputs=['output'])
def __infer__(self, origin_input, dout, mean_matrix, kernel_matrix):
@ -890,8 +891,8 @@ class AvgPoolGradGpu(_PoolGrad):
"""Gradients of the avg pool operation for gpu."""
@prim_attr_register
def __init__(self, ksize=1, strides=1, padding="VALID", data_format="NCHW"):
super(AvgPoolGradGpu, self).__init__(ksize, strides, padding, data_format)
def __init__(self, kernel_size=1, strides=1, pad_mode="VALID", data_format="NCHW"):
super(AvgPoolGradGpu, self).__init__(kernel_size, strides, pad_mode, data_format)
def infer_shape(self, x1_shape, x2_shape, grad_shape):
return x1_shape
@ -904,8 +905,8 @@ class AvgPoolGradCpu(_PoolGrad):
"""Gradients of the avg pool operation for cpu."""
@prim_attr_register
def __init__(self, ksize=1, strides=1, padding="VALID", data_format="NCHW"):
super(AvgPoolGradCpu, self).__init__(ksize, strides, padding, data_format)
def __init__(self, kernel_size=1, strides=1, pad_mode="VALID", data_format="NCHW"):
super(AvgPoolGradCpu, self).__init__(kernel_size, strides, pad_mode, data_format)
def infer_shape(self, x1_shape, x2_shape, grad_shape):
return x1_shape
@ -918,8 +919,8 @@ class MaxPoolGrad(_PoolGrad):
"""Performs gradients of the max pool operation."""
@prim_attr_register
def __init__(self, ksize=1, strides=1, padding="VALID", data_format="NCHW"):
super(MaxPoolGrad, self).__init__(ksize, strides, padding, data_format)
def __init__(self, kernel_size=1, strides=1, pad_mode="VALID", data_format="NCHW"):
super(MaxPoolGrad, self).__init__(kernel_size, strides, pad_mode, data_format)
def infer_shape(self, x1_shape, x2_shape, grad_shape):
return x1_shape
@ -933,13 +934,13 @@ class MaxPoolGradGrad(_PoolGrad):
Performs gradients of the MaxPoolGrad operation.
Args:
ksize (Union[int, tuple[int]]): The size of kernel used to take the maximum value,
is an int number that represents height and width are both ksize, or a tuple
kernel_size (Union[int, tuple[int]]): The size of kernel used to take the maximum value,
is an int number that represents height and width are both kernel_size, or a tuple
of two int numbers that represent height and width respectively. Default: 1.
strides (Union[int, tuple[int]]): The distance of kernel moving, an int number that represents
the height and width of movement are both strides, or a tuple of two int numbers that
represent height and width of movement respectively. Default: 1.
padding (str): The optional value for pad mode, is "same" or "valid", not case sensitive.
pad_mode (str): The optional value for pad mode, is "same" or "valid", not case sensitive.
Default: "valid".
- same: Adopts the way of completion. The height and width of the output will be the same as
@ -961,8 +962,8 @@ class MaxPoolGradGrad(_PoolGrad):
"""
@prim_attr_register
def __init__(self, ksize=1, strides=1, padding="VALID"):
super(MaxPoolGradGrad, self).__init__(ksize, strides, padding)
def __init__(self, kernel_size=1, strides=1, pad_mode="VALID"):
super(MaxPoolGradGrad, self).__init__(kernel_size, strides, pad_mode)
def infer_shape(self, x1_shape, x2_shape, grad_shape):
return x1_shape
@ -985,9 +986,9 @@ class MaxPoolGradWithArgmax(_PoolGrad):
"""Computes the gradients of MaxPoolWithArgmax."""
@prim_attr_register
def __init__(self, ksize=1, strides=1, padding="VALID"):
def __init__(self, kernel_size=1, strides=1, pad_mode="VALID"):
self.init_prim_io_names(inputs=['x', 'grad', 'argmax'], outputs=['output'])
super(MaxPoolGradWithArgmax, self).__init__(ksize, strides, padding)
super(MaxPoolGradWithArgmax, self).__init__(kernel_size, strides, pad_mode)
def infer_shape(self, x_shape, grad_shape, argmax_shape):
if not grad_shape:
@ -1003,13 +1004,13 @@ class MaxPoolGradGradWithArgmax(_PoolGrad):
Computes the gradients of MaxPoolGradWithArgmax.
Args:
ksize (Union[int, tuple[int]]): The size of kernel used to take the maximum value,
is an int number that represents height and width are both ksize, or a tuple
kernel_size (Union[int, tuple[int]]): The size of kernel used to take the maximum value,
is an int number that represents height and width are both kernel_size, or a tuple
of two int numbers that represent height and width respectively. Default: 1.
strides (Union[int, tuple[int]]): The distance of kernel moving, an int number that represents
the height and width of movement are both strides, or a tuple of two int numbers that
represent height and width of movement respectively. Default: 1.
padding (str): The optional value for pad mode, is "same" or "valid", not case sensitive.
pad_mode (str): The optional value for pad mode, is "same" or "valid", not case sensitive.
Default: "valid".
- same: Adopts the way of completion. The height and width of the output will be the same as
@ -1031,9 +1032,9 @@ class MaxPoolGradGradWithArgmax(_PoolGrad):
"""
@prim_attr_register
def __init__(self, ksize=1, strides=1, padding="VALID"):
def __init__(self, kernel_size=1, strides=1, pad_mode="VALID"):
self.init_prim_io_names(inputs=['x', 'grad', 'argmax'], outputs=['output'])
super(MaxPoolGradGradWithArgmax, self).__init__(ksize, strides, padding)
super(MaxPoolGradGradWithArgmax, self).__init__(kernel_size, strides, pad_mode)
def infer_shape(self, x_shape, grad_shape, argmax_shape):
if not grad_shape:

View File

@ -1518,23 +1518,23 @@ class _Pool(PrimitiveWithInfer):
Performs max/avg pooling operation.
Args:
ksize (Union[int, tuple[int]]): The size of the kernel, that must be a tuple
kernel_size (Union[int, tuple[int]]): The size of the kernel, that must be a tuple
of two `int` for height and width. Default: 1.
strides (Union[int, tuple[int]]): The stride of the window, that must be
a tuple of two `int` for height and width. Default: 1.
padding (str): The optional value for pad mode, is "same" or "valid", not case sensitive.
pad_mode (str): The optional value for pad mode, is "same" or "valid", not case sensitive.
Default: "valid".
data_format (str): The optional value for data format, is 'NHWC' or 'NCHW'.
Default: "NCHW".
"""
@prim_attr_register
def __init__(self, ksize=1, strides=1, padding="valid", data_format="NCHW"):
def __init__(self, kernel_size=1, strides=1, pad_mode="valid", data_format="NCHW"):
self.init_prim_io_names(inputs=['x'], outputs=['output'])
validator.check_value_type('ksize', ksize, [int, tuple], self.name)
validator.check_value_type('kernel_size', kernel_size, [int, tuple], self.name)
validator.check_value_type('strides', strides, [int, tuple], self.name)
self.padding = validator.check_string(padding.upper(), ['VALID', 'SAME'], 'padding', self.name)
self.add_prim_attr("padding", self.padding)
self.pad_mode = validator.check_string(pad_mode.upper(), ['VALID', 'SAME'], 'pad_mode', self.name)
self.add_prim_attr("pad_mode", self.pad_mode)
self.is_maxpoolwithargmax = (self.name == "MaxPoolWithArgmax")
self.format = validator.check_string(data_format, ['NCHW', 'NHWC'], 'format', self.name)
if context.get_context("device_target") != "GPU" and self.format == "NHWC":
@ -1542,10 +1542,11 @@ class _Pool(PrimitiveWithInfer):
if not self.is_maxpoolwithargmax:
self.add_prim_attr('data_format', self.format)
self.ksize = _check_positive_int_or_tuple("ksize", ksize, self.name, allow_four=False, ret_four=True)
self.kernel_size = _check_positive_int_or_tuple(
"kernel_size", kernel_size, self.name, allow_four=False, ret_four=True)
if self.is_maxpoolwithargmax:
self.ksize = (1, self.ksize[-2], self.ksize[-1], 1)
self.add_prim_attr("ksize", self.ksize)
self.kernel_size = (1, self.kernel_size[-2], self.kernel_size[-1], 1)
self.add_prim_attr("kernel_size", self.kernel_size)
self.strides = _check_positive_int_or_tuple("strides", strides, self.name, allow_four=False, ret_four=True)
if self.is_maxpoolwithargmax:
@ -1557,16 +1558,16 @@ class _Pool(PrimitiveWithInfer):
validator.check_equal_int(len(x_shape_norm), 4, "x rank", self.name)
batch, channel, input_h, input_w = x_shape_norm
if self.is_maxpoolwithargmax:
_, kernel_h, kernel_w, _ = self.ksize
_, kernel_h, kernel_w, _ = self.kernel_size
_, stride_h, stride_w, _ = self.strides
else:
_, _, kernel_h, kernel_w = self.ksize
_, _, kernel_h, kernel_w = self.kernel_size
_, _, stride_h, stride_w = self.strides
if self.padding == "VALID":
if self.pad_mode == "VALID":
out_h = math.ceil((input_h - (kernel_h - 1)) / stride_h)
out_w = math.ceil((input_w - (kernel_w - 1)) / stride_w)
elif self.padding == "SAME":
elif self.pad_mode == "SAME":
out_h = math.ceil(input_h / stride_h)
out_w = math.ceil(input_w / stride_w)
out_shape = [batch, channel, out_h, out_w] if self.format == "NCHW" else [batch, out_h, out_w, channel]
@ -1597,13 +1598,13 @@ class MaxPool(_Pool):
\text{input}(N_i, C_j, s_0 \times h + m, s_1 \times w + n)
Args:
ksize (Union[int, tuple[int]]): The size of kernel used to take the maximum value,
is an int number that represents height and width are both ksize, or a tuple
kernel_size (Union[int, tuple[int]]): The size of kernel used to take the maximum value,
is an int number that represents height and width are both kernel_size, or a tuple
of two int numbers that represent height and width respectively. Default: 1.
strides (Union[int, tuple[int]]): The distance of kernel moving, an int number that represents
the height and width of movement are both strides, or a tuple of two int numbers that
represent height and width of movement respectively. Default: 1.
padding (str): The optional value for pad mode, is "same" or "valid", not case sensitive.
pad_mode (str): The optional value for pad mode, is "same" or "valid", not case sensitive.
Default: "valid".
format (str) : The optional value for data format, is 'NHWC' or 'NCHW'.
Default: 'NCHW'.
@ -1627,7 +1628,7 @@ class MaxPool(_Pool):
Examples:
>>> input_tensor = Tensor(np.arange(1 * 3 * 3 * 4).reshape((1, 3, 3, 4)), mindspore.float32)
>>> maxpool_op = ops.MaxPool(padding="VALID", ksize=2, strides=1)
>>> maxpool_op = ops.MaxPool(pad_mode="VALID", kernel_size=2, strides=1)
>>> output = maxpool_op(input_tensor)
>>> print(output)
[[[[ 5. 6. 7.]
@ -1639,8 +1640,8 @@ class MaxPool(_Pool):
"""
@prim_attr_register
def __init__(self, ksize=1, strides=1, padding="valid", data_format="NCHW"):
super(MaxPool, self).__init__(ksize, strides, padding, data_format)
def __init__(self, kernel_size=1, strides=1, pad_mode="valid", data_format="NCHW"):
super(MaxPool, self).__init__(kernel_size, strides, pad_mode, data_format)
class MaxPoolWithArgmax(_Pool):
@ -1656,13 +1657,13 @@ class MaxPoolWithArgmax(_Pool):
\text{input}(N_i, C_j, s_0 \times h + m, s_1 \times w + n)
Args:
ksize (Union[int, tuple[int]]): The size of kernel used to take the maximum value and arg value,
is an int number that represents height and width are both ksize, or a tuple of
kernel_size (Union[int, tuple[int]]): The size of kernel used to take the maximum value and arg
value, is an int number that represents height and width are both kernel_size, or a tuple of
two int numbers that represent height and width respectively. Default: 1.
strides (Union[int, tuple[int]]): The distance of kernel moving, an int number that represents
the height and width of movement are both strides, or a tuple of two int numbers that
represent height and width of movement respectively. Default: 1.
padding (str): The optional value for pad mode, is "same" or "valid", not case sensitive.
pad_mode (str): The optional value for pad mode, is "same" or "valid", not case sensitive.
Default: "valid".
- same: Adopts the way of completion. The height and width of the output will be the same as
@ -1693,7 +1694,7 @@ class MaxPoolWithArgmax(_Pool):
Examples:
>>> input_tensor = Tensor(np.arange(1 * 3 * 3 * 4).reshape((1, 3, 3, 4)), mindspore.float32)
>>> maxpool_arg_op = ops.MaxPoolWithArgmax(padding="VALID", ksize=2, strides=1)
>>> maxpool_arg_op = ops.MaxPoolWithArgmax(pad_mode="VALID", kernel_size=2, strides=1)
>>> output_tensor, argmax = maxpool_arg_op(input_tensor)
>>> print(output_tensor)
[[[[ 5. 6. 7.]
@ -1704,8 +1705,8 @@ class MaxPoolWithArgmax(_Pool):
[33. 34. 35.]]]]
"""
def __init__(self, ksize=1, strides=1, padding="valid", data_format="NCHW"):
super(MaxPoolWithArgmax, self).__init__(ksize, strides, padding, data_format)
def __init__(self, kernel_size=1, strides=1, pad_mode="valid", data_format="NCHW"):
super(MaxPoolWithArgmax, self).__init__(kernel_size, strides, pad_mode, data_format)
self.is_tbe = context.get_context("device_target") == "Ascend"
self.is_gpu = context.get_context("device_target") == "GPU"
@ -1733,13 +1734,13 @@ class AvgPool(_Pool):
\text{input}(N_i, C_j, s_0 \times h + m, s_1 \times w + n)
Args:
ksize (Union[int, tuple[int]]): The size of kernel used to take the average value,
is an int number that represents height and width are both ksize, or a tuple
kernel_size (Union[int, tuple[int]]): The size of kernel used to take the average value,
is an int number that represents height and width are both kernel_size, or a tuple
of two int numbers that represent height and width respectively. Default: 1.
strides (Union[int, tuple[int]]): The distance of kernel moving, an int number that represents
the height and width of movement are both strides, or a tuple of two int numbers that
represent height and width of movement respectively. Default: 1.
padding (str): The optional value for pad mode, is "same" or "valid", not case sensitive.
pad_mode (str): The optional value for pad mode, is "same" or "valid", not case sensitive.
Default: "valid".
- same: Adopts the way of completion. The height and width of the output will be the same as
@ -1770,7 +1771,7 @@ class AvgPool(_Pool):
>>> class Net(nn.Cell):
... def __init__(self):
... super(Net, self).__init__()
... self.avgpool_op = ops.AvgPool(padding="VALID", ksize=2, strides=1)
... self.avgpool_op = ops.AvgPool(pad_mode="VALID", kernel_size=2, strides=1)
...
... def construct(self, x):
... result = self.avgpool_op(x)
@ -1789,7 +1790,7 @@ class AvgPool(_Pool):
"""
@prim_attr_register
def __init__(self, ksize=1, strides=1, padding="valid", data_format="NCHW"):
def __init__(self, kernel_size=1, strides=1, pad_mode="valid", data_format="NCHW"):
if context.get_context("device_target") == "GPU":
self.target = "GPU"
elif context.get_context("device_target") == "CPU":
@ -1798,7 +1799,7 @@ class AvgPool(_Pool):
self.target = "GE"
else:
self.target = "OTHER"
super(AvgPool, self).__init__(ksize, strides, padding, data_format)
super(AvgPool, self).__init__(kernel_size, strides, pad_mode, data_format)
class Conv2DBackpropInput(PrimitiveWithInfer):

View File

@ -295,7 +295,7 @@ class CenterFaceWithNms(nn.Cell):
self.centerface_network = network
self.config = ConfigCenterface()
# two type of maxpool self.maxpool2d = nn.MaxPool2d(kernel_size=3, stride=1, pad_mode='same')
self.maxpool2d = P.MaxPoolWithArgmax(ksize=3, strides=1, padding='same')
self.maxpool2d = P.MaxPoolWithArgmax(kernel_size=3, strides=1, pad_mode='same')
self.topk = P.TopK(sorted=True)
self.reshape = P.Reshape()
self.print = P.Print()

View File

@ -87,7 +87,7 @@ class FeatPyramidNeck(nn.Cell):
self.interpolate1 = P.ResizeNearestNeighbor((48, 80))
self.interpolate2 = P.ResizeNearestNeighbor((96, 160))
self.interpolate3 = P.ResizeNearestNeighbor((192, 320))
self.maxpool = P.MaxPool(ksize=1, strides=2, padding="same")
self.maxpool = P.MaxPool(kernel_size=1, strides=2, pad_mode="same")
def construct(self, inputs):
x = ()

View File

@ -87,7 +87,7 @@ class ResNetFea(nn.Cell):
self.conv1 = _conv(3, 64, kernel_size=7, stride=2, padding=3, pad_mode='pad')
self.bn1 = _BatchNorm2dInit(64, affine=bn_training, use_batch_statistics=bn_training)
self.relu = P.ReLU()
self.maxpool = P.MaxPool(ksize=3, strides=2, padding="SAME")
self.maxpool = P.MaxPool(kernel_size=3, strides=2, pad_mode="SAME")
self.weights_update = weights_update
if not self.weights_update:

View File

@ -86,7 +86,7 @@ class FeatPyramidNeck(nn.Cell):
self.interpolate2 = P.ResizeBilinear((96, 160))
self.interpolate3 = P.ResizeBilinear((192, 320))
self.cast = P.Cast()
self.maxpool = P.MaxPool(ksize=1, strides=2, padding="same")
self.maxpool = P.MaxPool(kernel_size=1, strides=2, pad_mode="same")
def construct(self, inputs):
x = ()

View File

@ -83,7 +83,7 @@ class ResNetFea(nn.Cell):
self.conv1 = _conv(3, 64, kernel_size=7, stride=2, padding=3, pad_mode='pad')
self.bn1 = _BatchNorm2dInit(64, affine=bn_training, use_batch_statistics=bn_training)
self.relu = P.ReLU()
self.maxpool = P.MaxPool(ksize=3, strides=2, padding="SAME")
self.maxpool = P.MaxPool(kernel_size=3, strides=2, pad_mode="SAME")
self.weights_update = weights_update
if not self.weights_update:

View File

@ -86,7 +86,7 @@ class FeatPyramidNeck(nn.Cell):
self.interpolate2 = P.ResizeBilinear((96, 160))
self.interpolate3 = P.ResizeBilinear((192, 320))
self.cast = P.Cast()
self.maxpool = P.MaxPool(ksize=1, strides=2, padding="same")
self.maxpool = P.MaxPool(kernel_size=1, strides=2, pad_mode="same")
def construct(self, inputs):
x = ()

View File

@ -248,7 +248,7 @@ class ResNet(nn.Cell):
self.conv1 = _conv7x7(3, 64, stride=2, damping=damping, loss_scale=loss_scale, frequency=frequency)
self.bn1 = _bn(64)
self.relu = P.ReLU()
self.maxpool = P.MaxPoolWithArgmax(padding="same", ksize=3, strides=2)
self.maxpool = P.MaxPoolWithArgmax(pad_mode="same", kernel_size=3, strides=2)
self.layer1 = self._make_layer(block,
layer_nums[0],

View File

@ -26,8 +26,8 @@ class Net(nn.Cell):
def __init__(self):
super(Net, self).__init__()
self.maxpool = P.MaxPoolWithArgmax(padding="same",
ksize=3,
self.maxpool = P.MaxPoolWithArgmax(pad_mode="same",
kernel_size=3,
strides=2)
self.x = Parameter(initializer(
'normal', [1, 64, 112, 112]), name='w')

View File

@ -39,8 +39,8 @@ class Net(nn.Cell):
def __init__(self):
super(Net, self).__init__()
self.maxpool = P.MaxPoolWithArgmax(padding="same",
ksize=3,
self.maxpool = P.MaxPoolWithArgmax(pad_mode="same",
kernel_size=3,
strides=2)
@ms_function

View File

@ -24,7 +24,7 @@ from mindspore.ops import operations as P
class Net(nn.Cell):
def __init__(self):
super(Net, self).__init__()
self.maxpool = P.MaxPool(padding="SAME", ksize=3, strides=2)
self.maxpool = P.MaxPool(pad_mode="SAME", kernel_size=3, strides=2)
@ms_function
def construct(self, x):

View File

@ -38,7 +38,7 @@ class Grad(nn.Cell):
class Net(nn.Cell):
def __init__(self):
super(Net, self).__init__()
self.maxpool = P.MaxPool(padding="SAME", ksize=3, strides=2)
self.maxpool = P.MaxPool(pad_mode="SAME", kernel_size=3, strides=2)
@ms_function
def construct(self, x):

View File

@ -29,8 +29,8 @@ context.set_context(mode=context.GRAPH_MODE, device_target='CPU')
class Net_Pool_Grad(nn.Cell):
def __init__(self):
super(Net_Pool_Grad, self).__init__()
self.maxpool_grad_fun = G.MaxPoolGrad(padding="VALID",
ksize=2,
self.maxpool_grad_fun = G.MaxPoolGrad(pad_mode="VALID",
kernel_size=2,
strides=2)
self.x = Parameter(initializer(

View File

@ -33,8 +33,8 @@ class Conv2dBpropInputInplace(nn.Cell):
self.conv2d_2 = P.Conv2DBackpropInput(out_channel=256, kernel_size=1)
self.w2 = Parameter(initializer(w2, w2.shape), name='w2')
self.add = P.TensorAdd()
self.maxpool = P.MaxPool(ksize=3, strides=2, padding='SAME')
self.maxpool_grad = G.MaxPoolGrad(ksize=3, strides=2, padding='SAME')
self.maxpool = P.MaxPool(kernel_size=3, strides=2, pad_mode='SAME')
self.maxpool_grad = G.MaxPoolGrad(kernel_size=3, strides=2, pad_mode='SAME')
self.shape = (32, 64, 56, 56)
def construct(self, x1, x2, x3):

View File

@ -25,8 +25,8 @@ from mindspore.ops.operations import _grad_ops as G
class Net_Pool_Grad(nn.Cell):
def __init__(self):
super(Net_Pool_Grad, self).__init__()
self.maxpool_grad_fun = G.MaxPoolGrad(padding="VALID",
ksize=2,
self.maxpool_grad_fun = G.MaxPoolGrad(pad_mode="VALID",
kernel_size=2,
strides=2)
def construct(self, x, a, d):

View File

@ -25,7 +25,7 @@ from mindspore.ops import composite as C
class MaxPoolWithArgMax_Net(Cell):
def __init__(self, padding, ksize, strides):
super(MaxPoolWithArgMax_Net, self).__init__()
self.maxpool_with_argmax = P.MaxPoolWithArgmax(padding=padding, ksize=ksize, strides=strides)
self.maxpool_with_argmax = P.MaxPoolWithArgmax(pad_mode=padding, kernel_size=ksize, strides=strides)
def construct(self, input_data):
output, argmax = self.maxpool_with_argmax(input_data)

View File

@ -25,7 +25,7 @@ from mindspore.ops.operations import _grad_ops as G
class Net_Pool_Grad(nn.Cell):
def __init__(self):
super(Net_Pool_Grad, self).__init__()
self.maxpool_grad_fun = G.MaxPoolGradWithArgmax(padding="VALID", ksize=2, strides=2)
self.maxpool_grad_fun = G.MaxPoolGradWithArgmax(pad_mode="VALID", kernel_size=2, strides=2)
def construct(self, x, dy, index):
return self.maxpool_grad_fun(x, dy, index)

View File

@ -271,7 +271,7 @@ class ResNet(nn.Cell):
self.bn1 = bn_with_initialize(64)
self.relu = P.ReLU()
self.maxpool = P.MaxPoolWithArgmax(ksize=3, strides=2, padding="SAME")
self.maxpool = P.MaxPoolWithArgmax(kernel_size=3, strides=2, pad_mode="SAME")
self.layer1 = MakeLayer0(block, in_channels=64, out_channels=256, stride=1)
self.layer2 = MakeLayer1(block, in_channels=256, out_channels=512, stride=2)

View File

@@ -272,7 +272,7 @@ class ResNet(nn.Cell):
self.bn1 = bn_with_initialize(64)
self.relu = P.ReLU()
self.maxpool = P.MaxPoolWithArgmax(ksize=3, strides=2, padding="SAME")
self.maxpool = P.MaxPoolWithArgmax(kernel_size=3, strides=2, pad_mode="SAME")
self.layer1 = MakeLayer0(block, in_channels=64, out_channels=256, stride=1)
self.layer2 = MakeLayer1(block, in_channels=256, out_channels=512, stride=2)

View File

@@ -249,7 +249,7 @@ class ResNet(nn.Cell):
self.bn1 = bn_with_initialize(64)
self.relu = P.ReLU()
self.maxpool = P.MaxPoolWithArgmax(ksize=3, strides=2, padding="SAME")
self.maxpool = P.MaxPoolWithArgmax(kernel_size=3, strides=2, pad_mode="SAME")
self.layer1 = MakeLayer0(block, in_channels=64, out_channels=256, stride=1)
self.layer2 = MakeLayer1(block, in_channels=256, out_channels=512, stride=2)

View File

@@ -21,7 +21,7 @@ addn = P.AddN()
add = P.TensorAdd()
sub = P.Sub()
mul = P.Mul()
max_pool = P.MaxPoolWithArgmax(padding="same", ksize=3, strides=2)
max_pool = P.MaxPoolWithArgmax(pad_mode="same", kernel_size=3, strides=2)
make_tuple = Primitive('make_tuple')
four2five = Primitive('Four2Five')
five2four = Primitive('Five2Four')

View File

@@ -17,7 +17,7 @@ from mindspore.ops import operations as P
tuple_getitem = Primitive('tuple_getitem')
add = P.TensorAdd()
max_pool = P.MaxPoolWithArgmax(padding="same", ksize=3, strides=2)
max_pool = P.MaxPoolWithArgmax(pad_mode="same", kernel_size=3, strides=2)
make_tuple = Primitive('make_tuple')
transdata = Primitive("TransData")

View File

@@ -21,7 +21,7 @@ addn = P.AddN()
add = P.TensorAdd()
sub = P.Sub()
mul = P.Mul()
max_pool = P.MaxPoolWithArgmax(padding="same", ksize=3, strides=2)
max_pool = P.MaxPoolWithArgmax(pad_mode="same", kernel_size=3, strides=2)
make_tuple = Primitive('make_tuple')
cast = Primitive('Cast')

View File

@@ -17,7 +17,7 @@ from mindspore.ops import operations as P
tuple_getitem = Primitive('tuple_getitem')
add = P.TensorAdd()
max_pool = P.MaxPoolWithArgmax(padding="same", ksize=3, strides=2)
max_pool = P.MaxPoolWithArgmax(pad_mode="same", kernel_size=3, strides=2)
make_tuple = Primitive('make_tuple')
trans_data = Primitive("TransData")

View File

@@ -17,7 +17,7 @@ from mindspore.ops import operations as P
tuple_getitem = Primitive('tuple_getitem')
add = P.TensorAdd()
max_pool = P.MaxPoolWithArgmax(padding="same", ksize=3, strides=2)
max_pool = P.MaxPoolWithArgmax(pad_mode="same", kernel_size=3, strides=2)
make_tuple = Primitive('make_tuple')
four2five = Primitive('Four2Five')
five2four = Primitive('Five2Four')

View File

@@ -17,7 +17,7 @@ from mindspore.ops import operations as P
tuple_getitem = Primitive('tuple_getitem')
add = P.TensorAdd()
max_pool = P.MaxPoolWithArgmax(padding="same", ksize=3, strides=2)
max_pool = P.MaxPoolWithArgmax(pad_mode="same", kernel_size=3, strides=2)
make_tuple = Primitive('make_tuple')
transdata = Primitive("TransData")
Transpose = P.Transpose()

View File

@@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import mindspore as ms
import mindspore.common.dtype as mstype
from mindspore.ops import Primitive
from mindspore.ops import operations as P
@@ -21,12 +21,12 @@ add = P.TensorAdd()
reshape = P.Reshape()
cast = P.Cast()
tuple_getitem = Primitive('tuple_getitem')
max_pool = P.MaxPoolWithArgmax(padding="same", ksize=3, strides=2)
max_pool = P.MaxPoolWithArgmax(pad_mode="same", kernel_size=3, strides=2)
def test_addn_cast(x, y, z):
mysum = addn((x, y, z))
res = cast(mysum, ms.float16)
res = cast(mysum, mstype.float16)
return res

View File

@@ -109,7 +109,7 @@ class DefinedNet(nn.Cell):
self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=0, weight_init="zeros")
self.bn1 = nn.BatchNorm2d(64)
self.relu = nn.ReLU()
self.maxpool = P.MaxPoolWithArgmax(padding="same", ksize=2, strides=2)
self.maxpool = P.MaxPoolWithArgmax(pad_mode="same", kernel_size=2, strides=2)
self.flatten = nn.Flatten()
self.fc = nn.Dense(int(56 * 56 * 64), num_classes)

View File

@@ -400,7 +400,7 @@ def test_max_pool_with_arg_max():
def __init__(self):
""" ComparisonNet definition """
super(NetMaxPoolWithArgMax, self).__init__()
self.max_pool_with_arg_max = P.MaxPoolWithArgmax(padding="valid", ksize=2, strides=1)
self.max_pool_with_arg_max = P.MaxPoolWithArgmax(pad_mode="valid", kernel_size=2, strides=1)
def construct(self, x):
ret = self.max_pool_with_arg_max(x)
@@ -675,15 +675,15 @@ test_cases_for_verify_exception = [
'desc_inputs': [0],
}),
('MaxPoolWithArgmax_ValueError_1', {
'block': (lambda _: P.MaxPoolWithArgmax(padding='sane'), {'exception': ValueError}),
'block': (lambda _: P.MaxPoolWithArgmax(pad_mode='sane'), {'exception': ValueError}),
'desc_inputs': [0],
}),
('MaxPoolWithArgmax_ValueError_2', {
'block': (lambda _: P.MaxPoolWithArgmax(ksize='1'), {'exception': TypeError}),
'block': (lambda _: P.MaxPoolWithArgmax(kernel_size='1'), {'exception': TypeError}),
'desc_inputs': [0],
}),
('MaxPoolWithArgmax_ValueError_3', {
'block': (lambda _: P.MaxPoolWithArgmax(ksize=-2), {'exception': ValueError}),
'block': (lambda _: P.MaxPoolWithArgmax(kernel_size=-2), {'exception': ValueError}),
'desc_inputs': [0],
}),
('MaxPoolWithArgmax_ValueError_4', {

View File

@@ -288,7 +288,8 @@ raise_set = [
'skip': ['backward']}),
# kernel size is invalid(very large)
('MaxPoolWithArgmax3', {
'block': (P.MaxPoolWithArgmax(ksize=50), {'exception': ValueError, 'error_keywords': ['MaxPoolWithArgmax']}),
'block': (P.MaxPoolWithArgmax(kernel_size=50),
{'exception': ValueError, 'error_keywords': ['MaxPoolWithArgmax']}),
'desc_inputs': [Tensor(np.ones([1, 1, 32, 32]).astype(np.float32))],
'skip': ['backward']}),
@@ -304,7 +305,7 @@ raise_set = [
'skip': ['backward']}),
# rank of x is not 4
('MaxPool2', {
'block': (P.MaxPool(ksize=50, strides=1), {'exception': ValueError, 'error_keywords': ['MaxPool']}),
'block': (P.MaxPool(kernel_size=50, strides=1), {'exception': ValueError, 'error_keywords': ['MaxPool']}),
'desc_inputs': [Tensor(np.ones([1, 1, 32, 32]).astype(np.float32))],
'skip': ['backward']}),
@@ -320,7 +321,7 @@ raise_set = [
'skip': ['backward']}),
# rank of x is not 4
('AvgPool2', {
'block': (P.AvgPool(ksize=50, strides=1), {'exception': ValueError, 'error_keywords': ['AvgPool']}),
'block': (P.AvgPool(kernel_size=50, strides=1), {'exception': ValueError, 'error_keywords': ['AvgPool']}),
'desc_inputs': [Tensor(np.ones([1, 1, 32, 32]).astype(np.float32))],
'skip': ['backward']}),

View File

@@ -1635,20 +1635,20 @@ test_case_nn_ops = [
'desc_inputs': [[1, 3, 4, 4]],
'desc_bprop': [[1, 3, 4, 4]]}),
('MaxPool', {
'block': P.MaxPool(ksize=(2, 2), strides=(2, 2), padding="VALID"),
'block': P.MaxPool(kernel_size=(2, 2), strides=(2, 2), pad_mode="VALID"),
'desc_inputs': [[100, 3, 28, 28]],
'desc_bprop': [[100, 3, 14, 14]]}),
('MaxPoolGrad', {
'block': G.MaxPoolGrad(ksize=(2, 2), strides=(2, 2), padding="VALID"),
'block': G.MaxPoolGrad(kernel_size=(2, 2), strides=(2, 2), pad_mode="VALID"),
'desc_inputs': [[3, 4, 6, 6], [3, 4, 3, 3], [3, 4, 3, 3]],
'desc_bprop': [[3, 4, 6, 6]],
'skip': ['backward']}),
('AvgPool', {
'block': P.AvgPool(ksize=(2, 2), strides=(2, 2), padding="VALID"),
'block': P.AvgPool(kernel_size=(2, 2), strides=(2, 2), pad_mode="VALID"),
'desc_inputs': [[100, 3, 28, 28]],
'desc_bprop': [[100, 3, 14, 14]]}),
('MaxPoolWithArgmax', {
'block': P.MaxPoolWithArgmax(ksize=2, strides=2),
'block': P.MaxPoolWithArgmax(kernel_size=2, strides=2),
'desc_inputs': [[128, 32, 32, 64]],
'desc_bprop': [[128, 32, 16, 32], ([128, 32, 16, 32], {'dtype': np.int32})]}),
('SoftmaxCrossEntropyWithLogits', {
@@ -2218,7 +2218,7 @@ test_case_array_ops = [
'desc_inputs': [Tensor(np.array([1], np.float32)),
Tensor(np.array([1], np.float32)),
Tensor(np.array([1], np.float32))],
'desc_bprop': [[3, ]]}),
'desc_bprop': [[3,]]}),
('Pack_0', {
'block': NetForPackInput(P.Pack()),
'desc_inputs': [[2, 2], [2, 2], [2, 2]],
@@ -2640,7 +2640,7 @@ test_case_other_ops = [
Tensor(np.random.rand(1, 64).astype(np.float16)),
Tensor(np.random.rand(1, 64).astype(np.float16)),
Tensor(np.random.rand(96, 256).astype(np.float16)),
Tensor(np.random.rand(256, ).astype(np.float16))],
Tensor(np.random.rand(256,).astype(np.float16))],
'desc_bprop': [Tensor(np.random.rand(1, 64).astype(np.float16)),
Tensor(np.random.rand(1, 64).astype(np.float16)),
Tensor(np.random.rand(1, 64).astype(np.float16)),

View File

@@ -156,7 +156,7 @@ def vm_impl_max_pool_grad_with_argmax(self):
dout = dout.asnumpy()
arg_max = argmax.asnumpy()
dx = vm.max_pool_grad_with_argmax(x, dout, arg_max,
self.ksize[1], self.ksize[2], self.strides[1])
self.kernel_size[1], self.kernel_size[2], self.strides[1])
return Tensor(dx)
return vm_impl
@@ -168,7 +168,7 @@ def vm_impl_max_pool_with_argmax(self):
def vm_impl(x):
x = x.asnumpy()
out, out_argmax = vm.max_pool_with_argmax(x, self.ksize[1], self.ksize[2], self.strides[1])
out, out_argmax = vm.max_pool_with_argmax(x, self.kernel_size[1], self.kernel_size[2], self.strides[1])
return Tensor(out), Tensor(out_argmax)
return vm_impl
@@ -180,7 +180,7 @@ def vm_impl_max_pool(self):
def vm_impl(x):
x = x.asnumpy()
out = vm.max_pooling(x, self.ksize[-2], self.ksize[-1], self.strides[-2])
out = vm.max_pooling(x, self.kernel_size[-2], self.kernel_size[-1], self.strides[-2])
return Tensor(out)
return vm_impl
@@ -193,7 +193,7 @@ def vm_impl_max_pool_grad(self):
def vm_impl(x, out, dout):
x = x.asnumpy()
dout = dout.asnumpy()
out = vm.max_pool_grad(x, dout, self.ksize[-2], self.ksize[-1], self.strides[-2])
out = vm.max_pool_grad(x, dout, self.kernel_size[-2], self.kernel_size[-1], self.strides[-2])
return Tensor(out)
return vm_impl
@@ -205,7 +205,7 @@ def vm_impl_avg_pool(self):
def vm_impl(x):
x = x.asnumpy()
out = vm.avg_pooling(x, self.ksize[-2], self.ksize[-1], self.strides[-2])
out = vm.avg_pooling(x, self.kernel_size[-2], self.kernel_size[-1], self.strides[-2])
return Tensor(out)
return vm_impl
@@ -217,7 +217,7 @@ def vm_impl_avg_pool_grad(self):
def vm_impl(dout, origin_shape):
dout = dout.asnumpy()
out = vm.avg_pool_grad(dout, origin_shape, self.ksize[-2], self.ksize[-1], self.strides[-2])
out = vm.avg_pool_grad(dout, origin_shape, self.kernel_size[-2], self.kernel_size[-1], self.strides[-2])
return Tensor(out)
return vm_impl