!38265 [feat] [assistant] [I4XJI7] add array operator MaskedFill
Merge pull request !38265 from Seeker98/op_maskedfill_0711
commit 21ce969674
@@ -6,7 +6,8 @@ mindspore.ops.masked_fill

     Fills the positions where `mask` is True with the specified value. The shapes of `input_x` and `mask` must be the same or broadcastable.

     Args:
-        - **input_x** (Tensor) - The input Tensor, whose data type is float16, float32, int8, or int32.
+        - **input_x** (Tensor) - The input Tensor, whose data type is bool, uint8, int8, int16, int32,
+          int64, float16, float32, float64, complex64, or complex128.
         - **mask** (Tensor[bool]) - The input mask, whose data type is bool.
         - **value** (Union[float, Tensor]) - The value to fill in with, whose data type is the same as that of `input_x`.
@@ -17,7 +18,8 @@ mindspore.ops.masked_fill

         - **TypeError** - The data type of `mask` is not bool.
         - **TypeError** - `input_x` or `mask` is not a Tensor.
         - **ValueError** - The shapes of `input_x` and `mask` cannot be broadcast.
-        - **TypeError** - The data type of `input_x` or `value` is not float16, float32, int8, or int32.
+        - **TypeError** - The data type of `input_x` or `value` is not one of bool, uint8, int8, int16, int32,
+          int64, float16, float32, float64, complex64, or complex128.
         - **TypeError** - The data type of `value` is different from that of `input_x`.
         - **TypeError** - `value` is neither a float nor a Tensor.
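The broader dtype list documented above can be exercised with a short sketch. This is a hedged example written for this review, not code from the PR, and it assumes the running backend accepts int64 for MaskedFill:

import numpy as np
import mindspore as ms
from mindspore import Tensor, ops

# int64 is one of the newly documented dtypes (hedged; backend support may vary).
x = Tensor(np.array([1, 2, 3, 4], np.int64))
mask = Tensor(np.array([True, False, True, False]), ms.bool_)
value = Tensor(9, ms.int64)  # value dtype must match input_x
print(ops.masked_fill(x, mask, value))  # expected: [9 2 9 4]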
@@ -78,7 +78,7 @@ TypePtr MaskedFillInferType(const PrimitivePtr &prim, const std::vector<Abstract
   MS_EXCEPTION_IF_NULL(context);
   bool is_ascend = (context->get_param<std::string>(MS_CTX_DEVICE_TARGET) == kAscendDevice);
   if (is_ascend) {
-    valid_types = {kFloat16, kFloat32, kInt8, kInt32};
+    valid_types = {kBool, kUInt8, kInt8, kInt16, kInt32, kInt64, kFloat16, kFloat32, kFloat64, kComplex64, kComplex128};
   } else {
     valid_types = {kBool, kInt8, kInt16, kInt32, kInt64, kUInt8, kUInt16, kUInt32, kUInt64,
                    kFloat16, kFloat32, kFloat64, kInt, kUInt, kFloat, kComplex64, kComplex128};
@@ -87,10 +87,13 @@ TypePtr MaskedFillInferType(const PrimitivePtr &prim, const std::vector<Abstract
     std::map<std::string, TypePtr> types;
     (void)types.emplace("input", input_args[kInputIndex0]->BuildType());
     (void)types.emplace("value", input_args[kInputIndex2]->BuildType());
-    return CheckAndConvertUtils::CheckTensorTypeSame(types, valid_types, op_name);
+    (void)CheckAndConvertUtils::CheckTensorTypeSame(types, valid_types, op_name);
+    return types["input"];
   } else {
     (void)CheckAndConvertUtils::CheckSubClass("value", input_args[kInputIndex2]->BuildType(), {kFloat}, op_name);
-    return CheckAndConvertUtils::CheckTensorTypeValid("input", input_args[0]->BuildType(), valid_types, op_name);
+    auto input_type = input_args[kInputIndex0]->BuildType();
+    (void)CheckAndConvertUtils::CheckTensorTypeValid("input", input_type, valid_types, op_name);
+    return input_type;
   }
 }
 }  // namespace
@@ -98,8 +101,12 @@ TypePtr MaskedFillInferType(const PrimitivePtr &prim, const std::vector<Abstract
 MIND_API_OPERATOR_IMPL(MaskedFill, BaseOperator);
 AbstractBasePtr MaskedFillInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive,
                                 const std::vector<AbstractBasePtr> &input_args) {
-  return std::make_shared<abstract::AbstractTensor>(MaskedFillInferType(primitive, input_args),
-                                                    MaskedFillInferShape(primitive, input_args)->shape());
+  MS_EXCEPTION_IF_NULL(primitive);
+  const int64_t kInputNum = 3;
+  CheckAndConvertUtils::CheckInputArgs(input_args, kEqual, kInputNum, primitive->name());
+  auto infer_type = MaskedFillInferType(primitive, input_args);
+  auto infer_shape = MaskedFillInferShape(primitive, input_args);
+  return abstract::MakeAbstract(infer_shape, infer_type);
 }
 REGISTER_PRIMITIVE_EVAL_IMPL(MaskedFill, prim::kPrimMaskedFill, MaskedFillInfer, nullptr, true);
 }  // namespace ops
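The rewritten infer path now validates that exactly three inputs are supplied and uses CheckTensorTypeSame/CheckTensorTypeValid for checking only, returning the input's own type. At the Python API level the visible effect is a type error when the dtype of `value` does not match `input_x`; a hedged sketch (the exact error message is not taken from this PR):

import numpy as np
import mindspore as ms
from mindspore import Tensor, ops

x = Tensor(np.array([1.0, 2.0, 3.0]), ms.float32)
mask = Tensor(np.array([True, False, True]), ms.bool_)
try:
    # value dtype (int32) differs from input_x dtype (float32); the type check should reject this
    ops.masked_fill(x, mask, Tensor(5, ms.int32))
except TypeError as e:
    print("TypeError:", e)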
@@ -0,0 +1,42 @@
+# Copyright 2022 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+
+"""MaskedFill op"""
+from mindspore.ops.op_info_register import op_info_register, AiCPURegOp, DataType
+
+masked_fill_op_info = AiCPURegOp("MaskedFill") \
+    .fusion_type("OPAQUE") \
+    .input(0, "x", "required") \
+    .input(1, "mask", "required") \
+    .input(2, "value", "required") \
+    .output(0, "output", "required") \
+    .dtype_format(DataType.BOOL_Default, DataType.BOOL_Default, DataType.BOOL_Default, DataType.BOOL_Default) \
+    .dtype_format(DataType.U8_Default, DataType.BOOL_Default, DataType.U8_Default, DataType.U8_Default) \
+    .dtype_format(DataType.I8_Default, DataType.BOOL_Default, DataType.I8_Default, DataType.I8_Default) \
+    .dtype_format(DataType.I16_Default, DataType.BOOL_Default, DataType.I16_Default, DataType.I16_Default) \
+    .dtype_format(DataType.I32_Default, DataType.BOOL_Default, DataType.I32_Default, DataType.I32_Default) \
+    .dtype_format(DataType.I64_Default, DataType.BOOL_Default, DataType.I64_Default, DataType.I64_Default) \
+    .dtype_format(DataType.F16_Default, DataType.BOOL_Default, DataType.F16_Default, DataType.F16_Default) \
+    .dtype_format(DataType.F32_Default, DataType.BOOL_Default, DataType.F32_Default, DataType.F32_Default) \
+    .dtype_format(DataType.F64_Default, DataType.BOOL_Default, DataType.F64_Default, DataType.F64_Default) \
+    .dtype_format(DataType.C64_Default, DataType.BOOL_Default, DataType.C64_Default, DataType.C64_Default) \
+    .dtype_format(DataType.C128_Default, DataType.BOOL_Default, DataType.C128_Default, DataType.C128_Default) \
+    .get_op_info()
+
+
+@op_info_register(masked_fill_op_info)
+def _masked_fill_aicpu():
+    """MaskedFill AiCPU register"""
+    return
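Each `dtype_format` row in the registration above binds the (x, mask, value, output) dtypes, with `mask` fixed to bool. As a hedged sketch of one of the newly registered combinations, a bool input filled with a bool scalar, assuming the AiCPU kernel is the one selected at runtime:

import numpy as np
import mindspore as ms
from mindspore import Tensor, ops

# Hedged: bool x / bool value, matching the BOOL_Default row of the registration.
x = Tensor(np.array([True, False, False, True]), ms.bool_)
mask = Tensor(np.array([False, True, True, False]), ms.bool_)
print(ops.masked_fill(x, mask, Tensor(True, ms.bool_)))  # expected: [ True  True  True  True]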
@@ -4081,7 +4081,8 @@ def masked_fill(input_x, mask, value):
     The shapes of `input_x` and `mask` need to be the same or broadcastable.

     Args:
-        input_x (Tensor): The source Tensor whose data type is one of float16, float32, int8, int32.
+        input_x (Tensor): The source Tensor whose data type is one of bool, uint8, int8, int16, int32,
+            int64, float16, float32, float64, complex64, complex128.
         mask (Tensor[bool]): The boolean mask.
         value (Union[float, Tensor]): The value to fill in with, which dtype is the same as `input_x`.
@@ -4092,7 +4093,8 @@ def masked_fill(input_x, mask, value):
         TypeError: If dtype of `mask` is not bool.
         TypeError: If `input_x` or `mask` is not a Tensor.
         ValueError: If the shapes of `input_x` and `mask` could not be broadcast.
-        TypeError: If dtype of `input_x` or `value` is not one of float16, float32, int8, int32.
+        TypeError: If dtype of `input_x` or `value` is not one of bool, uint8, int8, int16, int32,
+            int64, float16, float32, float64, complex64, complex128.
         TypeError: If dtype of `value` is different from that of `input_x`.
         TypeError: If `value` is neither float number nor Tensor.
@@ -4100,9 +4102,9 @@ def masked_fill(input_x, mask, value):
         ``Ascend`` ``GPU`` ``CPU``

     Examples:
-        >>> input = Tensor(np.array([1., 2., 3., 4.]), mindspore.float32)
+        >>> input_x = Tensor(np.array([1., 2., 3., 4.]), mindspore.float32)
         >>> mask = Tensor(np.array([True, True, False, True]), mindspore.bool_)
-        >>> output = ops.masked_fill(input, mask, 0.5)
+        >>> output = ops.masked_fill(input_x, mask, 0.5)
         >>> print(output)
         [0.5 0.5 3. 0.5]
     """
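The docstring also states that the shapes only need to be broadcastable. A hedged sketch, not from the PR, assuming NumPy-style broadcasting of a (3,) mask over a (2, 3) input:

import numpy as np
import mindspore as ms
from mindspore import Tensor, ops

x = Tensor(np.arange(6, dtype=np.float32).reshape(2, 3))
mask = Tensor(np.array([True, False, True]), ms.bool_)
print(ops.masked_fill(x, mask, -1.0))
# expected:
# [[-1.  1. -1.]
#  [-1.  4. -1.]]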
@@ -386,6 +386,16 @@ class NetForFlattenConcat(Cell):
         return self.flatten_concat([x1, x2, x3])


+class MaskedFillFunc(Cell):
+    def __init__(self):
+        super(MaskedFillFunc, self).__init__()
+        self.maskedfill_ = ops.function.masked_fill
+
+    def construct(self, x, mask, value):
+        y = self.maskedfill_(x, mask, value)
+        return y
+
+
 test_case_array_ops = [
     ('CustNet1', {
         'block': CustNet1(),
@@ -448,6 +458,12 @@ test_case_array_ops = [
         'desc_inputs': [Tensor(np.array([1], np.float32)),
                         Tensor(np.array([2], np.float32)),
                         Tensor(np.array([3], np.float64))]}),
+    ('MaskedFill', {
+        'block': MaskedFillFunc(),
+        'desc_inputs': [Tensor(np.array([[3.0, 2.0, 1.0]]), mstype.float32),
+                        Tensor(np.array([[True, True, False]]), mstype.bool_),
+                        Tensor(5.0, mstype.float32)],
+        'desc_bprop': [Tensor(np.array([[3.0, 2.0, 1.0]]), mstype.float32)]}),
     ('TensorShapeNet', {'block': TensorShapeNet(), 'desc_inputs': [Tensor(np.array([1, 2, 3, 2]), ms.int32)]})
 ]
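For reference, the new MaskedFillFunc cell registered in the test table above can also be driven directly; a hedged sketch that assumes the test module's imports (np, Tensor, mstype) and the cell defined earlier in the diff:

# Hedged: expected output is inferred from the op semantics, not captured from a run.
net = MaskedFillFunc()
out = net(Tensor(np.array([[3.0, 2.0, 1.0]]), mstype.float32),
          Tensor(np.array([[True, True, False]]), mstype.bool_),
          Tensor(5.0, mstype.float32))
print(out)  # expected: [[5. 5. 1.]]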