Add float16-in-float32-out support for BatchMatMul and MatMul

This commit is contained in:
lilinjie 2022-07-28 15:09:17 +08:00
parent 4e3c39b572
commit 126c68314e
2 changed files with 3 additions and 0 deletions

View File

@@ -31,6 +31,7 @@ batch_matmul_op_info = TBERegOp("BatchMatMul") \
.input(2, "bias", False, "optional", "all") \
.output(0, "y", False, "required", "all") \
.dtype_format(DataType.F16_FracNZ, DataType.F16_FracNZ, DataType.F16_Default, DataType.F16_FracNZ) \
.dtype_format(DataType.F16_FracNZ, DataType.F16_FracNZ, DataType.F32_Default, DataType.F32_FracNZ) \
.get_op_info()

View File

@@ -36,6 +36,8 @@ matmul_op_info = TBERegOp("MatMul") \
DataType.F16_FracNZ) \
.dtype_format(DataType.F16_FracNZ, DataType.F16_FracNZ, DataType.F32_Default, DataType.I8_Default,
DataType.F16_FracNZ) \
.dtype_format(DataType.F16_FracNZ, DataType.F16_FracNZ, DataType.F32_Default, DataType.I8_Default,
DataType.F32_FracNZ) \
.get_op_info()