llvm-project/llvm/test/CodeGen/X86/GlobalISel/legalize-add-v512.mir

# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
# RUN: llc -mtriple=x86_64-linux-gnu -mattr=+avx -global-isel -run-pass=legalizer %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=AVX1
# RUN: llc -mtriple=x86_64-linux-gnu -mattr=+avx512f -global-isel -run-pass=legalizer %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=AVX512F
# RUN: llc -mtriple=x86_64-linux-gnu -mattr=+avx512f,+avx512bw -global-isel -run-pass=legalizer %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=AVX512BW
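#
# These tests exercise legalization of 512-bit vector G_ADD under three
# feature sets. As the CHECK lines below confirm: AVX1 has no 256-bit integer
# ops, so every 512-bit add is narrowed to 128-bit pieces; AVX512F handles
# <16 x s32> and <8 x s64> natively but splits i8/i16 vectors to 256 bits
# (512-bit byte/word ops need AVX512BW); AVX512BW keeps all four element
# types legal at 512 bits.
#
# To regenerate the CHECK lines after editing the MIR, rerun the update
# script named in the NOTE above, e.g. (paths assume an llvm-project
# checkout):
#   llvm/utils/update_mir_test_checks.py \
#     llvm/test/CodeGen/X86/GlobalISel/legalize-add-v512.mir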
--- |
  define void @test_add_v64i8() {
    %ret = add <64 x i8> undef, undef
    ret void
  }
  define void @test_add_v32i16() {
    %ret = add <32 x i16> undef, undef
    ret void
  }
  define void @test_add_v16i32() {
    %ret = add <16 x i32> undef, undef
    ret void
  }
  define void @test_add_v8i64() {
    %ret = add <8 x i64> undef, undef
    ret void
  }
  define <64 x i8> @test_add_v64i8_2(<64 x i8> %arg1, <64 x i8> %arg2) #0 {
    %ret = add <64 x i8> %arg1, %arg2
    ret <64 x i8> %ret
  }
...
---
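# 512-bit i8 add: AVX1 unmerges the <64 x s8> operands into four <16 x s8>
# pieces, AVX512F (no BWI) splits them into two <32 x s8> halves, and
# AVX512BW performs the G_ADD directly on <64 x s8>.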
name: test_add_v64i8
alignment: 4
legalized: false
regBankSelected: false
registers:
  - { id: 0, class: _ }
  - { id: 1, class: _ }
  - { id: 2, class: _ }
body: |
  bb.1 (%ir-block.0):
    liveins: $zmm0, $zmm1

    ; ALL-LABEL: name: test_add_v64i8
    ; ALL: [[DEF:%[0-9]+]]:_(<64 x s8>) = IMPLICIT_DEF
    ; ALL: [[DEF1:%[0-9]+]]:_(<64 x s8>) = IMPLICIT_DEF
    ; AVX1: [[UV:%[0-9]+]]:_(<16 x s8>), [[UV1:%[0-9]+]]:_(<16 x s8>), [[UV2:%[0-9]+]]:_(<16 x s8>), [[UV3:%[0-9]+]]:_(<16 x s8>) = G_UNMERGE_VALUES [[DEF]](<64 x s8>)
    ; AVX1: [[UV4:%[0-9]+]]:_(<16 x s8>), [[UV5:%[0-9]+]]:_(<16 x s8>), [[UV6:%[0-9]+]]:_(<16 x s8>), [[UV7:%[0-9]+]]:_(<16 x s8>) = G_UNMERGE_VALUES [[DEF1]](<64 x s8>)
    ; AVX1: [[ADD:%[0-9]+]]:_(<16 x s8>) = G_ADD [[UV]], [[UV4]]
    ; AVX1: [[ADD1:%[0-9]+]]:_(<16 x s8>) = G_ADD [[UV1]], [[UV5]]
    ; AVX1: [[ADD2:%[0-9]+]]:_(<16 x s8>) = G_ADD [[UV2]], [[UV6]]
    ; AVX1: [[ADD3:%[0-9]+]]:_(<16 x s8>) = G_ADD [[UV3]], [[UV7]]
    ; AVX1: [[MV:%[0-9]+]]:_(<64 x s8>) = G_MERGE_VALUES [[ADD]](<16 x s8>), [[ADD1]](<16 x s8>), [[ADD2]](<16 x s8>), [[ADD3]](<16 x s8>)
    ; AVX1: $zmm0 = COPY [[MV]](<64 x s8>)
    ; AVX512F: [[UV:%[0-9]+]]:_(<32 x s8>), [[UV1:%[0-9]+]]:_(<32 x s8>) = G_UNMERGE_VALUES [[DEF]](<64 x s8>)
    ; AVX512F: [[UV2:%[0-9]+]]:_(<32 x s8>), [[UV3:%[0-9]+]]:_(<32 x s8>) = G_UNMERGE_VALUES [[DEF1]](<64 x s8>)
    ; AVX512F: [[ADD:%[0-9]+]]:_(<32 x s8>) = G_ADD [[UV]], [[UV2]]
    ; AVX512F: [[ADD1:%[0-9]+]]:_(<32 x s8>) = G_ADD [[UV1]], [[UV3]]
    ; AVX512F: [[MV:%[0-9]+]]:_(<64 x s8>) = G_MERGE_VALUES [[ADD]](<32 x s8>), [[ADD1]](<32 x s8>)
    ; AVX512F: $zmm0 = COPY [[MV]](<64 x s8>)
    ; AVX512BW: [[ADD:%[0-9]+]]:_(<64 x s8>) = G_ADD [[DEF]], [[DEF1]]
    ; AVX512BW: $zmm0 = COPY [[ADD]](<64 x s8>)
    ; ALL: RET 0
    %0(<64 x s8>) = IMPLICIT_DEF
    %1(<64 x s8>) = IMPLICIT_DEF
    %2(<64 x s8>) = G_ADD %0, %1
    $zmm0 = COPY %2
    RET 0
...
---
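# 512-bit i16 add: legalized the same way as v64i8 (four <8 x s16> pieces on
# AVX1, two <16 x s16> halves on AVX512F, a single <32 x s16> G_ADD on
# AVX512BW).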
name: test_add_v32i16
alignment: 4
legalized: false
regBankSelected: false
registers:
  - { id: 0, class: _ }
  - { id: 1, class: _ }
  - { id: 2, class: _ }
body: |
  bb.1 (%ir-block.0):
    liveins: $zmm0, $zmm1

    ; ALL-LABEL: name: test_add_v32i16
    ; ALL: [[DEF:%[0-9]+]]:_(<32 x s16>) = IMPLICIT_DEF
    ; ALL: [[DEF1:%[0-9]+]]:_(<32 x s16>) = IMPLICIT_DEF
    ; AVX1: [[UV:%[0-9]+]]:_(<8 x s16>), [[UV1:%[0-9]+]]:_(<8 x s16>), [[UV2:%[0-9]+]]:_(<8 x s16>), [[UV3:%[0-9]+]]:_(<8 x s16>) = G_UNMERGE_VALUES [[DEF]](<32 x s16>)
    ; AVX1: [[UV4:%[0-9]+]]:_(<8 x s16>), [[UV5:%[0-9]+]]:_(<8 x s16>), [[UV6:%[0-9]+]]:_(<8 x s16>), [[UV7:%[0-9]+]]:_(<8 x s16>) = G_UNMERGE_VALUES [[DEF1]](<32 x s16>)
    ; AVX1: [[ADD:%[0-9]+]]:_(<8 x s16>) = G_ADD [[UV]], [[UV4]]
    ; AVX1: [[ADD1:%[0-9]+]]:_(<8 x s16>) = G_ADD [[UV1]], [[UV5]]
    ; AVX1: [[ADD2:%[0-9]+]]:_(<8 x s16>) = G_ADD [[UV2]], [[UV6]]
    ; AVX1: [[ADD3:%[0-9]+]]:_(<8 x s16>) = G_ADD [[UV3]], [[UV7]]
    ; AVX1: [[MV:%[0-9]+]]:_(<32 x s16>) = G_MERGE_VALUES [[ADD]](<8 x s16>), [[ADD1]](<8 x s16>), [[ADD2]](<8 x s16>), [[ADD3]](<8 x s16>)
    ; AVX1: $zmm0 = COPY [[MV]](<32 x s16>)
    ; AVX512F: [[UV:%[0-9]+]]:_(<16 x s16>), [[UV1:%[0-9]+]]:_(<16 x s16>) = G_UNMERGE_VALUES [[DEF]](<32 x s16>)
    ; AVX512F: [[UV2:%[0-9]+]]:_(<16 x s16>), [[UV3:%[0-9]+]]:_(<16 x s16>) = G_UNMERGE_VALUES [[DEF1]](<32 x s16>)
    ; AVX512F: [[ADD:%[0-9]+]]:_(<16 x s16>) = G_ADD [[UV]], [[UV2]]
    ; AVX512F: [[ADD1:%[0-9]+]]:_(<16 x s16>) = G_ADD [[UV1]], [[UV3]]
    ; AVX512F: [[MV:%[0-9]+]]:_(<32 x s16>) = G_MERGE_VALUES [[ADD]](<16 x s16>), [[ADD1]](<16 x s16>)
    ; AVX512F: $zmm0 = COPY [[MV]](<32 x s16>)
    ; AVX512BW: [[ADD:%[0-9]+]]:_(<32 x s16>) = G_ADD [[DEF]], [[DEF1]]
    ; AVX512BW: $zmm0 = COPY [[ADD]](<32 x s16>)
    ; ALL: RET 0
    %0(<32 x s16>) = IMPLICIT_DEF
    %1(<32 x s16>) = IMPLICIT_DEF
    %2(<32 x s16>) = G_ADD %0, %1
    $zmm0 = COPY %2
    RET 0
...
---
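# 512-bit i32 add: AVX1 splits into four <4 x s32> pieces, while both AVX512F
# and AVX512BW handle the <16 x s32> G_ADD natively.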
name: test_add_v16i32
alignment: 4
legalized: false
regBankSelected: false
registers:
  - { id: 0, class: _ }
  - { id: 1, class: _ }
  - { id: 2, class: _ }
body: |
  bb.1 (%ir-block.0):
    liveins: $zmm0, $zmm1

    ; ALL-LABEL: name: test_add_v16i32
    ; ALL: [[DEF:%[0-9]+]]:_(<16 x s32>) = IMPLICIT_DEF
    ; ALL: [[DEF1:%[0-9]+]]:_(<16 x s32>) = IMPLICIT_DEF
    ; AVX1: [[UV:%[0-9]+]]:_(<4 x s32>), [[UV1:%[0-9]+]]:_(<4 x s32>), [[UV2:%[0-9]+]]:_(<4 x s32>), [[UV3:%[0-9]+]]:_(<4 x s32>) = G_UNMERGE_VALUES [[DEF]](<16 x s32>)
    ; AVX1: [[UV4:%[0-9]+]]:_(<4 x s32>), [[UV5:%[0-9]+]]:_(<4 x s32>), [[UV6:%[0-9]+]]:_(<4 x s32>), [[UV7:%[0-9]+]]:_(<4 x s32>) = G_UNMERGE_VALUES [[DEF1]](<16 x s32>)
    ; AVX1: [[ADD:%[0-9]+]]:_(<4 x s32>) = G_ADD [[UV]], [[UV4]]
    ; AVX1: [[ADD1:%[0-9]+]]:_(<4 x s32>) = G_ADD [[UV1]], [[UV5]]
    ; AVX1: [[ADD2:%[0-9]+]]:_(<4 x s32>) = G_ADD [[UV2]], [[UV6]]
    ; AVX1: [[ADD3:%[0-9]+]]:_(<4 x s32>) = G_ADD [[UV3]], [[UV7]]
    ; AVX1: [[MV:%[0-9]+]]:_(<16 x s32>) = G_MERGE_VALUES [[ADD]](<4 x s32>), [[ADD1]](<4 x s32>), [[ADD2]](<4 x s32>), [[ADD3]](<4 x s32>)
    ; AVX1: $zmm0 = COPY [[MV]](<16 x s32>)
    ; AVX512F: [[ADD:%[0-9]+]]:_(<16 x s32>) = G_ADD [[DEF]], [[DEF1]]
    ; AVX512F: $zmm0 = COPY [[ADD]](<16 x s32>)
    ; AVX512BW: [[ADD:%[0-9]+]]:_(<16 x s32>) = G_ADD [[DEF]], [[DEF1]]
    ; AVX512BW: $zmm0 = COPY [[ADD]](<16 x s32>)
    ; ALL: RET 0
    %0(<16 x s32>) = IMPLICIT_DEF
    %1(<16 x s32>) = IMPLICIT_DEF
    %2(<16 x s32>) = G_ADD %0, %1
    $zmm0 = COPY %2
    RET 0
...
---
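# 512-bit i64 add: AVX1 splits into four <2 x s64> pieces; AVX512F and
# AVX512BW keep the <8 x s64> G_ADD legal.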
name: test_add_v8i64
alignment: 4
legalized: false
regBankSelected: false
registers:
  - { id: 0, class: _ }
  - { id: 1, class: _ }
  - { id: 2, class: _ }
body: |
  bb.1 (%ir-block.0):
    liveins: $zmm0, $zmm1

    ; ALL-LABEL: name: test_add_v8i64
    ; ALL: [[DEF:%[0-9]+]]:_(<8 x s64>) = IMPLICIT_DEF
    ; ALL: [[DEF1:%[0-9]+]]:_(<8 x s64>) = IMPLICIT_DEF
    ; AVX1: [[UV:%[0-9]+]]:_(<2 x s64>), [[UV1:%[0-9]+]]:_(<2 x s64>), [[UV2:%[0-9]+]]:_(<2 x s64>), [[UV3:%[0-9]+]]:_(<2 x s64>) = G_UNMERGE_VALUES [[DEF]](<8 x s64>)
    ; AVX1: [[UV4:%[0-9]+]]:_(<2 x s64>), [[UV5:%[0-9]+]]:_(<2 x s64>), [[UV6:%[0-9]+]]:_(<2 x s64>), [[UV7:%[0-9]+]]:_(<2 x s64>) = G_UNMERGE_VALUES [[DEF1]](<8 x s64>)
    ; AVX1: [[ADD:%[0-9]+]]:_(<2 x s64>) = G_ADD [[UV]], [[UV4]]
    ; AVX1: [[ADD1:%[0-9]+]]:_(<2 x s64>) = G_ADD [[UV1]], [[UV5]]
    ; AVX1: [[ADD2:%[0-9]+]]:_(<2 x s64>) = G_ADD [[UV2]], [[UV6]]
    ; AVX1: [[ADD3:%[0-9]+]]:_(<2 x s64>) = G_ADD [[UV3]], [[UV7]]
    ; AVX1: [[MV:%[0-9]+]]:_(<8 x s64>) = G_MERGE_VALUES [[ADD]](<2 x s64>), [[ADD1]](<2 x s64>), [[ADD2]](<2 x s64>), [[ADD3]](<2 x s64>)
    ; AVX1: $zmm0 = COPY [[MV]](<8 x s64>)
    ; AVX512F: [[ADD:%[0-9]+]]:_(<8 x s64>) = G_ADD [[DEF]], [[DEF1]]
    ; AVX512F: $zmm0 = COPY [[ADD]](<8 x s64>)
    ; AVX512BW: [[ADD:%[0-9]+]]:_(<8 x s64>) = G_ADD [[DEF]], [[DEF1]]
    ; AVX512BW: $zmm0 = COPY [[ADD]](<8 x s64>)
    ; ALL: RET 0
    %0(<8 x s64>) = IMPLICIT_DEF
    %1(<8 x s64>) = IMPLICIT_DEF
    %2(<8 x s64>) = G_ADD %0, %1
    $zmm0 = COPY %2
    RET 0
...
---
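# Same v64i8 add, but with operands arriving as pairs of 256-bit registers.
# AVX512BW first merges the <32 x s8> halves into a single <64 x s8> value,
# adds, then unmerges the result; AVX512F adds the halves directly; AVX1
# splits further into <16 x s8> pieces.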
name: test_add_v64i8_2
alignment: 4
legalized: false
regBankSelected: false
registers:
  - { id: 0, class: _ }
  - { id: 1, class: _ }
  - { id: 2, class: _ }
  - { id: 3, class: _ }
  - { id: 4, class: _ }
  - { id: 5, class: _ }
  - { id: 6, class: _ }
  - { id: 7, class: _ }
  - { id: 8, class: _ }
body: |
  bb.1 (%ir-block.0):
    liveins: $ymm0, $ymm1, $ymm2, $ymm3

    ; ALL-LABEL: name: test_add_v64i8_2
    ; ALL: [[COPY:%[0-9]+]]:_(<32 x s8>) = COPY $ymm0
    ; ALL: [[COPY1:%[0-9]+]]:_(<32 x s8>) = COPY $ymm1
    ; ALL: [[COPY2:%[0-9]+]]:_(<32 x s8>) = COPY $ymm2
    ; ALL: [[COPY3:%[0-9]+]]:_(<32 x s8>) = COPY $ymm3
    ; AVX1: [[UV:%[0-9]+]]:_(<16 x s8>), [[UV1:%[0-9]+]]:_(<16 x s8>) = G_UNMERGE_VALUES [[COPY]](<32 x s8>)
    ; AVX1: [[UV2:%[0-9]+]]:_(<16 x s8>), [[UV3:%[0-9]+]]:_(<16 x s8>) = G_UNMERGE_VALUES [[COPY1]](<32 x s8>)
    ; AVX1: [[UV4:%[0-9]+]]:_(<16 x s8>), [[UV5:%[0-9]+]]:_(<16 x s8>) = G_UNMERGE_VALUES [[COPY2]](<32 x s8>)
    ; AVX1: [[UV6:%[0-9]+]]:_(<16 x s8>), [[UV7:%[0-9]+]]:_(<16 x s8>) = G_UNMERGE_VALUES [[COPY3]](<32 x s8>)
    ; AVX1: [[ADD:%[0-9]+]]:_(<16 x s8>) = G_ADD [[UV]], [[UV4]]
    ; AVX1: [[ADD1:%[0-9]+]]:_(<16 x s8>) = G_ADD [[UV1]], [[UV5]]
    ; AVX1: [[ADD2:%[0-9]+]]:_(<16 x s8>) = G_ADD [[UV2]], [[UV6]]
    ; AVX1: [[ADD3:%[0-9]+]]:_(<16 x s8>) = G_ADD [[UV3]], [[UV7]]
    ; AVX1: [[MV:%[0-9]+]]:_(<32 x s8>) = G_MERGE_VALUES [[ADD]](<16 x s8>), [[ADD1]](<16 x s8>)
    ; AVX1: [[MV1:%[0-9]+]]:_(<32 x s8>) = G_MERGE_VALUES [[ADD2]](<16 x s8>), [[ADD3]](<16 x s8>)
    ; AVX1: $ymm0 = COPY [[MV]](<32 x s8>)
    ; AVX1: $ymm1 = COPY [[MV1]](<32 x s8>)
    ; AVX512F: [[ADD:%[0-9]+]]:_(<32 x s8>) = G_ADD [[COPY]], [[COPY2]]
    ; AVX512F: [[ADD1:%[0-9]+]]:_(<32 x s8>) = G_ADD [[COPY1]], [[COPY3]]
    ; AVX512F: $ymm0 = COPY [[ADD]](<32 x s8>)
    ; AVX512F: $ymm1 = COPY [[ADD1]](<32 x s8>)
    ; AVX512BW: [[MV:%[0-9]+]]:_(<64 x s8>) = G_MERGE_VALUES [[COPY]](<32 x s8>), [[COPY1]](<32 x s8>)
    ; AVX512BW: [[MV1:%[0-9]+]]:_(<64 x s8>) = G_MERGE_VALUES [[COPY2]](<32 x s8>), [[COPY3]](<32 x s8>)
    ; AVX512BW: [[ADD:%[0-9]+]]:_(<64 x s8>) = G_ADD [[MV]], [[MV1]]
    ; AVX512BW: [[UV:%[0-9]+]]:_(<32 x s8>), [[UV1:%[0-9]+]]:_(<32 x s8>) = G_UNMERGE_VALUES [[ADD]](<64 x s8>)
    ; AVX512BW: $ymm0 = COPY [[UV]](<32 x s8>)
    ; AVX512BW: $ymm1 = COPY [[UV1]](<32 x s8>)
    ; ALL: RET 0, implicit $ymm0, implicit $ymm1
    %2(<32 x s8>) = COPY $ymm0
    %3(<32 x s8>) = COPY $ymm1
    %4(<32 x s8>) = COPY $ymm2
    %5(<32 x s8>) = COPY $ymm3
    %0(<64 x s8>) = G_MERGE_VALUES %2(<32 x s8>), %3(<32 x s8>)
    %1(<64 x s8>) = G_MERGE_VALUES %4(<32 x s8>), %5(<32 x s8>)
    %6(<64 x s8>) = G_ADD %0, %1
    %7(<32 x s8>), %8(<32 x s8>) = G_UNMERGE_VALUES %6(<64 x s8>)
    $ymm0 = COPY %7(<32 x s8>)
    $ymm1 = COPY %8(<32 x s8>)
    RET 0, implicit $ymm0, implicit $ymm1
...