[X86] Add support for printing shuffle comments for VALIGN instructions.

llvm-svn: 284915
Craig Topper 2016-10-22 06:51:56 +00:00
parent 7b2b8db438
commit b084c90a18
5 changed files with 68 additions and 3 deletions


@@ -255,6 +255,10 @@ static std::string getMaskName(const MCInst *MI, const char *DestName,
  CASE_MASKZ_UNPCK(UNPCKLPS, r)
  CASE_MASKZ_SHUF(PALIGNR, r)
  CASE_MASKZ_SHUF(PALIGNR, m)
  CASE_MASKZ_SHUF(ALIGNQ, r)
  CASE_MASKZ_SHUF(ALIGNQ, m)
  CASE_MASKZ_SHUF(ALIGND, r)
  CASE_MASKZ_SHUF(ALIGND, m)
  CASE_MASKZ_SHUF(SHUFPD, m)
  CASE_MASKZ_SHUF(SHUFPD, r)
  CASE_MASKZ_SHUF(SHUFPS, m)
@@ -340,6 +344,10 @@ static std::string getMaskName(const MCInst *MI, const char *DestName,
  CASE_MASK_UNPCK(UNPCKLPS, r)
  CASE_MASK_SHUF(PALIGNR, r)
  CASE_MASK_SHUF(PALIGNR, m)
  CASE_MASK_SHUF(ALIGNQ, r)
  CASE_MASK_SHUF(ALIGNQ, m)
  CASE_MASK_SHUF(ALIGND, r)
  CASE_MASK_SHUF(ALIGND, m)
  CASE_MASK_SHUF(SHUFPD, m)
  CASE_MASK_SHUF(SHUFPD, r)
  CASE_MASK_SHUF(SHUFPS, m)
@@ -620,6 +628,42 @@ bool llvm::EmitAnyX86InstComments(const MCInst *MI, raw_ostream &OS,
                      ShuffleMask);
    break;

  CASE_AVX512_INS_COMMON(ALIGNQ, Z, rri)
  CASE_AVX512_INS_COMMON(ALIGNQ, Z256, rri)
  CASE_AVX512_INS_COMMON(ALIGNQ, Z128, rri)
    Src1Name = getRegName(MI->getOperand(NumOperands - 2).getReg());
    RegForm = true;
    LLVM_FALLTHROUGH;

  CASE_AVX512_INS_COMMON(ALIGNQ, Z, rmi)
  CASE_AVX512_INS_COMMON(ALIGNQ, Z256, rmi)
  CASE_AVX512_INS_COMMON(ALIGNQ, Z128, rmi)
    Src2Name = getRegName(MI->getOperand(NumOperands-(RegForm?3:7)).getReg());
    DestName = getRegName(MI->getOperand(0).getReg());
    if (MI->getOperand(NumOperands - 1).isImm())
      DecodeVALIGNMask(getRegOperandVectorVT(MI, MVT::i64, 0),
                       MI->getOperand(NumOperands - 1).getImm(),
                       ShuffleMask);
    break;

  CASE_AVX512_INS_COMMON(ALIGND, Z, rri)
  CASE_AVX512_INS_COMMON(ALIGND, Z256, rri)
  CASE_AVX512_INS_COMMON(ALIGND, Z128, rri)
    Src1Name = getRegName(MI->getOperand(NumOperands - 2).getReg());
    RegForm = true;
    LLVM_FALLTHROUGH;

  CASE_AVX512_INS_COMMON(ALIGND, Z, rmi)
  CASE_AVX512_INS_COMMON(ALIGND, Z256, rmi)
  CASE_AVX512_INS_COMMON(ALIGND, Z128, rmi)
    Src2Name = getRegName(MI->getOperand(NumOperands-(RegForm?3:7)).getReg());
    DestName = getRegName(MI->getOperand(0).getReg());
    if (MI->getOperand(NumOperands - 1).isImm())
      DecodeVALIGNMask(getRegOperandVectorVT(MI, MVT::i32, 0),
                       MI->getOperand(NumOperands - 1).getImm(),
                       ShuffleMask);
    break;

  CASE_SHUF(PSHUFD, ri)
    Src1Name = getRegName(MI->getOperand(NumOperands - 2).getReg());
    LLVM_FALLTHROUGH;
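A note on the NumOperands-(RegForm?3:7) indexing in the new cases: it picks out the instruction's first listed source register by counting from the end of the operand list, so it works for the plain, merge-masked, and zero-masked forms alike (that register supplies the high half of the VALIGN concatenation, which is why the printer uses it as the shuffle's second input). The sketch below is a minimal, standalone illustration of that arithmetic; it is not part of the patch, the operand layouts in the comments are stated assumptions about the usual AVX-512 MCInst ordering, and firstSrcOperandIndex is a hypothetical helper, not an LLVM API.

#include <cassert>
#include <cstdio>

// Hypothetical helper restating the NumOperands-(RegForm?3:7) arithmetic:
// given the operand count and whether this is the register form, return the
// index of the first listed source register. The 7 assumes an X86 memory
// reference occupies five MCOperands (base, scale, index, displacement,
// segment), followed by the trailing immediate.
static unsigned firstSrcOperandIndex(unsigned NumOperands, bool RegForm) {
  return NumOperands - (RegForm ? 3 : 7);
}

int main() {
  // Assumed layout for VALIGNQZrri: dst, src1, src2, imm -> 4 operands.
  assert(firstSrcOperandIndex(4, /*RegForm=*/true) == 1);
  // Assumed layout for VALIGNQZrmi: dst, src1, base, scale, index, disp,
  // segment, imm -> 8 operands.
  assert(firstSrcOperandIndex(8, /*RegForm=*/false) == 1);
  // Assumed layout for the merge-masked VALIGNQZrrik: dst, passthru, mask,
  // src1, src2, imm -> 6 operands, src1 at index 3.
  assert(firstSrcOperandIndex(6, /*RegForm=*/true) == 3);
  std::printf("operand indexing checks out\n");
  return 0;
}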


@@ -151,6 +151,16 @@ void DecodePALIGNRMask(MVT VT, unsigned Imm,
  }
}

void DecodeVALIGNMask(MVT VT, unsigned Imm,
                      SmallVectorImpl<int> &ShuffleMask) {
  int NumElts = VT.getVectorNumElements();
  // Not all bits of the immediate are used so mask it.
  assert(isPowerOf2_32(NumElts) && "NumElts should be power of 2");
  Imm = Imm & (NumElts - 1);
  for (int i = 0; i != NumElts; ++i)
    ShuffleMask.push_back(i + Imm);
}

/// DecodePSHUFMask - This decodes the shuffle masks for pshufw, pshufd, and vpermilp*.
/// VT indicates the type of the vector allowing it to handle different
/// datatypes and vector widths.
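To see what the new decoder produces, here is a minimal standalone re-statement of the loop above in plain C++; decodeVALIGN is an illustrative stand-in written for this note, not the patch's DecodeVALIGNMask, and has no LLVM dependencies.

#include <cassert>
#include <cstdio>
#include <vector>

// Rebuild the VALIGN shuffle mask for NumElts elements and immediate Imm,
// mirroring the decode loop in the patch: only the low bits of the immediate
// are used, and the mask is simply Imm, Imm+1, ..., Imm+NumElts-1.
static std::vector<int> decodeVALIGN(unsigned NumElts, unsigned Imm) {
  assert((NumElts & (NumElts - 1)) == 0 && "NumElts should be power of 2");
  Imm &= NumElts - 1;
  std::vector<int> Mask;
  for (unsigned i = 0; i != NumElts; ++i)
    Mask.push_back(static_cast<int>(i + Imm));
  return Mask;
}

int main() {
  // valignq $2 on 8 x i64: mask = 2 3 4 5 6 7 8 9. Indices 0-7 select from
  // the first shuffle input and 8-15 from the second, which the comment
  // printer renders as zmm1[2,3,4,5,6,7],zmm0[0,1] in the tests below.
  for (int M : decodeVALIGN(8, 2))
    std::printf("%d ", M);
  std::printf("\n");
  return 0;
}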


@@ -55,6 +55,8 @@ void DecodePSRLDQMask(MVT VT, unsigned Imm, SmallVectorImpl<int> &ShuffleMask);
void DecodePALIGNRMask(MVT VT, unsigned Imm, SmallVectorImpl<int> &ShuffleMask);

void DecodeVALIGNMask(MVT VT, unsigned Imm, SmallVectorImpl<int> &ShuffleMask);

/// Decodes the shuffle masks for pshufd/pshufw/vpermilpd/vpermilps.
/// VT indicates the type of the vector allowing it to handle different
/// datatypes and vector widths.


@@ -846,7 +846,7 @@ declare i16 @llvm.x86.avx512.ptestm.d.512(<16 x i32>, <16 x i32>, i16)
define <8 x i64> @test_valign_q(<8 x i64> %a, <8 x i64> %b) {
; CHECK-LABEL: test_valign_q:
; CHECK: ## BB#0:
; CHECK-NEXT: valignq $2, %zmm1, %zmm0, %zmm0
; CHECK-NEXT: valignq {{.*#+}} zmm0 = zmm1[2,3,4,5,6,7],zmm0[0,1]
; CHECK-NEXT: retq
%res = call <8 x i64> @llvm.x86.avx512.mask.valign.q.512(<8 x i64> %a, <8 x i64> %b, i32 2, <8 x i64> zeroinitializer, i8 -1)
ret <8 x i64> %res
@@ -856,7 +856,7 @@ define <8 x i64> @test_mask_valign_q(<8 x i64> %a, <8 x i64> %b, <8 x i64> %src,
; CHECK-LABEL: test_mask_valign_q:
; CHECK: ## BB#0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: valignq $2, %zmm1, %zmm0, %zmm2 {%k1}
; CHECK-NEXT: valignq {{.*#+}} zmm2 {%k1} = zmm1[2,3,4,5,6,7],zmm0[0,1]
; CHECK-NEXT: vmovdqa64 %zmm2, %zmm0
; CHECK-NEXT: retq
%res = call <8 x i64> @llvm.x86.avx512.mask.valign.q.512(<8 x i64> %a, <8 x i64> %b, i32 2, <8 x i64> %src, i8 %mask)
@@ -869,7 +869,7 @@ define <16 x i32> @test_maskz_valign_d(<16 x i32> %a, <16 x i32> %b, i16 %mask)
; CHECK-LABEL: test_maskz_valign_d:
; CHECK: ## BB#0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: valignd $5, %zmm1, %zmm0, %zmm0 {%k1} {z}
; CHECK-NEXT: valignd {{.*#+}} zmm0 {%k1} {z} = zmm1[5,6,7,8,9,10,11,12,13,14,15],zmm0[0,1,2,3,4]
; CHECK-NEXT: retq
%res = call <16 x i32> @llvm.x86.avx512.mask.valign.d.512(<16 x i32> %a, <16 x i32> %b, i32 5, <16 x i32> zeroinitializer, i16 %mask)
ret <16 x i32> %res


@@ -3946,8 +3946,11 @@ define <4 x i32>@test_int_x86_avx512_mask_valign_d_128(<4 x i32> %x0, <4 x i32>
; CHECK: ## BB#0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: valignd $22, %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf3,0x7d,0x09,0x03,0xd1,0x16]
; CHECK-NEXT: ## xmm2 {%k1} = xmm1[2,3],xmm0[0,1]
; CHECK-NEXT: valignd $22, %xmm1, %xmm0, %xmm3 {%k1} {z} ## encoding: [0x62,0xf3,0x7d,0x89,0x03,0xd9,0x16]
; CHECK-NEXT: ## xmm3 {%k1} {z} = xmm1[2,3],xmm0[0,1]
; CHECK-NEXT: valignd $22, %xmm1, %xmm0, %xmm0 ## encoding: [0x62,0xf3,0x7d,0x08,0x03,0xc1,0x16]
; CHECK-NEXT: ## xmm0 = xmm1[2,3],xmm0[0,1]
; CHECK-NEXT: vpaddd %xmm0, %xmm2, %xmm0 ## encoding: [0x62,0xf1,0x6d,0x08,0xfe,0xc0]
; CHECK-NEXT: vpaddd %xmm3, %xmm0, %xmm0 ## encoding: [0x62,0xf1,0x7d,0x08,0xfe,0xc3]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -3966,7 +3969,9 @@ define <8 x i32>@test_int_x86_avx512_mask_valign_d_256(<8 x i32> %x0, <8 x i32>
; CHECK: ## BB#0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: valignd $22, %ymm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf3,0x7d,0x29,0x03,0xd1,0x16]
; CHECK-NEXT: ## ymm2 {%k1} = ymm1[6,7],ymm0[0,1,2,3,4,5]
; CHECK-NEXT: valignd $22, %ymm1, %ymm0, %ymm0 ## encoding: [0x62,0xf3,0x7d,0x28,0x03,0xc1,0x16]
; CHECK-NEXT: ## ymm0 = ymm1[6,7],ymm0[0,1,2,3,4,5]
; CHECK-NEXT: vpaddd %ymm0, %ymm2, %ymm0 ## encoding: [0x62,0xf1,0x6d,0x28,0xfe,0xc0]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <8 x i32> @llvm.x86.avx512.mask.valign.d.256(<8 x i32> %x0, <8 x i32> %x1, i32 22, <8 x i32> %x3, i8 %x4)
@@ -3982,7 +3987,9 @@ define <2 x i64>@test_int_x86_avx512_mask_valign_q_128(<2 x i64> %x0, <2 x i64>
; CHECK: ## BB#0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: valignq $22, %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf3,0xfd,0x09,0x03,0xd1,0x16]
; CHECK-NEXT: ## xmm2 {%k1} = xmm1[0,1]
; CHECK-NEXT: valignq $22, %xmm1, %xmm0, %xmm0 ## encoding: [0x62,0xf3,0xfd,0x08,0x03,0xc1,0x16]
; CHECK-NEXT: ## xmm0 = xmm1[0,1]
; CHECK-NEXT: vpaddq %xmm0, %xmm2, %xmm0 ## encoding: [0x62,0xf1,0xed,0x08,0xd4,0xc0]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <2 x i64> @llvm.x86.avx512.mask.valign.q.128(<2 x i64> %x0, <2 x i64> %x1, i32 22, <2 x i64> %x3, i8 %x4)
@@ -3998,7 +4005,9 @@ define <4 x i64>@test_int_x86_avx512_mask_valign_q_256(<4 x i64> %x0, <4 x i64>
; CHECK: ## BB#0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: valignq $22, %ymm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf3,0xfd,0x29,0x03,0xd1,0x16]
; CHECK-NEXT: ## ymm2 {%k1} = ymm1[2,3],ymm0[0,1]
; CHECK-NEXT: valignq $22, %ymm1, %ymm0, %ymm0 ## encoding: [0x62,0xf3,0xfd,0x28,0x03,0xc1,0x16]
; CHECK-NEXT: ## ymm0 = ymm1[2,3],ymm0[0,1]
; CHECK-NEXT: vpaddq %ymm0, %ymm2, %ymm0 ## encoding: [0x62,0xf1,0xed,0x28,0xd4,0xc0]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <4 x i64> @llvm.x86.avx512.mask.valign.q.256(<4 x i64> %x0, <4 x i64> %x1, i32 22, <4 x i64> %x3, i8 %x4)
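
One detail worth spelling out about these vl tests: they use the immediate 22, which is larger than the element count in every case, and DecodeVALIGNMask keeps only the low bits of the immediate, so the effective shift is 22 reduced modulo the element count. Worked out per case, these figures simply restate what the CHECK comments above already show:

  valignd, 4 x i32 (xmm): 22 & 3 = 2 -> xmm1[2,3],xmm0[0,1]
  valignd, 8 x i32 (ymm): 22 & 7 = 6 -> ymm1[6,7],ymm0[0,1,2,3,4,5]
  valignq, 2 x i64 (xmm): 22 & 1 = 0 -> xmm1[0,1]
  valignq, 4 x i64 (ymm): 22 & 3 = 2 -> ymm1[2,3],ymm0[0,1]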