From cc6d302c91baad2ecf3c9a75ce68d552df0a42b7 Mon Sep 17 00:00:00 2001
From: Craig Topper
Date: Thu, 22 Jul 2021 23:25:33 -0700
Subject: [PATCH] [X86] Fix a bug in TEST with immediate creation

This code tries to form a TEST from CMP+AND with an optional truncate
in between. If we looked through the truncate, we may have extra bits
in the AND mask that shouldn't participate in the checks. Normally
SimplifyDemandedBits takes care of this, but the AND may have another
user. So manually mask out any extra bits.

Fixes PR51175.

Differential Revision: https://reviews.llvm.org/D106634
---
 llvm/lib/Target/X86/X86ISelDAGToDAG.cpp | 3 +++
 llvm/test/CodeGen/X86/pr51175.ll        | 2 +-
 2 files changed, 4 insertions(+), 1 deletion(-)

diff --git a/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp b/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp
index c263c62e2e2e..e9c7ba44b524 100644
--- a/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp
+++ b/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp
@@ -5446,6 +5446,9 @@ void X86DAGToDAGISel::Select(SDNode *Node) {
       ConstantSDNode *C = dyn_cast<ConstantSDNode>(N0.getOperand(1));
       if (!C) break;
       uint64_t Mask = C->getZExtValue();
+      // We may have looked through a truncate so mask off any bits that
+      // shouldn't be part of the compare.
+      Mask &= maskTrailingOnes<uint64_t>(CmpVT.getScalarSizeInBits());
 
       // Check if we can replace AND+IMM64 with a shift. This is possible for
       // masks like 0xFF000000 or 0x00FFFFFF and if we care only about the zero
diff --git a/llvm/test/CodeGen/X86/pr51175.ll b/llvm/test/CodeGen/X86/pr51175.ll
index 9800165cc9e7..26d7492d9071 100644
--- a/llvm/test/CodeGen/X86/pr51175.ll
+++ b/llvm/test/CodeGen/X86/pr51175.ll
@@ -15,7 +15,7 @@ define i32 @foo(i16 signext %0, i32 %1, i32* nocapture %2) {
 ; CHECK-NEXT:    andl $65527, %eax # imm = 0xFFF7
 ; CHECK-NEXT:    movl %eax, (%rdx)
 ; CHECK-NEXT:    xorl %eax, %eax
-; CHECK-NEXT:    testl $-9, %edi
+; CHECK-NEXT:    testb $-9, %dil
 ; CHECK-NEXT:    cmovel %esi, %eax
 ; CHECK-NEXT:    retq
   %4 = add i16 %0, 1
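
For illustration, below is a minimal standalone C++ sketch (not LLVM code) of
the arithmetic performed by the added "Mask &= maskTrailingOnes<uint64_t>(...)"
line: once the compare has been narrowed through a truncate, the AND immediate
must be clipped to the compare width. maskTrailingOnesStandalone is a local
stand-in for llvm::maskTrailingOnes<uint64_t>, and the 0xFFF7 immediate and the
8-bit compare width are taken from the pr51175.ll test above.

#include <cstdint>
#include <cstdio>

// N low bits set to one; guard N >= 64 to avoid an undefined 64-bit shift.
static uint64_t maskTrailingOnesStandalone(unsigned N) {
  return N >= 64 ? ~0ULL : (1ULL << N) - 1;
}

int main() {
  uint64_t Mask = 0xFFF7;  // AND immediate (65527) from the test case
  unsigned CmpBits = 8;    // compare narrowed through a truncate to 8 bits
  Mask &= maskTrailingOnesStandalone(CmpBits);
  // 0xFFF7 & 0xFF == 0xF7, i.e. -9 as a signed byte -- the immediate in the
  // expected "testb $-9, %dil" output.
  printf("mask = 0x%llx (%d as i8)\n", (unsigned long long)Mask,
         (int)(int8_t)Mask);
  return 0;
}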