[X86] Add test cases to show missed opportunities to use a sign extended 8 or 32 bit immediate AND when reversing SHL+AND to form an LEA.

When we shift the AND mask over, we should shift in sign bits instead of zero bits. The scale in the LEA will shift those bits back out, so it doesn't matter whether we mask them off or not, but using sign bits potentially allows a sign-extended 8-bit or 32-bit immediate to be used.
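For example, in t1 below the mask is -1020 and the shift amount is 2. Shifting the mask right with zeros gives 0x3FFFFFFFFFFFFF01, which does not fit a sign-extended 32-bit immediate, so we currently emit:

  movabsq $4611686018427387649, %rax # imm = 0x3FFFFFFFFFFFFF01
  andq %rsi, %rax
  movb (%rdi,%rax,4), %al

Shifting the mask right arithmetically instead gives -255, which fits a sign-extended 8-bit immediate. Because the scale-4 addressing shifts the low two bits back out, a sequence along the lines of the following (a hand-written sketch, not output produced by this commit) would compute the same address:

  andq $-255, %rsi
  movb (%rdi,%rsi,4), %al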

Also add some other test cases that are already handled optimally.

llvm-svn: 357845
Craig Topper 2019-04-06 18:00:45 +00:00
parent 9d7379c250
commit 82448bc09e
2 changed files with 113 additions and 0 deletions

@@ -0,0 +1,79 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown | FileCheck %s
define i8 @t1(i8* %X, i64 %i) {
; CHECK-LABEL: t1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movabsq $4611686018427387649, %rax # imm = 0x3FFFFFFFFFFFFF01
; CHECK-NEXT: andq %rsi, %rax
; CHECK-NEXT: movb (%rdi,%rax,4), %al
; CHECK-NEXT: retq
entry:
%tmp2 = shl i64 %i, 2
%tmp4 = and i64 %tmp2, -1020
%tmp7 = getelementptr i8, i8* %X, i64 %tmp4
%tmp9 = load i8, i8* %tmp7
ret i8 %tmp9
}
define i8 @t2(i8* %X, i64 %i) {
; CHECK-LABEL: t2:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movabsq $4611686018427387890, %rax # imm = 0x3FFFFFFFFFFFFFF2
; CHECK-NEXT: andq %rsi, %rax
; CHECK-NEXT: movb (%rdi,%rax,4), %al
; CHECK-NEXT: retq
entry:
%tmp2 = shl i64 %i, 2
%tmp4 = and i64 %tmp2, -56
%tmp7 = getelementptr i8, i8* %X, i64 %tmp4
%tmp9 = load i8, i8* %tmp7
ret i8 %tmp9
}
define i8 @t3(i8* %X, i64 %i) {
; CHECK-LABEL: t3:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movl %esi, %eax
; CHECK-NEXT: movb (%rdi,%rax,4), %al
; CHECK-NEXT: retq
entry:
%tmp2 = shl i64 %i, 2
%tmp4 = and i64 %tmp2, 17179869180
%tmp7 = getelementptr i8, i8* %X, i64 %tmp4
%tmp9 = load i8, i8* %tmp7
ret i8 %tmp9
}
define i8 @t4(i8* %X, i64 %i) {
; CHECK-LABEL: t4:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: andl $-2, %esi
; CHECK-NEXT: movb (%rdi,%rsi,4), %al
; CHECK-NEXT: retq
entry:
%tmp2 = shl i64 %i, 2
%tmp4 = and i64 %tmp2, 17179869176
%tmp7 = getelementptr i8, i8* %X, i64 %tmp4
%tmp9 = load i8, i8* %tmp7
ret i8 %tmp9
}
define i8 @t5(i8* %X, i64 %i) {
; CHECK-LABEL: t5:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: andl $-250002, %esi # imm = 0xFFFC2F6E
; CHECK-NEXT: movb (%rdi,%rsi,4), %al
; CHECK-NEXT: retq
entry:
%tmp2 = shl i64 %i, 2
%tmp4 = and i64 %tmp2, 17178869176
%tmp7 = getelementptr i8, i8* %X, i64 %tmp4
%tmp9 = load i8, i8* %tmp7
ret i8 %tmp9
}

@@ -89,3 +89,37 @@ entry:
%sum.2 = add i32 %sum.1, %index
ret i32 %sum.2
}
define i8 @t5(i8* %X, i32 %i) {
; CHECK-LABEL: t5:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %ecx
; CHECK-NEXT: andl $1073741810, %ecx # imm = 0x3FFFFFF2
; CHECK-NEXT: movb (%eax,%ecx,4), %al
; CHECK-NEXT: retl
entry:
%tmp2 = shl i32 %i, 2
%tmp4 = and i32 %tmp2, -56
%tmp7 = getelementptr i8, i8* %X, i32 %tmp4
%tmp9 = load i8, i8* %tmp7
ret i8 %tmp9
}
define i8 @t6(i8* %X, i32 %i) {
; CHECK-LABEL: t6:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %ecx
; CHECK-NEXT: andl $1073741569, %ecx # imm = 0x3FFFFF01
; CHECK-NEXT: movb (%eax,%ecx,4), %al
; CHECK-NEXT: retl
entry:
%tmp2 = shl i32 %i, 2
%tmp4 = and i32 %tmp2, -1020
%tmp7 = getelementptr i8, i8* %X, i32 %tmp4
%tmp9 = load i8, i8* %tmp7
ret i8 %tmp9
}