diff --git a/llvm/test/CodeGen/X86/partial-fold32.ll b/llvm/test/CodeGen/X86/partial-fold32.ll
new file mode 100644
index 000000000000..ba3f73ba90fc
--- /dev/null
+++ b/llvm/test/CodeGen/X86/partial-fold32.ll
@@ -0,0 +1,26 @@
+; RUN: llc -mtriple=i686-unknown-linux-gnu -enable-misched=false < %s | FileCheck %s
+
+define fastcc i8 @fold32to8(i32 %add, i8 %spill) {
+; CHECK-LABEL: fold32to8:
+; CHECK: movl %ecx, (%esp) # 4-byte Spill
+; CHECK: movl (%esp), %eax # 4-byte Reload
+; CHECK: subb %al, %dl
+entry:
+  tail call void asm sideeffect "", "~{eax},~{ebx},~{ecx},~{edi},~{esi},~{ebp},~{dirflag},~{fpsr},~{flags}"()
+  %trunc = trunc i32 %add to i8
+  %sub = sub i8 %spill, %trunc
+  ret i8 %sub
+}
+
+; Do not fold a 1-byte store into a 4-byte spill slot
+define fastcc i8 @nofold(i32 %add, i8 %spill) {
+; CHECK-LABEL: nofold:
+; CHECK: movl %edx, (%esp) # 4-byte Spill
+; CHECK: movl (%esp), %eax # 4-byte Reload
+; CHECK: subb %cl, %al
+entry:
+  tail call void asm sideeffect "", "~{eax},~{ebx},~{edx},~{edi},~{esi},~{ebp},~{dirflag},~{fpsr},~{flags}"()
+  %trunc = trunc i32 %add to i8
+  %sub = sub i8 %spill, %trunc
+  ret i8 %sub
+}
diff --git a/llvm/test/CodeGen/X86/partial-fold64.ll b/llvm/test/CodeGen/X86/partial-fold64.ll
new file mode 100644
index 000000000000..b9ea7d6773a3
--- /dev/null
+++ b/llvm/test/CodeGen/X86/partial-fold64.ll
@@ -0,0 +1,43 @@
+; RUN: llc -mtriple=x86_64-unknown-linux-gnu -enable-misched=false < %s | FileCheck %s
+
+define i32 @fold64to32(i64 %add, i32 %spill) {
+; CHECK-LABEL: fold64to32:
+; CHECK: movq %rdi, -{{[0-9]+}}(%rsp) # 8-byte Spill
+; CHECK: movq -{{[0-9]+}}(%rsp), %rax # 8-byte Reload
+; CHECK: subl %eax, %esi
+entry:
+  tail call void asm sideeffect "", "~{rax},~{rbx},~{rcx},~{rdx},~{rdi},~{rbp},~{r8},~{r9},~{r10},~{r11},~{r12},~{r13},~{r14},~{r15},~{dirflag},~{fpsr},~{flags}"()
+  %trunc = trunc i64 %add to i32
+  %sub = sub i32 %spill, %trunc
+  ret i32 %sub
+}
+
+define i8 @fold64to8(i64 %add, i8 %spill) {
+; CHECK-LABEL: fold64to8:
+; CHECK: movq %rdi, -{{[0-9]+}}(%rsp) # 8-byte Spill
+; CHECK: movq -{{[0-9]+}}(%rsp), %rax # 8-byte Reload
+; CHECK: subb %al, %sil
+entry:
+  tail call void asm sideeffect "", "~{rax},~{rbx},~{rcx},~{rdx},~{rdi},~{rbp},~{r8},~{r9},~{r10},~{r11},~{r12},~{r13},~{r14},~{r15},~{dirflag},~{fpsr},~{flags}"()
+  %trunc = trunc i64 %add to i8
+  %sub = sub i8 %spill, %trunc
+  ret i8 %sub
+}
+
+; Do not fold a 4-byte store into an 8-byte spill slot
+; CHECK-LABEL: nofold
+; CHECK: movq %rsi, -{{[0-9]+}}(%rsp) # 8-byte Spill
+; CHECK: movq -{{[0-9]+}}(%rsp), %rax # 8-byte Reload
+; CHECK: subl %edi, %eax
+; CHECK: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill
+; CHECK: movq -{{[0-9]+}}(%rsp), %rax # 8-byte Reload
+define i32 @nofold(i64 %add, i64 %spill) {
+entry:
+  tail call void asm sideeffect "", "~{rax},~{rbx},~{rcx},~{rdx},~{rsi},~{rbp},~{r8},~{r9},~{r10},~{r11},~{r12},~{r13},~{r14},~{r15},~{dirflag},~{fpsr},~{flags}"()
+  %trunc = trunc i64 %add to i32
+  %truncspill = trunc i64 %spill to i32
+  %sub = sub i32 %truncspill, %trunc
+  tail call void asm sideeffect "", "~{rax},~{rbx},~{rcx},~{rdx},~{rsi},~{rdi},~{rbp},~{r8},~{r9},~{r10},~{r11},~{r12},~{r13},~{r14},~{r15},~{dirflag},~{fpsr},~{flags}"()
+  ret i32 %sub
+}
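
For context: every CHECK block above pins down the same invariant in the spill-slot
folding peephole. When a spilled value is only used at a narrower width (subb reading
1 byte of a 4-byte slot, subl reading 4 bytes of an 8-byte slot), the memory operand
must not be folded into the arithmetic instruction; the tests expect a full-width
reload followed by a register-register op instead. The snippet below is a minimal
standalone model of that size guard, not the actual LLVM folding code; canFoldSpillSlot
and its arguments are hypothetical names invented for illustration.

    // size_guard.cpp -- standalone model of the "no partial fold" rule.
    #include <cstdio>

    // Hypothetical predicate: a memory access of accessBytes may only be
    // folded into a spill slot of slotBytes when the widths match exactly.
    static bool canFoldSpillSlot(unsigned slotBytes, unsigned accessBytes) {
      // A partial access must stay a separate full-width reload: folding it
      // would make the combined instruction touch the wrong number of bytes.
      return accessBytes == slotBytes;
    }

    int main() {
      std::printf("1-byte use of 4-byte slot: %s\n",
                  canFoldSpillSlot(4, 1) ? "fold" : "no fold"); // partial-fold32.ll
      std::printf("4-byte use of 8-byte slot: %s\n",
                  canFoldSpillSlot(8, 4) ? "fold" : "no fold"); // partial-fold64.ll
      std::printf("4-byte use of 4-byte slot: %s\n",
                  canFoldSpillSlot(4, 4) ? "fold" : "no fold"); // matching widths
      return 0;
    }

Under this model the first two cases print "no fold", which is exactly what the
movl/movq reload CHECK lines in both test files assert.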