; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu | FileCheck %s
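; Reduced regression test for PR34127: i16 values loaded from globals are
; combined by chained 'and's, compared against zero, and stored out as
; i32/i64. The CHECK lines pin the expected x86-64 lowering.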
@var_3 = external global i16, align 2
@var_13 = external global i16, align 2
@var_212 = external global i64, align 8
define void @pr34127() {
; CHECK-LABEL: pr34127:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movzwl {{.*}}(%rip), %eax
; CHECK-NEXT: movzwl {{.*}}(%rip), %ecx
; CHECK-NEXT: andl %eax, %ecx
; CHECK-NEXT: andl %eax, %ecx
; CHECK-NEXT: movzwl %cx, %ecx
; CHECK-NEXT: movl %ecx, -{{[0-9]+}}(%rsp)
; CHECK-NEXT: xorl %edx, %edx
; CHECK-NEXT: testw %cx, %cx
; CHECK-NEXT: sete %dl
; CHECK-NEXT: andl %eax, %edx
; CHECK-NEXT: movq %rdx, {{.*}}(%rip)
; CHECK-NEXT: movw $0, (%rax)
; CHECK-NEXT: retq
entry:
%a = alloca i32, align 4
%0 = load i16, i16* @var_3, align 2
%conv = zext i16 %0 to i32
%1 = load i16, i16* @var_3, align 2
%conv1 = zext i16 %1 to i32
%2 = load i16, i16* @var_13, align 2
%conv2 = zext i16 %2 to i32
%and = and i32 %conv1, %conv2
%and3 = and i32 %conv, %and
store i32 %and3, i32* %a, align 4
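; The same and-chain is recomputed, compared against zero, and the inverted
; i1 result is masked with another load of @var_3 before the i64 store below.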
%3 = load i16, i16* @var_3, align 2
%conv4 = zext i16 %3 to i32
%4 = load i16, i16* @var_3, align 2
%conv5 = zext i16 %4 to i32
%5 = load i16, i16* @var_13, align 2
%conv6 = zext i16 %5 to i32
%and7 = and i32 %conv5, %conv6
%and8 = and i32 %conv4, %and7
%tobool = icmp ne i32 %and8, 0
%lnot = xor i1 %tobool, true
%conv9 = zext i1 %lnot to i32
%6 = load i16, i16* @var_3, align 2
%conv10 = zext i16 %6 to i32
%and11 = and i32 %conv9, %conv10
%conv12 = sext i32 %and11 to i64
store i64 %conv12, i64* @var_212, align 8
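; Finally, an i1 undef is zero-extended and stored through an undef pointer;
; this presumably folds to the 'movw $0, (%rax)' in the CHECK lines above.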
%conv14 = zext i1 undef to i16
store i16 %conv14, i16* undef, align 2
ret void
}