; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=i686-unknown-unknown -mcpu=skx | FileCheck %s --check-prefix=X86
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=skx | FileCheck %s --check-prefix=X64

; Check for assert in foldMaskAndShiftToScale due to out of range mask scaling.
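; foldMaskAndShiftToScale (X86 ISel address-mode matching) tries to fold a
; shift-plus-mask pattern into the scale of a memory operand; the constants
; below produce a mask whose scaled value falls outside the legal range,
; which used to trip that assert.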

@b = common global i8 zeroinitializer, align 1
@c = common global i8 zeroinitializer, align 1
@d = common global i64 zeroinitializer, align 8
@e = common global i64 zeroinitializer, align 8

define void @foo() {
; X86-LABEL: foo:
; X86: # BB#0:
; X86-NEXT: pushl %eax
; X86-NEXT: .cfi_def_cfa_offset 8
; X86-NEXT: movl d, %eax
; X86-NEXT: movl d+4, %ecx
; X86-NEXT: movl $701685459, %edx # imm = 0x29D2DED3
; X86-NEXT: andnl %edx, %ecx, %ecx
; X86-NEXT: movl $-564453154, %edx # imm = 0xDE5B20DE
; X86-NEXT: andnl %edx, %eax, %edx
; X86-NEXT: shrdl $21, %ecx, %edx
; X86-NEXT: shrl $21, %ecx
; X86-NEXT: xorl %eax, %eax
; X86-NEXT: testb %al, %al
; X86-NEXT: cmovnel %ecx, %edx
; X86-NEXT: cmovnel %eax, %ecx
; X86-NEXT: andl $-2, %edx
; X86-NEXT: addl $7, %edx
; X86-NEXT: adcxl %eax, %ecx
; X86-NEXT: pushl %ecx
; X86-NEXT: .cfi_adjust_cfa_offset 4
; X86-NEXT: pushl %edx
; X86-NEXT: .cfi_adjust_cfa_offset 4
; X86-NEXT: pushl $0
; X86-NEXT: .cfi_adjust_cfa_offset 4
; X86-NEXT: pushl $0
; X86-NEXT: .cfi_adjust_cfa_offset 4
; X86-NEXT: calll __divdi3
; X86-NEXT: addl $16, %esp
; X86-NEXT: .cfi_adjust_cfa_offset -16
; X86-NEXT: orl %eax, %edx
; X86-NEXT: setne {{[0-9]+}}(%esp)
; X86-NEXT: popl %eax
; X86-NEXT: retl
;
; X64-LABEL: foo:
; X64: # BB#0:
; X64-NEXT: movq {{.*}}(%rip), %rax
; X64-NEXT: movabsq $3013716102212485120, %rcx # imm = 0x29D2DED3DE400000
; X64-NEXT: andnq %rcx, %rax, %rcx
; X64-NEXT: shrq $21, %rcx
; X64-NEXT: addq $7, %rcx
; X64-NEXT: movabsq $4393751543808, %rax # imm = 0x3FF00000000
; X64-NEXT: testq %rax, %rcx
; X64-NEXT: je .LBB0_1
; X64-NEXT: # BB#2:
; X64-NEXT: xorl %eax, %eax
; X64-NEXT: xorl %edx, %edx
; X64-NEXT: idivq %rcx
; X64-NEXT: jmp .LBB0_3
; X64-NEXT: .LBB0_1:
; X64-NEXT: xorl %eax, %eax
; X64-NEXT: xorl %edx, %edx
; X64-NEXT: divl %ecx
; X64-NEXT: # kill: %EAX<def> %EAX<kill> %RAX<def>
; X64-NEXT: .LBB0_3:
; X64-NEXT: testq %rax, %rax
; X64-NEXT: setne -{{[0-9]+}}(%rsp)
; X64-NEXT: retq
%1 = alloca i8, align 1
%2 = load i64, i64* @d, align 8
%3 = or i64 -3013716102214263007, %2
%4 = xor i64 %3, -1
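; %3/%4 together compute ~(C | %2), the not-of-or pattern that shows up as
; the andn instructions in the CHECK lines above.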
%5 = load i64, i64* @e, align 8
%6 = load i8, i8* @b, align 1
%7 = trunc i8 %6 to i1
%8 = zext i1 %7 to i64
%9 = xor i64 %5, %8
%10 = load i8, i8* @c, align 1
%11 = trunc i8 %10 to i1
%12 = zext i1 %11 to i32
%13 = or i32 551409149, %12
%14 = sub nsw i32 %13, 551409131
%15 = zext i32 %14 to i64
%16 = shl i64 %9, %15
%17 = sub nsw i64 %16, 223084523
%18 = ashr i64 %4, %17
%19 = and i64 %18, 9223372036854775806
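; 9223372036854775806 = 0x7FFFFFFFFFFFFFFE; this wide, even mask combined
; with the shift feeding it is presumably the mask scaling that went out of
; range in foldMaskAndShiftToScale.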
%20 = add nsw i64 7, %19
%21 = sdiv i64 0, %20
%22 = icmp ne i64 %21, 0
%23 = zext i1 %22 to i8
store i8 %23, i8* %1, align 1
ret void
}