[x86] promote all multiply i8 by constant to i32
We have these 2 "isDesirable" promotion hooks (I'm not sure why we need both of them, but that's independent of this patch), and we can adjust them to promote "mul i8 X, C" to i32. Then, all of our existing LEA and other multiply expansion magic happens as it would for i32 ops.

Some of the test diffs show that we could end up with an actual 32-bit mul instruction here because we choose not to expand to simpler ops. That instruction could be slower depending on the subtarget. On the plus side, this means we don't need a separate instruction to load the constant operand and possibly an extra instruction to move the result.

If we need to tune mul i32 further, we could add a later transform that tries to shrink it back to i8 based on subtarget timing.

I did not bother to duplicate all of the 32-bit test file RUNs and target settings that exist to test whether LEA expansion is cheap or not. The diffs here assume a default target, so that means LEA is generally cheap.

Differential Revision: https://reviews.llvm.org/D54803

llvm-svn: 347557
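As a reminder of what the i32 expansion buys: LEA's scaled-index addressing computes base + index*{2,4,8} in one instruction, so many small constants reduce to a short shift/add chain instead of a multiply. The C++ below is an illustrative sketch only (the helper names are mine, not part of this patch or of LLVM); it spells out the identities behind the leal/shll sequences that show up in the test diffs.

// Illustrative sketch only: these mirror the leal/shll/addl patterns in the
// test diffs below; the helper names are hypothetical.
#include <cstdint>

uint32_t mulBy5(uint32_t X)  { return X + X * 4; }           // leal (%rdi,%rdi,4), %eax
uint32_t mulBy9(uint32_t X)  { return X + X * 8; }           // leal (%rdi,%rdi,8), %eax
uint32_t mulBy11(uint32_t X) { return X + (X + X * 4) * 2; } // two leal instructions
uint32_t mulBy20(uint32_t X) { uint32_t T = X << 2;          // shll $2, %edi
                               return T + T * 4; }           // leal (%rdi,%rdi,4), %eax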
This commit is contained in:
parent 2447baff84
commit d31220e0de
@@ -41060,10 +41060,6 @@ SDValue X86TargetLowering::PerformDAGCombine(SDNode *N,
return SDValue();
}

/// Return true if the target has native support for the specified value type
/// and it is 'desirable' to use the type for the given node type. e.g. On x86
/// i16 is legal, but undesirable since i16 instruction encodings are longer and
/// some i16 instructions are slow.
bool X86TargetLowering::isTypeDesirableForOp(unsigned Opc, EVT VT) const {
if (!isTypeLegal(VT))
return false;
@@ -41072,26 +41068,37 @@ bool X86TargetLowering::isTypeDesirableForOp(unsigned Opc, EVT VT) const {
if (Opc == ISD::SHL && VT.isVector() && VT.getVectorElementType() == MVT::i8)
return false;

if (VT != MVT::i16)
return true;

switch (Opc) {
default:
return true;
case ISD::LOAD:
case ISD::SIGN_EXTEND:
case ISD::ZERO_EXTEND:
case ISD::ANY_EXTEND:
case ISD::SHL:
case ISD::SRL:
case ISD::SUB:
case ISD::ADD:
case ISD::MUL:
case ISD::AND:
case ISD::OR:
case ISD::XOR:
// 8-bit multiply is probably not much cheaper than 32-bit multiply, and
// we have specializations to turn 32-bit multiply into LEA or other ops.
// Also, see the comment in "IsDesirableToPromoteOp" - where we additionally
// check for a constant operand to the multiply.
if (Opc == ISD::MUL && VT == MVT::i8)
return false;

// i16 instruction encodings are longer and some i16 instructions are slow,
// so those are not desirable.
if (VT == MVT::i16) {
switch (Opc) {
default:
break;
case ISD::LOAD:
case ISD::SIGN_EXTEND:
case ISD::ZERO_EXTEND:
case ISD::ANY_EXTEND:
case ISD::SHL:
case ISD::SRL:
case ISD::SUB:
case ISD::ADD:
case ISD::MUL:
case ISD::AND:
case ISD::OR:
case ISD::XOR:
return false;
}
}

// Any legal type not explicitly accounted for above here is desirable.
return true;
}

SDValue X86TargetLowering::expandIndirectJTBranch(const SDLoc& dl,
@@ -41110,12 +41117,16 @@ SDValue X86TargetLowering::expandIndirectJTBranch(const SDLoc& dl,
return TargetLowering::expandIndirectJTBranch(dl, Value, Addr, DAG);
}

/// This method query the target whether it is beneficial for dag combiner to
/// promote the specified node. If true, it should return the desired promotion
/// type by reference.
bool X86TargetLowering::IsDesirableToPromoteOp(SDValue Op, EVT &PVT) const {
EVT VT = Op.getValueType();
if (VT != MVT::i16)
bool Is8BitMulByConstant = VT == MVT::i8 && Op.getOpcode() == ISD::MUL &&
isa<ConstantSDNode>(Op.getOperand(1));

// i16 is legal, but undesirable since i16 instruction encodings are longer
// and some i16 instructions are slow.
// 8-bit multiply-by-constant can usually be expanded to something cheaper
// using LEA and/or other ALU ops.
if (VT != MVT::i16 && !Is8BitMulByConstant)
return false;

auto IsFoldableRMW = [](SDValue Load, SDValue Op) {
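A note on the tradeoff these hooks accept: not every constant has a cheap shift/LEA decomposition, so after promotion some cases keep a real 32-bit multiply (the imull $42 in the urem diff further down is one such case), which can be slower on some subtargets; even then, we drop the movb that loaded the i8 constant and the AL-tied mulb. Illustrative C++ only, with hypothetical helper names:

// Illustrative sketch only, not LLVM code.
#include <cstdint>

uint8_t mulBy5_i8(uint8_t X)  { return (uint8_t)(X + X * 4u); } // decomposes: one leal after promotion
uint8_t mulBy42_i8(uint8_t X) { return (uint8_t)(X * 42u); }    // no cheap chain; the urem diff below shows imull $42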
@@ -1,19 +1,13 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=x86_64-- -enable-ipra -print-regusage -o - 2>&1 < %s | FileCheck %s --check-prefix=DEBUG
; RUN: llc -mtriple=x86_64-- -enable-ipra -o - < %s | FileCheck %s

; Here only CL is clobbered so CH should not be clobbred, but CX, ECX and RCX
; should be clobbered.
; DEBUG: main Clobbered Registers: $ah $al $ax $cl $cx $eax $ecx $eflags $hax $rax $rcx

define i8 @main(i8 %X) {
; CHECK-LABEL: main:
; CHECK: # %bb.0:
; CHECK-NEXT: movl %edi, %eax
; CHECK-NEXT: movb $5, %cl
; CHECK-NEXT: # kill: def $al killed $al killed $eax
; CHECK-NEXT: mulb %cl
; CHECK-NEXT: # kill: def $edi killed $edi def $rdi
; CHECK-NEXT: leal (%rdi,%rdi,4), %eax
; CHECK-NEXT: addb $5, %al
; CHECK-NEXT: # kill: def $al killed $al killed $eax
; CHECK-NEXT: retq
%inc = add i8 %X, 1
%inc2 = mul i8 %inc, 5
@@ -518,33 +518,29 @@ define <2 x i64> @urem_op0_constant(i64* %p) nounwind {
define <16 x i8> @urem_op1_constant(i8* %p) nounwind {
; SSE-LABEL: urem_op1_constant:
; SSE: # %bb.0:
; SSE-NEXT: movb (%rdi), %cl
; SSE-NEXT: movl %ecx, %eax
; SSE-NEXT: shrb %al
; SSE-NEXT: movb (%rdi), %al
; SSE-NEXT: movl %eax, %ecx
; SSE-NEXT: shrb %cl
; SSE-NEXT: movzbl %cl, %ecx
; SSE-NEXT: imull $49, %ecx, %ecx
; SSE-NEXT: shrl $10, %ecx
; SSE-NEXT: imull $42, %ecx, %ecx
; SSE-NEXT: subb %cl, %al
; SSE-NEXT: movzbl %al, %eax
; SSE-NEXT: imull $49, %eax, %eax
; SSE-NEXT: shrl $10, %eax
; SSE-NEXT: movb $42, %dl
; SSE-NEXT: # kill: def $al killed $al killed $eax
; SSE-NEXT: mulb %dl
; SSE-NEXT: subb %al, %cl
; SSE-NEXT: movzbl %cl, %eax
; SSE-NEXT: movd %eax, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: urem_op1_constant:
; AVX: # %bb.0:
; AVX-NEXT: movb (%rdi), %cl
; AVX-NEXT: movl %ecx, %eax
; AVX-NEXT: shrb %al
; AVX-NEXT: movb (%rdi), %al
; AVX-NEXT: movl %eax, %ecx
; AVX-NEXT: shrb %cl
; AVX-NEXT: movzbl %cl, %ecx
; AVX-NEXT: imull $49, %ecx, %ecx
; AVX-NEXT: shrl $10, %ecx
; AVX-NEXT: imull $42, %ecx, %ecx
; AVX-NEXT: subb %cl, %al
; AVX-NEXT: movzbl %al, %eax
; AVX-NEXT: imull $49, %eax, %eax
; AVX-NEXT: shrl $10, %eax
; AVX-NEXT: movb $42, %dl
; AVX-NEXT: # kill: def $al killed $al killed $eax
; AVX-NEXT: mulb %dl
; AVX-NEXT: subb %al, %cl
; AVX-NEXT: movzbl %cl, %eax
; AVX-NEXT: vmovd %eax, %xmm0
; AVX-NEXT: retq
%x = load i8, i8* %p
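For context on the checks above: the remainder by 42 is computed with a reciprocal multiply rather than a divide, and after this patch the multiply-by-42 step runs as a 32-bit imull instead of an AL-constrained mulb with a separately loaded constant. A hedged C++ restatement of the arithmetic (the helper name is mine, not from the test):

// Restates the SSE/AVX check lines above: for any 8-bit x,
// ((x >> 1) * 49) >> 10 == x / 42, and the remainder is x - 42 * q.
#include <cstdint>

uint8_t urem42(uint8_t X) {
  uint32_t Q = ((uint32_t)(X >> 1) * 49) >> 10; // shrb; movzbl; imull $49; shrl $10
  return (uint8_t)(X - Q * 42);                 // imull $42; subb
}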
@@ -25,10 +25,9 @@ define i8 @test_mul_by_2(i8 %x) {
define i8 @test_mul_by_3(i8 %x) {
; X64-LABEL: test_mul_by_3:
; X64: # %bb.0:
; X64-NEXT: movl %edi, %eax
; X64-NEXT: movb $3, %cl
; X64-NEXT: # kill: def $edi killed $edi def $rdi
; X64-NEXT: leal (%rdi,%rdi,2), %eax
; X64-NEXT: # kill: def $al killed $al killed $eax
; X64-NEXT: mulb %cl
; X64-NEXT: retq
%m = mul i8 %x, 3
ret i8 %m
@@ -48,10 +47,9 @@ define i8 @test_mul_by_4(i8 %x) {
define i8 @test_mul_by_5(i8 %x) {
; X64-LABEL: test_mul_by_5:
; X64: # %bb.0:
; X64-NEXT: movl %edi, %eax
; X64-NEXT: movb $5, %cl
; X64-NEXT: # kill: def $edi killed $edi def $rdi
; X64-NEXT: leal (%rdi,%rdi,4), %eax
; X64-NEXT: # kill: def $al killed $al killed $eax
; X64-NEXT: mulb %cl
; X64-NEXT: retq
%m = mul i8 %x, 5
ret i8 %m
@@ -60,10 +58,10 @@ define i8 @test_mul_by_5(i8 %x) {
define i8 @test_mul_by_6(i8 %x) {
; X64-LABEL: test_mul_by_6:
; X64: # %bb.0:
; X64-NEXT: movl %edi, %eax
; X64-NEXT: movb $6, %cl
; X64-NEXT: # kill: def $edi killed $edi def $rdi
; X64-NEXT: addl %edi, %edi
; X64-NEXT: leal (%rdi,%rdi,2), %eax
; X64-NEXT: # kill: def $al killed $al killed $eax
; X64-NEXT: mulb %cl
; X64-NEXT: retq
%m = mul i8 %x, 6
ret i8 %m
@@ -72,10 +70,10 @@ define i8 @test_mul_by_6(i8 %x) {
define i8 @test_mul_by_7(i8 %x) {
; X64-LABEL: test_mul_by_7:
; X64: # %bb.0:
; X64-NEXT: movl %edi, %eax
; X64-NEXT: movb $7, %cl
; X64-NEXT: # kill: def $edi killed $edi def $rdi
; X64-NEXT: leal (,%rdi,8), %eax
; X64-NEXT: subl %edi, %eax
; X64-NEXT: # kill: def $al killed $al killed $eax
; X64-NEXT: mulb %cl
; X64-NEXT: retq
%m = mul i8 %x, 7
ret i8 %m
@@ -95,10 +93,9 @@ define i8 @test_mul_by_8(i8 %x) {
define i8 @test_mul_by_9(i8 %x) {
; X64-LABEL: test_mul_by_9:
; X64: # %bb.0:
; X64-NEXT: movl %edi, %eax
; X64-NEXT: movb $9, %cl
; X64-NEXT: # kill: def $edi killed $edi def $rdi
; X64-NEXT: leal (%rdi,%rdi,8), %eax
; X64-NEXT: # kill: def $al killed $al killed $eax
; X64-NEXT: mulb %cl
; X64-NEXT: retq
%m = mul i8 %x, 9
ret i8 %m
@@ -107,10 +104,10 @@ define i8 @test_mul_by_9(i8 %x) {
define i8 @test_mul_by_10(i8 %x) {
; X64-LABEL: test_mul_by_10:
; X64: # %bb.0:
; X64-NEXT: movl %edi, %eax
; X64-NEXT: movb $10, %cl
; X64-NEXT: # kill: def $edi killed $edi def $rdi
; X64-NEXT: addl %edi, %edi
; X64-NEXT: leal (%rdi,%rdi,4), %eax
; X64-NEXT: # kill: def $al killed $al killed $eax
; X64-NEXT: mulb %cl
; X64-NEXT: retq
%m = mul i8 %x, 10
ret i8 %m
@@ -119,10 +116,10 @@ define i8 @test_mul_by_10(i8 %x) {
define i8 @test_mul_by_11(i8 %x) {
; X64-LABEL: test_mul_by_11:
; X64: # %bb.0:
; X64-NEXT: movl %edi, %eax
; X64-NEXT: movb $11, %cl
; X64-NEXT: # kill: def $edi killed $edi def $rdi
; X64-NEXT: leal (%rdi,%rdi,4), %eax
; X64-NEXT: leal (%rdi,%rax,2), %eax
; X64-NEXT: # kill: def $al killed $al killed $eax
; X64-NEXT: mulb %cl
; X64-NEXT: retq
%m = mul i8 %x, 11
ret i8 %m
@@ -131,10 +128,10 @@ define i8 @test_mul_by_11(i8 %x) {
define i8 @test_mul_by_12(i8 %x) {
; X64-LABEL: test_mul_by_12:
; X64: # %bb.0:
; X64-NEXT: movl %edi, %eax
; X64-NEXT: movb $12, %cl
; X64-NEXT: # kill: def $edi killed $edi def $rdi
; X64-NEXT: shll $2, %edi
; X64-NEXT: leal (%rdi,%rdi,2), %eax
; X64-NEXT: # kill: def $al killed $al killed $eax
; X64-NEXT: mulb %cl
; X64-NEXT: retq
%m = mul i8 %x, 12
ret i8 %m
@@ -143,10 +140,10 @@ define i8 @test_mul_by_12(i8 %x) {
define i8 @test_mul_by_13(i8 %x) {
; X64-LABEL: test_mul_by_13:
; X64: # %bb.0:
; X64-NEXT: movl %edi, %eax
; X64-NEXT: movb $13, %cl
; X64-NEXT: # kill: def $edi killed $edi def $rdi
; X64-NEXT: leal (%rdi,%rdi,2), %eax
; X64-NEXT: leal (%rdi,%rax,4), %eax
; X64-NEXT: # kill: def $al killed $al killed $eax
; X64-NEXT: mulb %cl
; X64-NEXT: retq
%m = mul i8 %x, 13
ret i8 %m
@@ -156,9 +153,10 @@ define i8 @test_mul_by_14(i8 %x) {
; X64-LABEL: test_mul_by_14:
; X64: # %bb.0:
; X64-NEXT: movl %edi, %eax
; X64-NEXT: movb $14, %cl
; X64-NEXT: shll $4, %eax
; X64-NEXT: subl %edi, %eax
; X64-NEXT: subl %edi, %eax
; X64-NEXT: # kill: def $al killed $al killed $eax
; X64-NEXT: mulb %cl
; X64-NEXT: retq
%m = mul i8 %x, 14
ret i8 %m
@@ -167,10 +165,10 @@ define i8 @test_mul_by_14(i8 %x) {
define i8 @test_mul_by_15(i8 %x) {
; X64-LABEL: test_mul_by_15:
; X64: # %bb.0:
; X64-NEXT: movl %edi, %eax
; X64-NEXT: movb $15, %cl
; X64-NEXT: # kill: def $edi killed $edi def $rdi
; X64-NEXT: leal (%rdi,%rdi,4), %eax
; X64-NEXT: leal (%rax,%rax,2), %eax
; X64-NEXT: # kill: def $al killed $al killed $eax
; X64-NEXT: mulb %cl
; X64-NEXT: retq
%m = mul i8 %x, 15
ret i8 %m
@@ -190,10 +188,11 @@ define i8 @test_mul_by_16(i8 %x) {
define i8 @test_mul_by_17(i8 %x) {
; X64-LABEL: test_mul_by_17:
; X64: # %bb.0:
; X64-NEXT: # kill: def $edi killed $edi def $rdi
; X64-NEXT: movl %edi, %eax
; X64-NEXT: movb $17, %cl
; X64-NEXT: shll $4, %eax
; X64-NEXT: leal (%rax,%rdi), %eax
; X64-NEXT: # kill: def $al killed $al killed $eax
; X64-NEXT: mulb %cl
; X64-NEXT: retq
%m = mul i8 %x, 17
ret i8 %m
@@ -202,10 +201,10 @@ define i8 @test_mul_by_17(i8 %x) {
define i8 @test_mul_by_18(i8 %x) {
; X64-LABEL: test_mul_by_18:
; X64: # %bb.0:
; X64-NEXT: movl %edi, %eax
; X64-NEXT: movb $18, %cl
; X64-NEXT: # kill: def $edi killed $edi def $rdi
; X64-NEXT: addl %edi, %edi
; X64-NEXT: leal (%rdi,%rdi,8), %eax
; X64-NEXT: # kill: def $al killed $al killed $eax
; X64-NEXT: mulb %cl
; X64-NEXT: retq
%m = mul i8 %x, 18
ret i8 %m
@@ -214,10 +213,10 @@ define i8 @test_mul_by_18(i8 %x) {
define i8 @test_mul_by_19(i8 %x) {
; X64-LABEL: test_mul_by_19:
; X64: # %bb.0:
; X64-NEXT: movl %edi, %eax
; X64-NEXT: movb $19, %cl
; X64-NEXT: # kill: def $edi killed $edi def $rdi
; X64-NEXT: leal (%rdi,%rdi,8), %eax
; X64-NEXT: leal (%rdi,%rax,2), %eax
; X64-NEXT: # kill: def $al killed $al killed $eax
; X64-NEXT: mulb %cl
; X64-NEXT: retq
%m = mul i8 %x, 19
ret i8 %m
@@ -226,10 +225,10 @@ define i8 @test_mul_by_19(i8 %x) {
define i8 @test_mul_by_20(i8 %x) {
; X64-LABEL: test_mul_by_20:
; X64: # %bb.0:
; X64-NEXT: movl %edi, %eax
; X64-NEXT: movb $20, %cl
; X64-NEXT: # kill: def $edi killed $edi def $rdi
; X64-NEXT: shll $2, %edi
; X64-NEXT: leal (%rdi,%rdi,4), %eax
; X64-NEXT: # kill: def $al killed $al killed $eax
; X64-NEXT: mulb %cl
; X64-NEXT: retq
%m = mul i8 %x, 20
ret i8 %m
@@ -238,10 +237,10 @@ define i8 @test_mul_by_20(i8 %x) {
define i8 @test_mul_by_21(i8 %x) {
; X64-LABEL: test_mul_by_21:
; X64: # %bb.0:
; X64-NEXT: movl %edi, %eax
; X64-NEXT: movb $21, %cl
; X64-NEXT: # kill: def $edi killed $edi def $rdi
; X64-NEXT: leal (%rdi,%rdi,4), %eax
; X64-NEXT: leal (%rdi,%rax,4), %eax
; X64-NEXT: # kill: def $al killed $al killed $eax
; X64-NEXT: mulb %cl
; X64-NEXT: retq
%m = mul i8 %x, 21
ret i8 %m
@@ -250,10 +249,11 @@ define i8 @test_mul_by_21(i8 %x) {
define i8 @test_mul_by_22(i8 %x) {
; X64-LABEL: test_mul_by_22:
; X64: # %bb.0:
; X64-NEXT: movl %edi, %eax
; X64-NEXT: movb $22, %cl
; X64-NEXT: # kill: def $edi killed $edi def $rdi
; X64-NEXT: leal (%rdi,%rdi,4), %eax
; X64-NEXT: leal (%rdi,%rax,4), %eax
; X64-NEXT: addl %edi, %eax
; X64-NEXT: # kill: def $al killed $al killed $eax
; X64-NEXT: mulb %cl
; X64-NEXT: retq
%m = mul i8 %x, 22
ret i8 %m
@@ -262,10 +262,11 @@ define i8 @test_mul_by_22(i8 %x) {
define i8 @test_mul_by_23(i8 %x) {
; X64-LABEL: test_mul_by_23:
; X64: # %bb.0:
; X64-NEXT: movl %edi, %eax
; X64-NEXT: movb $23, %cl
; X64-NEXT: # kill: def $edi killed $edi def $rdi
; X64-NEXT: leal (%rdi,%rdi,2), %eax
; X64-NEXT: shll $3, %eax
; X64-NEXT: subl %edi, %eax
; X64-NEXT: # kill: def $al killed $al killed $eax
; X64-NEXT: mulb %cl
; X64-NEXT: retq
%m = mul i8 %x, 23
ret i8 %m
@@ -274,10 +275,10 @@ define i8 @test_mul_by_23(i8 %x) {
define i8 @test_mul_by_24(i8 %x) {
; X64-LABEL: test_mul_by_24:
; X64: # %bb.0:
; X64-NEXT: movl %edi, %eax
; X64-NEXT: movb $24, %cl
; X64-NEXT: # kill: def $edi killed $edi def $rdi
; X64-NEXT: shll $3, %edi
; X64-NEXT: leal (%rdi,%rdi,2), %eax
; X64-NEXT: # kill: def $al killed $al killed $eax
; X64-NEXT: mulb %cl
; X64-NEXT: retq
%m = mul i8 %x, 24
ret i8 %m
@@ -286,10 +287,10 @@ define i8 @test_mul_by_24(i8 %x) {
define i8 @test_mul_by_25(i8 %x) {
; X64-LABEL: test_mul_by_25:
; X64: # %bb.0:
; X64-NEXT: movl %edi, %eax
; X64-NEXT: movb $25, %cl
; X64-NEXT: # kill: def $edi killed $edi def $rdi
; X64-NEXT: leal (%rdi,%rdi,4), %eax
; X64-NEXT: leal (%rax,%rax,4), %eax
; X64-NEXT: # kill: def $al killed $al killed $eax
; X64-NEXT: mulb %cl
; X64-NEXT: retq
%m = mul i8 %x, 25
ret i8 %m
@@ -298,10 +299,11 @@ define i8 @test_mul_by_25(i8 %x) {
define i8 @test_mul_by_26(i8 %x) {
; X64-LABEL: test_mul_by_26:
; X64: # %bb.0:
; X64-NEXT: movl %edi, %eax
; X64-NEXT: movb $26, %cl
; X64-NEXT: # kill: def $edi killed $edi def $rdi
; X64-NEXT: leal (%rdi,%rdi,4), %eax
; X64-NEXT: leal (%rax,%rax,4), %eax
; X64-NEXT: addl %edi, %eax
; X64-NEXT: # kill: def $al killed $al killed $eax
; X64-NEXT: mulb %cl
; X64-NEXT: retq
%m = mul i8 %x, 26
ret i8 %m
@@ -310,10 +312,10 @@ define i8 @test_mul_by_26(i8 %x) {
define i8 @test_mul_by_27(i8 %x) {
; X64-LABEL: test_mul_by_27:
; X64: # %bb.0:
; X64-NEXT: movl %edi, %eax
; X64-NEXT: movb $27, %cl
; X64-NEXT: # kill: def $edi killed $edi def $rdi
; X64-NEXT: leal (%rdi,%rdi,8), %eax
; X64-NEXT: leal (%rax,%rax,2), %eax
; X64-NEXT: # kill: def $al killed $al killed $eax
; X64-NEXT: mulb %cl
; X64-NEXT: retq
%m = mul i8 %x, 27
ret i8 %m
@@ -322,10 +324,11 @@ define i8 @test_mul_by_27(i8 %x) {
define i8 @test_mul_by_28(i8 %x) {
; X64-LABEL: test_mul_by_28:
; X64: # %bb.0:
; X64-NEXT: movl %edi, %eax
; X64-NEXT: movb $28, %cl
; X64-NEXT: # kill: def $edi killed $edi def $rdi
; X64-NEXT: leal (%rdi,%rdi,8), %eax
; X64-NEXT: leal (%rax,%rax,2), %eax
; X64-NEXT: addl %edi, %eax
; X64-NEXT: # kill: def $al killed $al killed $eax
; X64-NEXT: mulb %cl
; X64-NEXT: retq
%m = mul i8 %x, 28
ret i8 %m
@@ -334,10 +337,12 @@ define i8 @test_mul_by_28(i8 %x) {
define i8 @test_mul_by_29(i8 %x) {
; X64-LABEL: test_mul_by_29:
; X64: # %bb.0:
; X64-NEXT: movl %edi, %eax
; X64-NEXT: movb $29, %cl
; X64-NEXT: # kill: def $edi killed $edi def $rdi
; X64-NEXT: leal (%rdi,%rdi,8), %eax
; X64-NEXT: leal (%rax,%rax,2), %eax
; X64-NEXT: addl %edi, %eax
; X64-NEXT: addl %edi, %eax
; X64-NEXT: # kill: def $al killed $al killed $eax
; X64-NEXT: mulb %cl
; X64-NEXT: retq
%m = mul i8 %x, 29
ret i8 %m
@@ -347,9 +352,10 @@ define i8 @test_mul_by_30(i8 %x) {
; X64-LABEL: test_mul_by_30:
; X64: # %bb.0:
; X64-NEXT: movl %edi, %eax
; X64-NEXT: movb $30, %cl
; X64-NEXT: shll $5, %eax
; X64-NEXT: subl %edi, %eax
; X64-NEXT: subl %edi, %eax
; X64-NEXT: # kill: def $al killed $al killed $eax
; X64-NEXT: mulb %cl
; X64-NEXT: retq
%m = mul i8 %x, 30
ret i8 %m
@@ -359,9 +365,9 @@ define i8 @test_mul_by_31(i8 %x) {
; X64-LABEL: test_mul_by_31:
; X64: # %bb.0:
; X64-NEXT: movl %edi, %eax
; X64-NEXT: movb $31, %cl
; X64-NEXT: shll $5, %eax
; X64-NEXT: subl %edi, %eax
; X64-NEXT: # kill: def $al killed $al killed $eax
; X64-NEXT: mulb %cl
; X64-NEXT: retq
%m = mul i8 %x, 31
ret i8 %m
@@ -381,10 +387,10 @@ define i8 @test_mul_by_32(i8 %x) {
define i8 @test_mul_by_37(i8 %x) {
; X64-LABEL: test_mul_by_37:
; X64: # %bb.0:
; X64-NEXT: movl %edi, %eax
; X64-NEXT: movb $37, %cl
; X64-NEXT: # kill: def $edi killed $edi def $rdi
; X64-NEXT: leal (%rdi,%rdi,8), %eax
; X64-NEXT: leal (%rdi,%rax,4), %eax
; X64-NEXT: # kill: def $al killed $al killed $eax
; X64-NEXT: mulb %cl
; X64-NEXT: retq
%m = mul i8 %x, 37
ret i8 %m
@@ -393,10 +399,10 @@ define i8 @test_mul_by_37(i8 %x) {
define i8 @test_mul_by_41(i8 %x) {
; X64-LABEL: test_mul_by_41:
; X64: # %bb.0:
; X64-NEXT: movl %edi, %eax
; X64-NEXT: movb $41, %cl
; X64-NEXT: # kill: def $edi killed $edi def $rdi
; X64-NEXT: leal (%rdi,%rdi,4), %eax
; X64-NEXT: leal (%rdi,%rax,8), %eax
; X64-NEXT: # kill: def $al killed $al killed $eax
; X64-NEXT: mulb %cl
; X64-NEXT: retq
%m = mul i8 %x, 41
ret i8 %m
@@ -406,9 +412,10 @@ define i8 @test_mul_by_62(i8 %x) {
; X64-LABEL: test_mul_by_62:
; X64: # %bb.0:
; X64-NEXT: movl %edi, %eax
; X64-NEXT: movb $62, %cl
; X64-NEXT: shll $6, %eax
; X64-NEXT: subl %edi, %eax
; X64-NEXT: subl %edi, %eax
; X64-NEXT: # kill: def $al killed $al killed $eax
; X64-NEXT: mulb %cl
; X64-NEXT: retq
%m = mul i8 %x, 62
ret i8 %m
@@ -417,10 +424,11 @@ define i8 @test_mul_by_62(i8 %x) {
define i8 @test_mul_by_66(i8 %x) {
; X64-LABEL: test_mul_by_66:
; X64: # %bb.0:
; X64-NEXT: # kill: def $edi killed $edi def $rdi
; X64-NEXT: movl %edi, %eax
; X64-NEXT: movb $66, %cl
; X64-NEXT: shll $6, %eax
; X64-NEXT: leal (%rax,%rdi,2), %eax
; X64-NEXT: # kill: def $al killed $al killed $eax
; X64-NEXT: mulb %cl
; X64-NEXT: retq
%m = mul i8 %x, 66
ret i8 %m
@@ -429,10 +437,10 @@ define i8 @test_mul_by_66(i8 %x) {
define i8 @test_mul_by_73(i8 %x) {
; X64-LABEL: test_mul_by_73:
; X64: # %bb.0:
; X64-NEXT: movl %edi, %eax
; X64-NEXT: movb $73, %cl
; X64-NEXT: # kill: def $edi killed $edi def $rdi
; X64-NEXT: leal (%rdi,%rdi,8), %eax
; X64-NEXT: leal (%rdi,%rax,8), %eax
; X64-NEXT: # kill: def $al killed $al killed $eax
; X64-NEXT: mulb %cl
; X64-NEXT: retq
%m = mul i8 %x, 73
ret i8 %m
@@ -452,10 +460,11 @@ define i8 @test_mul_by_520(i8 %x) {
define i8 @test_mul_by_neg10(i8 %x) {
; X64-LABEL: test_mul_by_neg10:
; X64: # %bb.0:
; X64-NEXT: movl %edi, %eax
; X64-NEXT: movb $-10, %cl
; X64-NEXT: # kill: def $edi killed $edi def $rdi
; X64-NEXT: addl %edi, %edi
; X64-NEXT: leal (%rdi,%rdi,4), %eax
; X64-NEXT: negl %eax
; X64-NEXT: # kill: def $al killed $al killed $eax
; X64-NEXT: mulb %cl
; X64-NEXT: retq
%m = mul i8 %x, -10
ret i8 %m
@@ -464,10 +473,11 @@ define i8 @test_mul_by_neg10(i8 %x) {
define i8 @test_mul_by_neg36(i8 %x) {
; X64-LABEL: test_mul_by_neg36:
; X64: # %bb.0:
; X64-NEXT: movl %edi, %eax
; X64-NEXT: movb $-36, %cl
; X64-NEXT: # kill: def $edi killed $edi def $rdi
; X64-NEXT: shll $2, %edi
; X64-NEXT: leal (%rdi,%rdi,8), %eax
; X64-NEXT: negl %eax
; X64-NEXT: # kill: def $al killed $al killed $eax
; X64-NEXT: mulb %cl
; X64-NEXT: retq
%m = mul i8 %x, -36
ret i8 %m
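The two negative-constant tests above follow the same pattern: build |C|*x with a shift or add plus an LEA, then negate. Illustrative C++ only (helper names are hypothetical):

// Mirrors the neg10/neg36 sequences above.
#include <cstdint>

uint32_t mulByNeg10(uint32_t X) { uint32_t T = X + X;  return 0u - (T + T * 4); } // addl; leal; negl
uint32_t mulByNeg36(uint32_t X) { uint32_t T = X << 2; return 0u - (T + T * 8); } // shll $2; leal; negl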
@@ -6,14 +6,13 @@
define i8 @foo(i8 %tmp325) {
; CHECK-LABEL: foo:
; CHECK: # %bb.0:
; CHECK-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
; CHECK-NEXT: imull $111, %ecx, %eax
; CHECK-NEXT: shrl $12, %eax
; CHECK-NEXT: movb $37, %dl
; CHECK-NEXT: movzbl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT: imull $111, %eax, %ecx
; CHECK-NEXT: shrl $12, %ecx
; CHECK-NEXT: leal (%ecx,%ecx,8), %edx
; CHECK-NEXT: leal (%ecx,%edx,4), %ecx
; CHECK-NEXT: subb %cl, %al
; CHECK-NEXT: # kill: def $al killed $al killed $eax
; CHECK-NEXT: mulb %dl
; CHECK-NEXT: subb %al, %cl
; CHECK-NEXT: movl %ecx, %eax
; CHECK-NEXT: retl
%t546 = urem i8 %tmp325, 37
ret i8 %t546
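To make the CHECK lines above easier to follow: the quotient comes from the usual reciprocal trick, q = (x * 111) >> 12, which equals x / 37 for all 8-bit x, and the two leal instructions rebuild 37*q as q + 4*(q + 8*q) before the final subtraction. A hedged C++ restatement (the helper name is mine, not from the test):

// Restates the 32-bit path in the CHECK lines above.
#include <cstdint>

uint8_t urem37(uint8_t X) {
  uint32_t Q = ((uint32_t)X * 111) >> 12; // imull $111; shrl $12
  uint32_t Q37 = Q + (Q + Q * 8) * 4;     // leal (%ecx,%ecx,8); leal (%ecx,%edx,4)
  return (uint8_t)(X - Q37);              // subb
}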