[AArch64] Redundant masks in downcast long multiply

Adds patterns to catch masks preceding a long multiply,
and generate a single umull/smull instruction instead.

Differential Revision: https://reviews.llvm.org/D89956
Nicholas Guy 2020-10-22 13:41:05 +01:00
parent cfc32267e2
commit 54d8627852
2 changed files with 86 additions and 0 deletions
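
A minimal C sketch of the source-level shape these patterns target (the function name and the pre-patch instruction sequence mentioned in the comments are illustrative assumptions, not part of this change):

#include <stdint.h>

// Both multiply operands are masked to their low 32 bits, so the i64
// product is really a widening 32x32->64 multiply.
uint64_t mul_lo32(uint64_t a, uint64_t b) {
    return (a & 0xffffffffu) * (b & 0xffffffffu);
}

// Without the new patterns the masks typically survive as two `and`
// instructions feeding a 64-bit `mul`; with them, llc emits a single
// `umull x0, w1, w0` (see the umull test below).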


@@ -1475,8 +1475,16 @@ def SMSUBLrrr : WideMulAccum<1, 0b001, "smsubl", sub, sext>;
def UMADDLrrr : WideMulAccum<0, 0b101, "umaddl", add, zext>;
def UMSUBLrrr : WideMulAccum<1, 0b101, "umsubl", sub, zext>;
def : Pat<(i64 (mul (sext_inreg GPR64:$Rn, i32), (sext_inreg GPR64:$Rm, i32))),
          (SMADDLrrr (EXTRACT_SUBREG $Rn, sub_32), (EXTRACT_SUBREG $Rm, sub_32), XZR)>;
def : Pat<(i64 (mul (sext_inreg GPR64:$Rn, i32), (sext GPR32:$Rm))),
          (SMADDLrrr (EXTRACT_SUBREG $Rn, sub_32), $Rm, XZR)>;
def : Pat<(i64 (mul (sext GPR32:$Rn), (sext GPR32:$Rm))),
          (SMADDLrrr GPR32:$Rn, GPR32:$Rm, XZR)>;
def : Pat<(i64 (mul (and GPR64:$Rn, 0xFFFFFFFF), (and GPR64:$Rm, 0xFFFFFFFF))),
          (UMADDLrrr (EXTRACT_SUBREG $Rn, sub_32), (EXTRACT_SUBREG $Rm, sub_32), XZR)>;
def : Pat<(i64 (mul (and GPR64:$Rn, 0xFFFFFFFF), (zext GPR32:$Rm))),
          (UMADDLrrr (EXTRACT_SUBREG $Rn, sub_32), $Rm, XZR)>;
def : Pat<(i64 (mul (zext GPR32:$Rn), (zext GPR32:$Rm))),
          (UMADDLrrr GPR32:$Rn, GPR32:$Rm, XZR)>;
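
These patterns select the widening multiply-accumulate forms with XZR as the accumulator, which the assembler prints under their aliases: umull Xd, Wn, Wm is umaddl Xd, Wn, Wm, XZR, and likewise smull is smaddl with XZR. A small C model of the UMADDL semantics (the helper names are made up for illustration):

#include <stdint.h>

// UMADDL: Xd = Xa + zext(Wn) * zext(Wm), a 32x32->64 multiply-add.
static inline uint64_t umaddl_model(uint32_t wn, uint32_t wm, uint64_t xa) {
    return xa + (uint64_t)wn * (uint64_t)wm;
}

// With XZR (always zero) as the accumulator, the result is exactly the
// widening multiply that prints as umull Xd, Wn, Wm.
uint64_t umull_model(uint32_t wn, uint32_t wm) {
    return umaddl_model(wn, wm, 0);
}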


@@ -0,0 +1,78 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=aarch64-none-linux-gnu < %s -o -| FileCheck %s

define i64 @umull(i64 %x0, i64 %x1) {
; CHECK-LABEL: umull:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: umull x0, w1, w0
; CHECK-NEXT: ret
entry:
%and = and i64 %x0, 4294967295
%and1 = and i64 %x1, 4294967295
%mul = mul nuw i64 %and1, %and
ret i64 %mul
}

define i64 @umull2(i64 %x, i32 %y) {
; CHECK-LABEL: umull2:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: umull x0, w0, w1
; CHECK-NEXT: ret
entry:
%and = and i64 %x, 4294967295
%conv = zext i32 %y to i64
%mul = mul nuw nsw i64 %and, %conv
ret i64 %mul
}

define i64 @umull2_commuted(i64 %x, i32 %y) {
; CHECK-LABEL: umull2_commuted:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: umull x0, w0, w1
; CHECK-NEXT: ret
entry:
%and = and i64 %x, 4294967295
%conv = zext i32 %y to i64
%mul = mul nuw nsw i64 %conv, %and
ret i64 %mul
}

define i64 @smull(i64 %x0, i64 %x1) {
; CHECK-LABEL: smull:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: smull x0, w1, w0
; CHECK-NEXT: ret
entry:
%sext = shl i64 %x0, 32
%conv1 = ashr exact i64 %sext, 32
%sext4 = shl i64 %x1, 32
%conv3 = ashr exact i64 %sext4, 32
%mul = mul nsw i64 %conv3, %conv1
ret i64 %mul
}

define i64 @smull2(i64 %x, i32 %y) {
; CHECK-LABEL: smull2:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: smull x0, w0, w1
; CHECK-NEXT: ret
entry:
%shl = shl i64 %x, 32
%shr = ashr exact i64 %shl, 32
%conv = sext i32 %y to i64
%mul = mul nsw i64 %shr, %conv
ret i64 %mul
}

define i64 @smull2_commuted(i64 %x, i32 %y) {
; CHECK-LABEL: smull2_commuted:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: smull x0, w0, w1
; CHECK-NEXT: ret
entry:
%shl = shl i64 %x, 32
%shr = ashr exact i64 %shl, 32
%conv = sext i32 %y to i64
%mul = mul nsw i64 %conv, %shr
ret i64 %mul
}