[AArch64] Disable LDP/STP for quads

Disable LDP/STP for quads on Exynos M1 as they are not as efficient as pairs
of regular LDR/STR.
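For illustration only (not part of the original message), the trade-off for two adjacent 128-bit loads is roughly the following, with hypothetical register numbers:

    ldp q0, q1, [x0]        ; one quad LDP, now avoided on Exynos M1
versus
    ldr q0, [x0]            ; two regular quad LDRs, preferred on Exynos M1
    ldr q1, [x0, #16]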

Patch by Abderrazek Zaafrani <a.zaafrani@samsung.com>.

llvm-svn: 266223
Committed by Evandro Menezes on 2016-04-13 18:31:45 +00:00
parent 5927257206
commit 8d53f88162
3 changed files with 93 additions and 0 deletions

@@ -1409,6 +1409,20 @@ bool AArch64InstrInfo::isCandidateToMergeOrPair(MachineInstr *MI) const {
  if (isLdStPairSuppressed(MI))
    return false;
  // Do not pair quad ld/st for Exynos.
  if (Subtarget.isExynosM1()) {
    switch (MI->getOpcode()) {
    default:
      break;
    case AArch64::LDURQi:
    case AArch64::STURQi:
    case AArch64::LDRQui:
    case AArch64::STRQui:
      return false;
    }
  }
  return true;
}

@@ -1,5 +1,6 @@
; REQUIRES: asserts
; RUN: llc < %s -mtriple=arm64-linux-gnu -mcpu=cortex-a57 -verify-misched -debug-only=misched -o - 2>&1 > /dev/null | FileCheck %s
; RUN: llc < %s -mtriple=arm64-linux-gnu -mcpu=exynos-m1 -verify-misched -debug-only=misched -o - 2>&1 > /dev/null | FileCheck --check-prefix=EXYNOS %s
; Test ldr clustering.
; CHECK: ********** MI Scheduling **********
@@ -7,6 +8,11 @@
; CHECK: Cluster loads SU(1) - SU(2)
; CHECK: SU(1): %vreg{{[0-9]+}}<def> = LDRWui
; CHECK: SU(2): %vreg{{[0-9]+}}<def> = LDRWui
; EXYNOS: ********** MI Scheduling **********
; EXYNOS-LABEL: ldr_int:BB#0
; EXYNOS: Cluster loads SU(1) - SU(2)
; EXYNOS: SU(1): %vreg{{[0-9]+}}<def> = LDRWui
; EXYNOS: SU(2): %vreg{{[0-9]+}}<def> = LDRWui
define i32 @ldr_int(i32* %a) nounwind {
  %p1 = getelementptr inbounds i32, i32* %a, i32 1
  %tmp1 = load i32, i32* %p1, align 2
@@ -22,6 +28,11 @@ define i32 @ldr_int(i32* %a) nounwind {
; CHECK: Cluster loads SU(1) - SU(2)
; CHECK: SU(1): %vreg{{[0-9]+}}<def> = LDRSWui
; CHECK: SU(2): %vreg{{[0-9]+}}<def> = LDRSWui
; EXYNOS: ********** MI Scheduling **********
; EXYNOS-LABEL: ldp_sext_int:BB#0
; EXYNOS: Cluster loads SU(1) - SU(2)
; EXYNOS: SU(1): %vreg{{[0-9]+}}<def> = LDRSWui
; EXYNOS: SU(2): %vreg{{[0-9]+}}<def> = LDRSWui
define i64 @ldp_sext_int(i32* %p) nounwind {
  %tmp = load i32, i32* %p, align 4
  %add.ptr = getelementptr inbounds i32, i32* %p, i64 1
@@ -38,6 +49,11 @@ define i64 @ldp_sext_int(i32* %p) nounwind {
; CHECK: Cluster loads SU(2) - SU(1)
; CHECK: SU(1): %vreg{{[0-9]+}}<def> = LDURWi
; CHECK: SU(2): %vreg{{[0-9]+}}<def> = LDURWi
; EXYNOS: ********** MI Scheduling **********
; EXYNOS-LABEL: ldur_int:BB#0
; EXYNOS: Cluster loads SU(2) - SU(1)
; EXYNOS: SU(1): %vreg{{[0-9]+}}<def> = LDURWi
; EXYNOS: SU(2): %vreg{{[0-9]+}}<def> = LDURWi
define i32 @ldur_int(i32* %a) nounwind {
  %p1 = getelementptr inbounds i32, i32* %a, i32 -1
  %tmp1 = load i32, i32* %p1, align 2
@@ -53,6 +69,11 @@ define i32 @ldur_int(i32* %a) nounwind {
; CHECK: Cluster loads SU(3) - SU(4)
; CHECK: SU(3): %vreg{{[0-9]+}}<def> = LDRSWui
; CHECK: SU(4): %vreg{{[0-9]+}}:sub_32<def,read-undef> = LDRWui
; EXYNOS: ********** MI Scheduling **********
; EXYNOS-LABEL: ldp_half_sext_zext_int:BB#0
; EXYNOS: Cluster loads SU(3) - SU(4)
; EXYNOS: SU(3): %vreg{{[0-9]+}}<def> = LDRSWui
; EXYNOS: SU(4): %vreg{{[0-9]+}}:sub_32<def,read-undef> = LDRWui
define i64 @ldp_half_sext_zext_int(i64* %q, i32* %p) nounwind {
  %tmp0 = load i64, i64* %q, align 4
  %tmp = load i32, i32* %p, align 4
@@ -71,6 +92,11 @@ define i64 @ldp_half_sext_zext_int(i64* %q, i32* %p) nounwind {
; CHECK: Cluster loads SU(3) - SU(4)
; CHECK: SU(3): %vreg{{[0-9]+}}:sub_32<def,read-undef> = LDRWui
; CHECK: SU(4): %vreg{{[0-9]+}}<def> = LDRSWui
; EXYNOS: ********** MI Scheduling **********
; EXYNOS-LABEL: ldp_half_zext_sext_int:BB#0
; EXYNOS: Cluster loads SU(3) - SU(4)
; EXYNOS: SU(3): %vreg{{[0-9]+}}:sub_32<def,read-undef> = LDRWui
; EXYNOS: SU(4): %vreg{{[0-9]+}}<def> = LDRSWui
define i64 @ldp_half_zext_sext_int(i64* %q, i32* %p) nounwind {
  %tmp0 = load i64, i64* %q, align 4
  %tmp = load i32, i32* %p, align 4
@@ -89,6 +115,11 @@ define i64 @ldp_half_zext_sext_int(i64* %q, i32* %p) nounwind {
; CHECK-NOT: Cluster loads
; CHECK: SU(1): %vreg{{[0-9]+}}<def> = LDRWui
; CHECK: SU(2): %vreg{{[0-9]+}}<def> = LDRWui
; EXYNOS: ********** MI Scheduling **********
; EXYNOS-LABEL: ldr_int_volatile:BB#0
; EXYNOS-NOT: Cluster loads
; EXYNOS: SU(1): %vreg{{[0-9]+}}<def> = LDRWui
; EXYNOS: SU(2): %vreg{{[0-9]+}}<def> = LDRWui
define i32 @ldr_int_volatile(i32* %a) nounwind {
  %p1 = getelementptr inbounds i32, i32* %a, i32 1
  %tmp1 = load volatile i32, i32* %p1, align 2
@@ -97,3 +128,23 @@ define i32 @ldr_int_volatile(i32* %a) nounwind {
  %tmp3 = add i32 %tmp1, %tmp2
  ret i32 %tmp3
}
; Test ldq clustering (no clustering for Exynos).
; CHECK: ********** MI Scheduling **********
; CHECK-LABEL: ldq_cluster:BB#0
; CHECK: Cluster loads SU(1) - SU(3)
; CHECK: SU(1): %vreg{{[0-9]+}}<def> = LDRQui
; CHECK: SU(3): %vreg{{[0-9]+}}<def> = LDRQui
; EXYNOS: ********** MI Scheduling **********
; EXYNOS-LABEL: ldq_cluster:BB#0
; EXYNOS-NOT: Cluster loads
define <2 x i64> @ldq_cluster(i64* %p) {
  %a1 = bitcast i64* %p to <2 x i64>*
  %tmp1 = load <2 x i64>, <2 x i64>* %a1, align 8
  %add.ptr2 = getelementptr inbounds i64, i64* %p, i64 2
  %a2 = bitcast i64* %add.ptr2 to <2 x i64>*
  %tmp2 = add nsw <2 x i64> %tmp1, %tmp1
  %tmp3 = load <2 x i64>, <2 x i64>* %a2, align 8
  %res = mul nsw <2 x i64> %tmp2, %tmp3
  ret <2 x i64> %res
}

@@ -0,0 +1,28 @@
; RUN: llc < %s -march=aarch64 -mcpu=exynos-m1 -verify-machineinstrs -asm-verbose=false | FileCheck %s
; CHECK-LABEL: test_exynos_nopair_st
; CHECK: str
; CHECK: stur
; CHECK-NOT: stp
define void @test_exynos_nopair_st(double* %ptr, <2 x double> %v1, <2 x double> %v2) {
  %tmp1 = bitcast double* %ptr to <2 x double>*
  store <2 x double> %v2, <2 x double>* %tmp1, align 16
  %add.ptr = getelementptr inbounds double, double* %ptr, i64 -2
  %tmp = bitcast double* %add.ptr to <2 x double>*
  store <2 x double> %v1, <2 x double>* %tmp, align 16
  ret void
}
; CHECK-LABEL: test_exynos_nopair_ld
; CHECK: ldr
; CHECK: ldr
; CHECK-NOT: ldp
define <2 x i64> @test_exynos_nopair_ld(i64* %p) {
  %a1 = bitcast i64* %p to <2 x i64>*
  %tmp1 = load <2 x i64>, <2 x i64>* %a1, align 8
  %add.ptr2 = getelementptr inbounds i64, i64* %p, i64 2
  %a2 = bitcast i64* %add.ptr2 to <2 x i64>*
  %tmp2 = load <2 x i64>, <2 x i64>* %a2, align 8
  %add = add nsw <2 x i64> %tmp1, %tmp2
  ret <2 x i64> %add
}
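
For reference (not part of the patch), the Exynos M1 code these CHECK lines anticipate is roughly the following; register numbers are hypothetical, and only the absence of a quad STP/LDP matters:

    ; test_exynos_nopair_st: adjacent 128-bit stores stay split
    str  q1, [x0]
    stur q0, [x0, #-16]
    ; test_exynos_nopair_ld: adjacent 128-bit loads stay split
    ldr  q0, [x0]
    ldr  q1, [x0, #16]
    ; with quad pairing allowed, these could instead be emitted as
    ;   stp q0, q1, [x0, #-16]
    ;   ldp q0, q1, [x0]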