; llvm-project/llvm/test/CodeGen/PowerPC/fast-isel-load-store.ll

; RUN: llc -relocation-model=static < %s -O0 -verify-machineinstrs -fast-isel -fast-isel-abort=1 -mattr=-vsx -mtriple=powerpc64-unknown-linux-gnu -mcpu=pwr7 | FileCheck %s --check-prefix=ELF64
; RUN: llc -relocation-model=static < %s -O0 -verify-machineinstrs -fast-isel -fast-isel-abort=1 -mattr=+vsx -mtriple=powerpc64-unknown-linux-gnu -mcpu=pwr7 | FileCheck %s --check-prefix=VSX
; RUN: llc -relocation-model=static < %s -O0 -verify-machineinstrs -fast-isel -fast-isel-abort=1 -mattr=spe -mtriple=powerpc-unknown-linux-gnu -mcpu=e500 | FileCheck %s --check-prefix=SPE
; This test verifies that load/store instructions are properly generated,
; and that they pass MI verification.
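;; -fast-isel-abort=1 makes llc emit a fatal error whenever FastISel fails to
;; select a function, so every instruction checked below is known to come from
;; FastISel rather than from the SelectionDAG fallback path;
;; -verify-machineinstrs then runs the machine verifier over the result.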
@a = global i8 1, align 1
@b = global i16 2, align 2
@c = global i32 4, align 4
@d = global i64 8, align 8
@e = global float 1.25, align 4
@f = global double 3.5, align 8
%struct.s = type <{ i8, i32 }>
%struct.t = type <{ i8, i64 }>
@g = global %struct.s <{ i8 1, i32 2 }>, align 1
@h = global %struct.t <{ i8 1, i64 2 }>, align 1
@i = common global [8192 x i64] zeroinitializer, align 8
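;; @g and @h are packed structs with align 1, so their i32/i64 members sit at
;; byte offset 1, and element 5000 of @i lies 40000 bytes into the array. Both
;; layouts are chosen to defeat the D/DS-form displacement encodings exercised
;; in t13-t17 below.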
; load
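;; The integer loads below should select the zero-extending D-form loads lbz
;; (byte), lhz (halfword) and lwz (word), plus the doubleword ld.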
define i8 @t1() nounwind {
; ELF64: t1
%1 = load i8, i8* @a, align 1
; ELF64: lbz
%2 = add nsw i8 %1, 1
; ELF64: addi
ret i8 %2
}
define i16 @t2() nounwind {
; ELF64: t2
%1 = load i16, i16* @b, align 2
; ELF64: lhz
%2 = add nsw i16 %1, 1
; ELF64: addi
ret i16 %2
}
define i32 @t3() nounwind {
; ELF64: t3
%1 = load i32, i32* @c, align 4
; ELF64: lwz
%2 = add nsw i32 %1, 1
; ELF64: addi
ret i32 %2
}
define i64 @t4() nounwind {
; ELF64: t4
%1 = load i64, i64* @d, align 8
; ELF64: ld
%2 = add nsw i64 %1, 1
; ELF64: addi
ret i64 %2
}
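;; Under SPE (e500), single-precision floats live in GPRs, so the float load
;; below is a plain lwz and the add is the SPE efsadd; doubles use the 64-bit
;; evldd/evstdd instructions checked in t6 and t12.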
define float @t5() nounwind {
; ELF64: t5
; SPE: t5
%1 = load float, float* @e, align 4
; ELF64: lfs
; SPE: lwz
%2 = fadd float %1, 1.0
; ELF64: fadds
; SPE: efsadd
ret float %2
}
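;; With +vsx, the scalar double-precision operations select the VSX forms
;; instead, namely the indexed lxsdx/stxsdx for the memory accesses and
;; xsadddp for the add.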
define double @t6() nounwind {
; ELF64: t6
; SPE: t6
%1 = load double, double* @f, align 8
; ELF64: lfd
; VSX: lxsdx
; SPE: evldd
%2 = fadd double %1, 1.0
; ELF64: fadd
; VSX: xsadddp
; SPE: efdadd
ret double %2
}
; store
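;; In the integer store tests t7-t10, the addis/addi pair materializes the
;; TOC-relative address of the global (medium code model), the second addi
;; performs the +1 from the IR, and the final instruction is the store itself.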
define void @t7(i8 %v) nounwind {
; ELF64: t7
%1 = add nsw i8 %v, 1
store i8 %1, i8* @a, align 1
; ELF64: addis
; ELF64: addi
; ELF64: addi
; ELF64: stb
ret void
}
define void @t8(i16 %v) nounwind {
; ELF64: t8
%1 = add nsw i16 %v, 1
store i16 %1, i16* @b, align 2
; ELF64: addis
; ELF64: addi
; ELF64: addi
; ELF64: sth
ret void
}
define void @t9(i32 %v) nounwind {
; ELF64: t9
%1 = add nsw i32 %v, 1
store i32 %1, i32* @c, align 4
; ELF64: addis
; ELF64: addi
; ELF64: addi
; ELF64: stw
ret void
}
define void @t10(i64 %v) nounwind {
; ELF64: t10
%1 = add nsw i64 %v, 1
store i64 %1, i64* @d, align 8
; ELF64: addis
; ELF64: addi
; ELF64: addi
; ELF64: std
ret void
}
define void @t11(float %v) nounwind {
; ELF64: t11
; SPE: t11
%1 = fadd float %v, 1.0
store float %1, float* @e, align 4
; ELF64: fadds
; ELF64: stfs
; SPE: efsadd
; SPE: stw
ret void
}
define void @t12(double %v) nounwind {
; ELF64: t12
; SPE: t12
%1 = fadd double %v, 1.0
store double %1, double* @f, align 8
; ELF64: fadd
; ELF64: stfd
; VSX: xsadddp
; VSX: stxsdx
; SPE: efdadd
; SPE: evstdd
ret void
}
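;; lwa, ld and std are DS-form instructions, so their 16-bit displacement must
;; have the low two bits clear, i.e. be a multiple of 4. When an offset
;; violates that (or does not fit in 16 bits at all), FastISel materializes it
;; in a register and falls back to the X-form lwax/ldx/stdx.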
;; lwa requires an offset divisible by 4, so we need lwax here.
define i64 @t13() nounwind {
; ELF64: t13
%1 = load i32, i32* getelementptr inbounds (%struct.s, %struct.s* @g, i32 0, i32 1), align 1
%2 = sext i32 %1 to i64
; ELF64: li
; ELF64: lwax
%3 = add nsw i64 %2, 1
; ELF64: addi
ret i64 %3
}
;; ld requires an offset divisible by 4, so we need ldx here.
define i64 @t14() nounwind {
; ELF64: t14
%1 = load i64, i64* getelementptr inbounds (%struct.t, %struct.t* @h, i32 0, i32 1), align 1
; ELF64: li
; ELF64: ldx
%2 = add nsw i64 %1, 1
; ELF64: addi
ret i64 %2
}
;; std requires an offset divisible by 4, so we need stdx here.
define void @t15(i64 %v) nounwind {
; ELF64: t15
%1 = add nsw i64 %v, 1
store i64 %1, i64* getelementptr inbounds (%struct.t, %struct.t* @h, i32 0, i32 1), align 1
; ELF64: addis
; ELF64: addi
; ELF64: addi
; ELF64: li
; ELF64: stdx
ret void
}
;; ld requires an offset that fits in 16 bits, so we need ldx here.
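;; (The offset here is 5000 * 8 = 40000 bytes, above the 32767 maximum of a
;; signed 16-bit displacement, so it is built with lis+ori first.)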
define i64 @t16() nounwind {
; ELF64: t16
%1 = load i64, i64* getelementptr inbounds ([8192 x i64], [8192 x i64]* @i, i32 0, i64 5000), align 8
; ELF64: lis
; ELF64: ori
; ELF64: ldx
%2 = add nsw i64 %1, 1
; ELF64: addi
ret i64 %2
}
;; std requires an offset that fits in 16 bits, so we need stdx here.
define void @t17(i64 %v) nounwind {
; ELF64: t17
%1 = add nsw i64 %v, 1
store i64 %1, i64* getelementptr inbounds ([8192 x i64], [8192 x i64]* @i, i32 0, i64 5000), align 8
; ELF64: addis
; ELF64: addi
; ELF64: addi
; ELF64: lis
; ELF64: ori
; ELF64: stdx
ret void
}