llvm-project/llvm/test/CodeGen/PowerPC/ppcf128-endian.ll

; RUN: llc -mcpu=pwr7 -mattr=+altivec -mattr=-vsx < %s | FileCheck %s
target datalayout = "e-m:e-i64:64-n32:64"
target triple = "powerpc64le-unknown-linux-gnu"
@g = common global ppc_fp128 0xM00000000000000000000000000000000, align 16
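
; The ppc_fp128 argument arrives in f1/f2; check that it is stored to @g
; with f1 at offset 0 and f2 at offset 8.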
define void @callee(ppc_fp128 %x) {
entry:
%x.addr = alloca ppc_fp128, align 16
store ppc_fp128 %x, ppc_fp128* %x.addr, align 16
%0 = load ppc_fp128, ppc_fp128* %x.addr, align 16
store ppc_fp128 %0, ppc_fp128* @g, align 16
ret void
}
; CHECK: @callee
; CHECK: ld [[REG:[0-9]+]], .LC
; CHECK: stfd 2, 8([[REG]])
; CHECK: stfd 1, 0([[REG]])
; CHECK: blr
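
; Check that @g is reloaded into f1 (offset 0) and f2 (offset 8) before
; the call to @test.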
define void @caller() {
entry:
%0 = load ppc_fp128, ppc_fp128* @g, align 16
call void @test(ppc_fp128 %0)
ret void
}
; CHECK: @caller
; CHECK: ld [[REG:[0-9]+]], .LC
; CHECK: lfd 2, 8([[REG]])
; CHECK: lfd 1, 0([[REG]])
; CHECK: bl test
declare void @test(ppc_fp128)
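
; The constant 1.0 is split across two constant-pool entries (1.0f and 0.0f);
; check that they are loaded with lfs into f1 and f2.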
define void @caller_const() {
entry:
call void @test(ppc_fp128 0xM3FF00000000000000000000000000000)
ret void
}
; CHECK: .LCPI[[LC:[0-9]+]]_0:
; CHECK: .long 1065353216
; CHECK: .LCPI[[LC]]_1:
; CHECK: .long 0
; CHECK: @caller_const
; CHECK: addi [[REG0:[0-9]+]], {{[0-9]+}}, .LCPI[[LC]]_0
; CHECK: addi [[REG1:[0-9]+]], {{[0-9]+}}, .LCPI[[LC]]_1
; CHECK: lfs 1, 0([[REG0]])
; CHECK: lfs 2, 0([[REG1]])
; CHECK: bl test
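
; Returning @g: check that f1 comes from offset 0 and f2 from offset 8.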
define ppc_fp128 @result() {
entry:
%0 = load ppc_fp128, ppc_fp128* @g, align 16
ret ppc_fp128 %0
}
; CHECK: @result
; CHECK: ld [[REG:[0-9]+]], .LC
; CHECK: lfd 1, 0([[REG]])
; CHECK: lfd 2, 8([[REG]])
; CHECK: blr
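
; Check that the ppc_fp128 returned by @test_result is stored to @g with
; f1 at offset 0 and f2 at offset 8.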
define void @use_result() {
entry:
%call = tail call ppc_fp128 @test_result() #3
store ppc_fp128 %call, ppc_fp128* @g, align 16
ret void
}
; CHECK: @use_result
; CHECK: bl test_result
; CHECK: ld [[REG:[0-9]+]], .LC
; CHECK: stfd 2, 8([[REG]])
; CHECK: stfd 1, 0([[REG]])
; CHECK: blr
declare ppc_fp128 @test_result()
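
; A ppc_fp128 returned in f1/f2 is passed straight through to the next call;
; check that nothing but the call-site nops appears between the two calls.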
define void @caller_result() {
entry:
%call = tail call ppc_fp128 @test_result()
tail call void @test(ppc_fp128 %call)
ret void
}
; CHECK: @caller_result
; CHECK: bl test_result
; CHECK-NEXT: nop
; CHECK-NEXT: bl test
; CHECK-NEXT: nop
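
; Bitcasting ppc_fp128 to i128 goes through a stack slot: both halves are
; spilled with stfd and reloaded into GPRs 3 and 4.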
define i128 @convert_from(ppc_fp128 %x) {
entry:
%0 = bitcast ppc_fp128 %x to i128
ret i128 %0
}
; CHECK: @convert_from
; CHECK: stfd 1, [[OFF1:.*]](1)
; CHECK: stfd 2, [[OFF2:.*]](1)
; CHECK: ld 3, [[OFF1]](1)
; CHECK: ld 4, [[OFF2]](1)
; CHECK: blr
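
; The reverse bitcast: GPRs 3 and 4 are stored to the stack and the two
; doubles are reloaded into f1 and f2.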
define ppc_fp128 @convert_to(i128 %x) {
entry:
%0 = bitcast i128 %x to ppc_fp128
ret ppc_fp128 %0
}
; CHECK: convert_to:
; CHECK: std 3, [[OFF1:.*]](1)
; CHECK: std 4, [[OFF2:.*]](1)
; CHECK: ori 2, 2, 0
; CHECK: lfd 1, [[OFF1]](1)
; CHECK: lfd 2, [[OFF2]](1)
; CHECK: blr
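
; Same as @convert_to, but the bitcast operand is the result of a shift.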
define ppc_fp128 @convert_to2(i128 %x) {
entry:
%shl = shl i128 %x, 1
%0 = bitcast i128 %shl to ppc_fp128
ret ppc_fp128 %0
}
; CHECK: convert_to2:
; CHECK: std 3, [[OFF1:.*]](1)
; CHECK: std 5, [[OFF2:.*]](1)
; CHECK: ori 2, 2, 0
; CHECK: lfd 1, [[OFF1]](1)
; CHECK: lfd 2, [[OFF2]](1)
; CHECK: blr
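
; Bitcasting a <4 x i32> to ppc_fp128: the vector is stored to the stack
; with stvx and the result of the fptrunc is reloaded with lfd.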
define double @convert_vector(<4 x i32> %x) {
entry:
%cast = bitcast <4 x i32> %x to ppc_fp128
%conv = fptrunc ppc_fp128 %cast to double
ret double %conv
}
; CHECK: @convert_vector
; CHECK: addi [[REG:[0-9]+]], 1, [[OFF:.*]]
; CHECK: stvx 2, 0, [[REG]]
; CHECK: lfd 1, [[OFF]](1)
; CHECK: blr
declare void @llvm.va_start(i8*)
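
; Check that the ppc_fp128 fetched with va_arg is loaded with lfd directly
; into f1.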
define double @vararg(i32 %a, ...) {
entry:
%va = alloca i8*, align 8
%va1 = bitcast i8** %va to i8*
call void @llvm.va_start(i8* %va1)
%arg = va_arg i8** %va, ppc_fp128
%conv = fptrunc ppc_fp128 %arg to double
ret double %conv
}
; CHECK: @vararg
; CHECK: lfd 1, 0({{[0-9]+}})
; CHECK: blr