From 812410f2d18e52df801a8fcac20848dee9052021 Mon Sep 17 00:00:00 2001
From: Adhemerval Zanella
Date: Fri, 30 Nov 2012 13:05:44 +0000
Subject: [PATCH] This patch fixes the Altivec addend construction for the
 fused multiply-add instruction (vmaddfp) to conform with IEEE and ensure the
 correct sign of a zero result when the resulting product is -0.0.

The -0.0 vector addend to vmaddfp is generated by creating a vector with all
bits set and then shifting each element left by 31 bits, resulting in a
vector of 0x80000000 (or -0.0 as float).

The 'buildvec_canonicalize.ll' test was adjusted to reflect this change, and
'vec_mul.ll' was extended with a float vector multiplication test.

llvm-svn: 168998
---
 llvm/lib/Target/PowerPC/PPCInstrAltivec.td    | 12 +++++----
 .../CodeGen/PowerPC/buildvec_canonicalize.ll  | 16 +++++------
 llvm/test/CodeGen/PowerPC/vec_mul.ll          | 27 +++++++++++++++++--
 3 files changed, 40 insertions(+), 15 deletions(-)

diff --git a/llvm/lib/Target/PowerPC/PPCInstrAltivec.td b/llvm/lib/Target/PowerPC/PPCInstrAltivec.td
index 87758e90fbd5..0cf28ae4b5c3 100644
--- a/llvm/lib/Target/PowerPC/PPCInstrAltivec.td
+++ b/llvm/lib/Target/PowerPC/PPCInstrAltivec.td
@@ -158,10 +158,6 @@ def vecspltisw : PatLeaf<(build_vector), [{
   return PPC::get_VSPLTI_elt(N, 4, *CurDAG).getNode() != 0;
 }], VSPLTISW_get_imm>;
 
-def V_immneg0 : PatLeaf<(build_vector), [{
-  return PPC::isAllNegativeZeroVector(N);
-}]>;
-
 //===----------------------------------------------------------------------===//
 // Helpers for defining instructions that directly correspond to intrinsics.
 
@@ -585,7 +581,12 @@ def VCMPGTUWo : VCMPo<646, "vcmpgtuw. $vD, $vA, $vB", v4i32>;
 def V_SET0 : VXForm_setzero<1220, (outs VRRC:$vD), (ins),
                       "vxor $vD, $vD, $vD", VecFP,
                       [(set VRRC:$vD, (v4i32 immAllZerosV))]>;
+let IMM=-1 in {
+def V_SETALLONES : VXForm_3<908, (outs VRRC:$vD), (ins),
+                      "vspltisw $vD, -1", VecFP,
+                      [(set VRRC:$vD, (v4i32 immAllOnesV))]>;
 }
+}
 } // VALU Operations.
 //===----------------------------------------------------------------------===//
 // Additional Altivec Patterns
@@ -672,7 +673,8 @@ def : Pat<(v4i32 (and VRRC:$A, (vnot_ppc VRRC:$B))),
           (VANDC VRRC:$A, VRRC:$B)>;
 
 def : Pat<(fmul VRRC:$vA, VRRC:$vB),
-          (VMADDFP VRRC:$vA, VRRC:$vB, (v4i32 (V_SET0)))>;
+          (VMADDFP VRRC:$vA, VRRC:$vB,
+                   (v4i32 (VSLW (V_SETALLONES), (V_SETALLONES))))>;
 
 // Fused multiply add and multiply sub for packed float. These are represented
 // separately from the real instructions above, for operations that must have
diff --git a/llvm/test/CodeGen/PowerPC/buildvec_canonicalize.ll b/llvm/test/CodeGen/PowerPC/buildvec_canonicalize.ll
index 0454c584bcfe..514ccdd6bd00 100644
--- a/llvm/test/CodeGen/PowerPC/buildvec_canonicalize.ll
+++ b/llvm/test/CodeGen/PowerPC/buildvec_canonicalize.ll
@@ -1,10 +1,4 @@
-; There should be exactly one vxor here.
-; RUN: llc < %s -march=ppc32 -mcpu=g5 --enable-unsafe-fp-math | \
-; RUN: grep vxor | count 1
-
-; There should be exactly one vsplti here.
-; RUN: llc < %s -march=ppc32 -mcpu=g5 --enable-unsafe-fp-math | \
-; RUN: grep vsplti | count 1
+; RUN: llc < %s -mattr=+altivec --enable-unsafe-fp-math | FileCheck %s
 
 define void @VXOR(<4 x float>* %P1, <4 x i32>* %P2, <4 x float>* %P3) {
         %tmp = load <4 x float>* %P3            ; <<4 x float>> [#uses=1]
@@ -15,10 +9,16 @@ define void @VXOR(<4 x float>* %P1, <4 x i32>* %P2, <4 x float>* %P3) {
         store <4 x i32> zeroinitializer, <4 x i32>* %P2
         ret void
 }
+; The fmul will emit a vspltisw to create the -0.0 vector used as the addend
+; to vmaddfp (so it is IEEE compliant with zero sign propagation).
+; CHECK: @VXOR
+; CHECK: vsplti
+; CHECK: vxor
 
 define void @VSPLTI(<4 x i32>* %P2, <8 x i16>* %P3) {
         store <4 x i32> bitcast (<16 x i8> < i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1 > to <4 x i32>), <4 x i32>* %P2
         store <8 x i16> < i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1 >, <8 x i16>* %P3
         ret void
 }
-
+; CHECK: @VSPLTI
+; CHECK: vsplti
diff --git a/llvm/test/CodeGen/PowerPC/vec_mul.ll b/llvm/test/CodeGen/PowerPC/vec_mul.ll
index 80f4de4a1728..8fb435830aeb 100644
--- a/llvm/test/CodeGen/PowerPC/vec_mul.ll
+++ b/llvm/test/CodeGen/PowerPC/vec_mul.ll
@@ -1,5 +1,4 @@
-; RUN: llc < %s -march=ppc32 -mcpu=g5 | not grep mullw
-; RUN: llc < %s -march=ppc32 -mcpu=g5 | grep vmsumuhm
+; RUN: llc < %s -mattr=+altivec | FileCheck %s
 
 define <4 x i32> @test_v4i32(<4 x i32>* %X, <4 x i32>* %Y) {
         %tmp = load <4 x i32>* %X               ; <<4 x i32>> [#uses=1]
@@ -7,6 +6,9 @@ define <4 x i32> @test_v4i32(<4 x i32>* %X, <4 x i32>* %Y) {
         %tmp3 = mul <4 x i32> %tmp, %tmp2       ; <<4 x i32>> [#uses=1]
         ret <4 x i32> %tmp3
 }
+; CHECK: test_v4i32:
+; CHECK: vmsumuhm
+; CHECK-NOT: mullw
 
 define <8 x i16> @test_v8i16(<8 x i16>* %X, <8 x i16>* %Y) {
         %tmp = load <8 x i16>* %X               ; <<8 x i16>> [#uses=1]
@@ -14,6 +16,9 @@ define <8 x i16> @test_v8i16(<8 x i16>* %X, <8 x i16>* %Y) {
         %tmp3 = mul <8 x i16> %tmp, %tmp2       ; <<8 x i16>> [#uses=1]
         ret <8 x i16> %tmp3
 }
+; CHECK: test_v8i16:
+; CHECK: vmladduhm
+; CHECK-NOT: mullw
 
 define <16 x i8> @test_v16i8(<16 x i8>* %X, <16 x i8>* %Y) {
         %tmp = load <16 x i8>* %X               ; <<16 x i8>> [#uses=1]
@@ -21,3 +26,21 @@ define <16 x i8> @test_v16i8(<16 x i8>* %X, <16 x i8>* %Y) {
         %tmp3 = mul <16 x i8> %tmp, %tmp2       ; <<16 x i8>> [#uses=1]
         ret <16 x i8> %tmp3
 }
+; CHECK: test_v16i8:
+; CHECK: vmuloub
+; CHECK: vmuleub
+; CHECK-NOT: mullw
+
+define <4 x float> @test_float(<4 x float>* %X, <4 x float>* %Y) {
+        %tmp = load <4 x float>* %X
+        %tmp2 = load <4 x float>* %Y
+        %tmp3 = fmul <4 x float> %tmp, %tmp2
+        ret <4 x float> %tmp3
+}
+; Check the creation of a negative-zero float vector by building a vector of
+; all bits set and shifting each element left by 31 bits, resulting in a
+; vector of 4 x 0x80000000 (-0.0 as float).
+; CHECK: test_float:
+; CHECK: vspltisw [[ZNEG:[0-9]+]], -1
+; CHECK: vslw {{[0-9]+}}, [[ZNEG]], [[ZNEG]]
+; CHECK: vmaddfp
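
Reviewer note (not part of the patch): the C sketch below illustrates why the
VSLW(V_SETALLONES, V_SETALLONES) addend pattern yields -0.0 in every lane.
"vspltisw vD, -1" splats 0xFFFFFFFF into each 32-bit word, and vslw shifts
each word left by the low-order 5 bits of the corresponding word of the shift
operand (31 here), giving 0x80000000, the IEEE-754 single-precision encoding
of -0.0. Using -0.0 rather than +0.0 as the vmaddfp addend is what preserves
the sign of the result: (-0.0) + (+0.0) rounds to +0.0, while (-0.0) + (-0.0)
stays -0.0 and x + (-0.0) == x for any other x. The snippet and its variable
names are illustrative only, not part of the LLVM sources.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void) {
    /* One element after "vspltisw vD, -1": every bit set. */
    uint32_t ones = 0xFFFFFFFFu;

    /* vslw shifts each word left by the low-order 5 bits of the
       corresponding word of its shift operand, i.e. by 31 here. */
    uint32_t elt = ones << (ones & 31u);

    float f;
    memcpy(&f, &elt, sizeof f);   /* reinterpret the bit pattern as a float */

    /* Prints: 0x80000000 -> -0.000000 */
    printf("0x%08x -> %f\n", (unsigned)elt, f);
    return 0;
}

Compiled with any C99 compiler (e.g. "cc demo.c && ./a.out"), it prints the
0x80000000 bit pattern and the -0.000000 value the new addend provides.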