; RUN: llc < %s -mattr=+altivec --enable-unsafe-fp-math | FileCheck %s
; VXOR: multiply the float vectors at %P3 and %P1, store the product back to
; %P3, then store zero vectors through %P1 (<4 x float>) and %P2 (<4 x i32>).
; The zeroinitializer stores should be materialized register-to-register with
; vxor (v ^ v == 0) rather than via a constant-pool load; the CHECK lines for
; this function verify both a vsplti (from the fmul lowering) and a vxor appear.
define void @VXOR(<4 x float>* %P1, <4 x i32>* %P2, <4 x float>* %P3) {
	%tmp = load <4 x float>* %P3		; <<4 x float>> [#uses=1]
	%tmp3 = load <4 x float>* %P1		; <<4 x float>> [#uses=1]
	%tmp4 = fmul <4 x float> %tmp, %tmp3	; <<4 x float>> [#uses=1]
	store <4 x float> %tmp4, <4 x float>* %P3
	store <4 x float> zeroinitializer, <4 x float>* %P1
	store <4 x i32> zeroinitializer, <4 x i32>* %P2
	ret void
}
; The fmul will spill a vspltisw to create a -0.0 vector used as the addend
; to vmaddfp (so it would be IEEE compliant with zero sign propagation).
; CHECK: @VXOR
; CHECK: vsplti
; CHECK: vxor
; VSPLTI: store all-ones vectors through %P2 (<4 x i32>, built via a bitcast of
; sixteen i8 -1 lanes) and %P3 (<8 x i16>).  Both constants are uniform splats,
; so they should be materialized with a vsplti[bhw] splat-immediate instruction
; instead of loading from the constant pool (verified by the CHECK lines below).
define void @VSPLTI(<4 x i32>* %P2, <8 x i16>* %P3) {
	store <4 x i32> bitcast (<16 x i8> < i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1 > to <4 x i32>), <4 x i32>* %P2
	store <8 x i16> < i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1 >, <8 x i16>* %P3
	ret void
}
; CHECK: @VSPLTI
; CHECK: vsplti