[WebAssembly] Process p2align operands for SIMD loads and stores

Reviewers: aheejin, dschuff

Subscribers: sbc100, jgravelle-google, sunfish, llvm-commits

Differential Revision: https://reviews.llvm.org/D53886

llvm-svn: 345795
parent 6ff31fe34d
commit b61232eacd
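For readers of this diff: in the WebAssembly text format the alignment of a load or store is printed as a p2align immediate, the base-2 logarithm of the byte alignment, and the printer omits it when it equals the instruction's natural alignment (16 bytes, i.e. p2align=4, for v128 accesses). A minimal illustration of the notation (not part of this commit):

    v128.load 0($0):p2align=0    # 1-byte-aligned SIMD load
    v128.load 0($0):p2align=2    # 4-byte-aligned SIMD load
    v128.load 0($0)              # natural 16-byte alignment, attribute omitted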
@@ -89,6 +89,12 @@ bool WebAssemblySetP2AlignOperands::runOnMachineFunction(MachineFunction &MF) {
       case WebAssembly::LOAD_I64:
       case WebAssembly::LOAD_F32:
       case WebAssembly::LOAD_F64:
+      case WebAssembly::LOAD_v16i8:
+      case WebAssembly::LOAD_v8i16:
+      case WebAssembly::LOAD_v4i32:
+      case WebAssembly::LOAD_v2i64:
+      case WebAssembly::LOAD_v4f32:
+      case WebAssembly::LOAD_v2f64:
       case WebAssembly::LOAD8_S_I32:
       case WebAssembly::LOAD8_U_I32:
       case WebAssembly::LOAD16_S_I32:
@@ -164,6 +170,12 @@ bool WebAssemblySetP2AlignOperands::runOnMachineFunction(MachineFunction &MF) {
       case WebAssembly::STORE_I64:
       case WebAssembly::STORE_F32:
       case WebAssembly::STORE_F64:
+      case WebAssembly::STORE_v16i8:
+      case WebAssembly::STORE_v8i16:
+      case WebAssembly::STORE_v4i32:
+      case WebAssembly::STORE_v2i64:
+      case WebAssembly::STORE_v4f32:
+      case WebAssembly::STORE_v2f64:
       case WebAssembly::STORE8_I32:
       case WebAssembly::STORE16_I32:
       case WebAssembly::STORE8_I64:
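These hunks add the SIMD load and store opcodes to the same switch that already lists the scalar memory instructions, so the pass now rewrites their p2align operand too instead of leaving it at 0. Informally (a summary of the observed behavior, not a quote of the implementation), the operand becomes p2align = min(log2(actual alignment), log2(natural access size)), which is why the new test below expects no attribute for 16-byte and 32-byte aligned accesses and an explicit p2align for under-aligned ones.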
@@ -0,0 +1,534 @@
; RUN: llc < %s -asm-verbose=false -disable-wasm-fallthrough-return-opt -wasm-disable-explicit-locals -wasm-keep-registers -wasm-enable-unimplemented-simd -mattr=+simd128 | FileCheck %s

; Test loads and stores with custom alignment values.

target datalayout = "e-m:e-p:32:32-i64:64-n32:64-S128"
target triple = "wasm32-unknown-unknown"

; ==============================================================================
; 16 x i8
; ==============================================================================

; CHECK-LABEL: load_v16i8_a1:
; CHECK-NEXT: .param i32{{$}}
; CHECK-NEXT: .result v128{{$}}
; CHECK-NEXT: v128.load $push[[R:[0-9]+]]=, 0($0):p2align=0{{$}}
; CHECK-NEXT: return $pop[[R]]{{$}}
define <16 x i8> @load_v16i8_a1(<16 x i8> *%p) {
  %v = load <16 x i8>, <16 x i8>* %p, align 1
  ret <16 x i8> %v
}

; CHECK-LABEL: load_v16i8_a4:
; CHECK-NEXT: .param i32{{$}}
; CHECK-NEXT: .result v128{{$}}
; CHECK-NEXT: v128.load $push[[R:[0-9]+]]=, 0($0):p2align=2{{$}}
; CHECK-NEXT: return $pop[[R]]{{$}}
define <16 x i8> @load_v16i8_a4(<16 x i8> *%p) {
  %v = load <16 x i8>, <16 x i8>* %p, align 4
  ret <16 x i8> %v
}

; 16 is the default alignment for v128 so no attribute is needed.

; CHECK-LABEL: load_v16i8_a16:
; CHECK-NEXT: .param i32{{$}}
; CHECK-NEXT: .result v128{{$}}
; CHECK-NEXT: v128.load $push[[R:[0-9]+]]=, 0($0){{$}}
; CHECK-NEXT: return $pop[[R]]{{$}}
define <16 x i8> @load_v16i8_a16(<16 x i8> *%p) {
  %v = load <16 x i8>, <16 x i8>* %p, align 16
  ret <16 x i8> %v
}

; 32 is greater than the default alignment so it is ignored.

; CHECK-LABEL: load_v16i8_a32:
; CHECK-NEXT: .param i32{{$}}
; CHECK-NEXT: .result v128{{$}}
; CHECK-NEXT: v128.load $push[[R:[0-9]+]]=, 0($0){{$}}
; CHECK-NEXT: return $pop[[R]]{{$}}
define <16 x i8> @load_v16i8_a32(<16 x i8> *%p) {
  %v = load <16 x i8>, <16 x i8>* %p, align 32
  ret <16 x i8> %v
}

; CHECK-LABEL: store_v16i8_a1:
; CHECK-NEXT: .param i32, v128{{$}}
; CHECK-NEXT: v128.store 0($0):p2align=0, $1{{$}}
; CHECK-NEXT: return{{$}}
define void @store_v16i8_a1(<16 x i8> *%p, <16 x i8> %v) {
  store <16 x i8> %v, <16 x i8>* %p, align 1
  ret void
}

; CHECK-LABEL: store_v16i8_a4:
; CHECK-NEXT: .param i32, v128{{$}}
; CHECK-NEXT: v128.store 0($0):p2align=2, $1{{$}}
; CHECK-NEXT: return{{$}}
define void @store_v16i8_a4(<16 x i8> *%p, <16 x i8> %v) {
  store <16 x i8> %v, <16 x i8>* %p, align 4
  ret void
}

; 16 is the default alignment for v128 so no attribute is needed.

; CHECK-LABEL: store_v16i8_a16:
; CHECK-NEXT: .param i32, v128{{$}}
; CHECK-NEXT: v128.store 0($0), $1{{$}}
; CHECK-NEXT: return{{$}}
define void @store_v16i8_a16(<16 x i8> *%p, <16 x i8> %v) {
  store <16 x i8> %v, <16 x i8>* %p, align 16
  ret void
}

; 32 is greater than the default alignment so it is ignored.

; CHECK-LABEL: store_v16i8_a32:
; CHECK-NEXT: .param i32, v128{{$}}
; CHECK-NEXT: v128.store 0($0), $1{{$}}
; CHECK-NEXT: return{{$}}
define void @store_v16i8_a32(<16 x i8> *%p, <16 x i8> %v) {
  store <16 x i8> %v, <16 x i8>* %p, align 32
  ret void
}

; ==============================================================================
; 8 x i16
; ==============================================================================

; CHECK-LABEL: load_v8i16_a1:
; CHECK-NEXT: .param i32{{$}}
; CHECK-NEXT: .result v128{{$}}
; CHECK-NEXT: v128.load $push[[R:[0-9]+]]=, 0($0):p2align=0{{$}}
; CHECK-NEXT: return $pop[[R]]{{$}}
define <8 x i16> @load_v8i16_a1(<8 x i16> *%p) {
  %v = load <8 x i16>, <8 x i16>* %p, align 1
  ret <8 x i16> %v
}

; CHECK-LABEL: load_v8i16_a4:
; CHECK-NEXT: .param i32{{$}}
; CHECK-NEXT: .result v128{{$}}
; CHECK-NEXT: v128.load $push[[R:[0-9]+]]=, 0($0):p2align=2{{$}}
; CHECK-NEXT: return $pop[[R]]{{$}}
define <8 x i16> @load_v8i16_a4(<8 x i16> *%p) {
  %v = load <8 x i16>, <8 x i16>* %p, align 4
  ret <8 x i16> %v
}

; 16 is the default alignment for v128 so no attribute is needed.

; CHECK-LABEL: load_v8i16_a16:
; CHECK-NEXT: .param i32{{$}}
; CHECK-NEXT: .result v128{{$}}
; CHECK-NEXT: v128.load $push[[R:[0-9]+]]=, 0($0){{$}}
; CHECK-NEXT: return $pop[[R]]{{$}}
define <8 x i16> @load_v8i16_a16(<8 x i16> *%p) {
  %v = load <8 x i16>, <8 x i16>* %p, align 16
  ret <8 x i16> %v
}

; 32 is greater than the default alignment so it is ignored.

; CHECK-LABEL: load_v8i16_a32:
; CHECK-NEXT: .param i32{{$}}
; CHECK-NEXT: .result v128{{$}}
; CHECK-NEXT: v128.load $push[[R:[0-9]+]]=, 0($0){{$}}
; CHECK-NEXT: return $pop[[R]]{{$}}
define <8 x i16> @load_v8i16_a32(<8 x i16> *%p) {
  %v = load <8 x i16>, <8 x i16>* %p, align 32
  ret <8 x i16> %v
}

; CHECK-LABEL: store_v8i16_a1:
; CHECK-NEXT: .param i32, v128{{$}}
; CHECK-NEXT: v128.store 0($0):p2align=0, $1{{$}}
; CHECK-NEXT: return{{$}}
define void @store_v8i16_a1(<8 x i16> *%p, <8 x i16> %v) {
  store <8 x i16> %v, <8 x i16>* %p, align 1
  ret void
}

; CHECK-LABEL: store_v8i16_a4:
; CHECK-NEXT: .param i32, v128{{$}}
; CHECK-NEXT: v128.store 0($0):p2align=2, $1{{$}}
; CHECK-NEXT: return{{$}}
define void @store_v8i16_a4(<8 x i16> *%p, <8 x i16> %v) {
  store <8 x i16> %v, <8 x i16>* %p, align 4
  ret void
}

; 16 is the default alignment for v128 so no attribute is needed.

; CHECK-LABEL: store_v8i16_a16:
; CHECK-NEXT: .param i32, v128{{$}}
; CHECK-NEXT: v128.store 0($0), $1{{$}}
; CHECK-NEXT: return{{$}}
define void @store_v8i16_a16(<8 x i16> *%p, <8 x i16> %v) {
  store <8 x i16> %v, <8 x i16>* %p, align 16
  ret void
}

; 32 is greater than the default alignment so it is ignored.

; CHECK-LABEL: store_v8i16_a32:
; CHECK-NEXT: .param i32, v128{{$}}
; CHECK-NEXT: v128.store 0($0), $1{{$}}
; CHECK-NEXT: return{{$}}
define void @store_v8i16_a32(<8 x i16> *%p, <8 x i16> %v) {
  store <8 x i16> %v, <8 x i16>* %p, align 32
  ret void
}

; ==============================================================================
; 4 x i32
; ==============================================================================

; CHECK-LABEL: load_v4i32_a1:
; CHECK-NEXT: .param i32{{$}}
; CHECK-NEXT: .result v128{{$}}
; CHECK-NEXT: v128.load $push[[R:[0-9]+]]=, 0($0):p2align=0{{$}}
; CHECK-NEXT: return $pop[[R]]{{$}}
define <4 x i32> @load_v4i32_a1(<4 x i32> *%p) {
  %v = load <4 x i32>, <4 x i32>* %p, align 1
  ret <4 x i32> %v
}

; CHECK-LABEL: load_v4i32_a4:
; CHECK-NEXT: .param i32{{$}}
; CHECK-NEXT: .result v128{{$}}
; CHECK-NEXT: v128.load $push[[R:[0-9]+]]=, 0($0):p2align=2{{$}}
; CHECK-NEXT: return $pop[[R]]{{$}}
define <4 x i32> @load_v4i32_a4(<4 x i32> *%p) {
  %v = load <4 x i32>, <4 x i32>* %p, align 4
  ret <4 x i32> %v
}

; 16 is the default alignment for v128 so no attribute is needed.

; CHECK-LABEL: load_v4i32_a16:
; CHECK-NEXT: .param i32{{$}}
; CHECK-NEXT: .result v128{{$}}
; CHECK-NEXT: v128.load $push[[R:[0-9]+]]=, 0($0){{$}}
; CHECK-NEXT: return $pop[[R]]{{$}}
define <4 x i32> @load_v4i32_a16(<4 x i32> *%p) {
  %v = load <4 x i32>, <4 x i32>* %p, align 16
  ret <4 x i32> %v
}

; 32 is greater than the default alignment so it is ignored.

; CHECK-LABEL: load_v4i32_a32:
; CHECK-NEXT: .param i32{{$}}
; CHECK-NEXT: .result v128{{$}}
; CHECK-NEXT: v128.load $push[[R:[0-9]+]]=, 0($0){{$}}
; CHECK-NEXT: return $pop[[R]]{{$}}
define <4 x i32> @load_v4i32_a32(<4 x i32> *%p) {
  %v = load <4 x i32>, <4 x i32>* %p, align 32
  ret <4 x i32> %v
}

; CHECK-LABEL: store_v4i32_a1:
; CHECK-NEXT: .param i32, v128{{$}}
; CHECK-NEXT: v128.store 0($0):p2align=0, $1{{$}}
; CHECK-NEXT: return{{$}}
define void @store_v4i32_a1(<4 x i32> *%p, <4 x i32> %v) {
  store <4 x i32> %v, <4 x i32>* %p, align 1
  ret void
}

; CHECK-LABEL: store_v4i32_a4:
; CHECK-NEXT: .param i32, v128{{$}}
; CHECK-NEXT: v128.store 0($0):p2align=2, $1{{$}}
; CHECK-NEXT: return{{$}}
define void @store_v4i32_a4(<4 x i32> *%p, <4 x i32> %v) {
  store <4 x i32> %v, <4 x i32>* %p, align 4
  ret void
}

; 16 is the default alignment for v128 so no attribute is needed.

; CHECK-LABEL: store_v4i32_a16:
; CHECK-NEXT: .param i32, v128{{$}}
; CHECK-NEXT: v128.store 0($0), $1{{$}}
; CHECK-NEXT: return{{$}}
define void @store_v4i32_a16(<4 x i32> *%p, <4 x i32> %v) {
  store <4 x i32> %v, <4 x i32>* %p, align 16
  ret void
}

; 32 is greater than the default alignment so it is ignored.

; CHECK-LABEL: store_v4i32_a32:
; CHECK-NEXT: .param i32, v128{{$}}
; CHECK-NEXT: v128.store 0($0), $1{{$}}
; CHECK-NEXT: return{{$}}
define void @store_v4i32_a32(<4 x i32> *%p, <4 x i32> %v) {
  store <4 x i32> %v, <4 x i32>* %p, align 32
  ret void
}

; ==============================================================================
; 2 x i64
; ==============================================================================

; CHECK-LABEL: load_v2i64_a1:
; CHECK-NEXT: .param i32{{$}}
; CHECK-NEXT: .result v128{{$}}
; CHECK-NEXT: v128.load $push[[R:[0-9]+]]=, 0($0):p2align=0{{$}}
; CHECK-NEXT: return $pop[[R]]{{$}}
define <2 x i64> @load_v2i64_a1(<2 x i64> *%p) {
  %v = load <2 x i64>, <2 x i64>* %p, align 1
  ret <2 x i64> %v
}

; CHECK-LABEL: load_v2i64_a4:
; CHECK-NEXT: .param i32{{$}}
; CHECK-NEXT: .result v128{{$}}
; CHECK-NEXT: v128.load $push[[R:[0-9]+]]=, 0($0):p2align=2{{$}}
; CHECK-NEXT: return $pop[[R]]{{$}}
define <2 x i64> @load_v2i64_a4(<2 x i64> *%p) {
  %v = load <2 x i64>, <2 x i64>* %p, align 4
  ret <2 x i64> %v
}

; 16 is the default alignment for v128 so no attribute is needed.

; CHECK-LABEL: load_v2i64_a16:
; CHECK-NEXT: .param i32{{$}}
; CHECK-NEXT: .result v128{{$}}
; CHECK-NEXT: v128.load $push[[R:[0-9]+]]=, 0($0){{$}}
; CHECK-NEXT: return $pop[[R]]{{$}}
define <2 x i64> @load_v2i64_a16(<2 x i64> *%p) {
  %v = load <2 x i64>, <2 x i64>* %p, align 16
  ret <2 x i64> %v
}

; 32 is greater than the default alignment so it is ignored.

; CHECK-LABEL: load_v2i64_a32:
; CHECK-NEXT: .param i32{{$}}
; CHECK-NEXT: .result v128{{$}}
; CHECK-NEXT: v128.load $push[[R:[0-9]+]]=, 0($0){{$}}
; CHECK-NEXT: return $pop[[R]]{{$}}
define <2 x i64> @load_v2i64_a32(<2 x i64> *%p) {
  %v = load <2 x i64>, <2 x i64>* %p, align 32
  ret <2 x i64> %v
}

; CHECK-LABEL: store_v2i64_a1:
; CHECK-NEXT: .param i32, v128{{$}}
; CHECK-NEXT: v128.store 0($0):p2align=0, $1{{$}}
; CHECK-NEXT: return{{$}}
define void @store_v2i64_a1(<2 x i64> *%p, <2 x i64> %v) {
  store <2 x i64> %v, <2 x i64>* %p, align 1
  ret void
}

; CHECK-LABEL: store_v2i64_a4:
; CHECK-NEXT: .param i32, v128{{$}}
; CHECK-NEXT: v128.store 0($0):p2align=2, $1{{$}}
; CHECK-NEXT: return{{$}}
define void @store_v2i64_a4(<2 x i64> *%p, <2 x i64> %v) {
  store <2 x i64> %v, <2 x i64>* %p, align 4
  ret void
}

; 16 is the default alignment for v128 so no attribute is needed.

; CHECK-LABEL: store_v2i64_a16:
; CHECK-NEXT: .param i32, v128{{$}}
; CHECK-NEXT: v128.store 0($0), $1{{$}}
; CHECK-NEXT: return{{$}}
define void @store_v2i64_a16(<2 x i64> *%p, <2 x i64> %v) {
  store <2 x i64> %v, <2 x i64>* %p, align 16
  ret void
}

; 32 is greater than the default alignment so it is ignored.

; CHECK-LABEL: store_v2i64_a32:
; CHECK-NEXT: .param i32, v128{{$}}
; CHECK-NEXT: v128.store 0($0), $1{{$}}
; CHECK-NEXT: return{{$}}
define void @store_v2i64_a32(<2 x i64> *%p, <2 x i64> %v) {
  store <2 x i64> %v, <2 x i64>* %p, align 32
  ret void
}

; ==============================================================================
; 4 x float
; ==============================================================================

; CHECK-LABEL: load_v4f32_a1:
; CHECK-NEXT: .param i32{{$}}
; CHECK-NEXT: .result v128{{$}}
; CHECK-NEXT: v128.load $push[[R:[0-9]+]]=, 0($0):p2align=0{{$}}
; CHECK-NEXT: return $pop[[R]]{{$}}
define <4 x float> @load_v4f32_a1(<4 x float> *%p) {
  %v = load <4 x float>, <4 x float>* %p, align 1
  ret <4 x float> %v
}

; CHECK-LABEL: load_v4f32_a4:
; CHECK-NEXT: .param i32{{$}}
; CHECK-NEXT: .result v128{{$}}
; CHECK-NEXT: v128.load $push[[R:[0-9]+]]=, 0($0):p2align=2{{$}}
; CHECK-NEXT: return $pop[[R]]{{$}}
define <4 x float> @load_v4f32_a4(<4 x float> *%p) {
  %v = load <4 x float>, <4 x float>* %p, align 4
  ret <4 x float> %v
}

; 16 is the default alignment for v128 so no attribute is needed.

; CHECK-LABEL: load_v4f32_a16:
; CHECK-NEXT: .param i32{{$}}
; CHECK-NEXT: .result v128{{$}}
; CHECK-NEXT: v128.load $push[[R:[0-9]+]]=, 0($0){{$}}
; CHECK-NEXT: return $pop[[R]]{{$}}
define <4 x float> @load_v4f32_a16(<4 x float> *%p) {
  %v = load <4 x float>, <4 x float>* %p, align 16
  ret <4 x float> %v
}

; 32 is greater than the default alignment so it is ignored.

; CHECK-LABEL: load_v4f32_a32:
; CHECK-NEXT: .param i32{{$}}
; CHECK-NEXT: .result v128{{$}}
; CHECK-NEXT: v128.load $push[[R:[0-9]+]]=, 0($0){{$}}
; CHECK-NEXT: return $pop[[R]]{{$}}
define <4 x float> @load_v4f32_a32(<4 x float> *%p) {
  %v = load <4 x float>, <4 x float>* %p, align 32
  ret <4 x float> %v
}

; CHECK-LABEL: store_v4f32_a1:
; CHECK-NEXT: .param i32, v128{{$}}
; CHECK-NEXT: v128.store 0($0):p2align=0, $1{{$}}
; CHECK-NEXT: return{{$}}
define void @store_v4f32_a1(<4 x float> *%p, <4 x float> %v) {
  store <4 x float> %v, <4 x float>* %p, align 1
  ret void
}

; CHECK-LABEL: store_v4f32_a4:
; CHECK-NEXT: .param i32, v128{{$}}
; CHECK-NEXT: v128.store 0($0):p2align=2, $1{{$}}
; CHECK-NEXT: return{{$}}
define void @store_v4f32_a4(<4 x float> *%p, <4 x float> %v) {
  store <4 x float> %v, <4 x float>* %p, align 4
  ret void
}

; 16 is the default alignment for v128 so no attribute is needed.

; CHECK-LABEL: store_v4f32_a16:
; CHECK-NEXT: .param i32, v128{{$}}
; CHECK-NEXT: v128.store 0($0), $1{{$}}
; CHECK-NEXT: return{{$}}
define void @store_v4f32_a16(<4 x float> *%p, <4 x float> %v) {
  store <4 x float> %v, <4 x float>* %p, align 16
  ret void
}

; 32 is greater than the default alignment so it is ignored.

; CHECK-LABEL: store_v4f32_a32:
; CHECK-NEXT: .param i32, v128{{$}}
; CHECK-NEXT: v128.store 0($0), $1{{$}}
; CHECK-NEXT: return{{$}}
define void @store_v4f32_a32(<4 x float> *%p, <4 x float> %v) {
  store <4 x float> %v, <4 x float>* %p, align 32
  ret void
}

; ==============================================================================
; 2 x double
; ==============================================================================

; CHECK-LABEL: load_v2f64_a1:
; CHECK-NEXT: .param i32{{$}}
; CHECK-NEXT: .result v128{{$}}
; CHECK-NEXT: v128.load $push[[R:[0-9]+]]=, 0($0):p2align=0{{$}}
; CHECK-NEXT: return $pop[[R]]{{$}}
define <2 x double> @load_v2f64_a1(<2 x double> *%p) {
  %v = load <2 x double>, <2 x double>* %p, align 1
  ret <2 x double> %v
}

; CHECK-LABEL: load_v2f64_a4:
; CHECK-NEXT: .param i32{{$}}
; CHECK-NEXT: .result v128{{$}}
; CHECK-NEXT: v128.load $push[[R:[0-9]+]]=, 0($0):p2align=2{{$}}
; CHECK-NEXT: return $pop[[R]]{{$}}
define <2 x double> @load_v2f64_a4(<2 x double> *%p) {
  %v = load <2 x double>, <2 x double>* %p, align 4
  ret <2 x double> %v
}

; 16 is the default alignment for v128 so no attribute is needed.

; CHECK-LABEL: load_v2f64_a16:
; CHECK-NEXT: .param i32{{$}}
; CHECK-NEXT: .result v128{{$}}
; CHECK-NEXT: v128.load $push[[R:[0-9]+]]=, 0($0){{$}}
; CHECK-NEXT: return $pop[[R]]{{$}}
define <2 x double> @load_v2f64_a16(<2 x double> *%p) {
  %v = load <2 x double>, <2 x double>* %p, align 16
  ret <2 x double> %v
}

; 32 is greater than the default alignment so it is ignored.

; CHECK-LABEL: load_v2f64_a32:
; CHECK-NEXT: .param i32{{$}}
; CHECK-NEXT: .result v128{{$}}
; CHECK-NEXT: v128.load $push[[R:[0-9]+]]=, 0($0){{$}}
; CHECK-NEXT: return $pop[[R]]{{$}}
define <2 x double> @load_v2f64_a32(<2 x double> *%p) {
  %v = load <2 x double>, <2 x double>* %p, align 32
  ret <2 x double> %v
}

; CHECK-LABEL: store_v2f64_a1:
; CHECK-NEXT: .param i32, v128{{$}}
; CHECK-NEXT: v128.store 0($0):p2align=0, $1{{$}}
; CHECK-NEXT: return{{$}}
define void @store_v2f64_a1(<2 x double> *%p, <2 x double> %v) {
  store <2 x double> %v, <2 x double>* %p, align 1
  ret void
}

; CHECK-LABEL: store_v2f64_a4:
; CHECK-NEXT: .param i32, v128{{$}}
; CHECK-NEXT: v128.store 0($0):p2align=2, $1{{$}}
; CHECK-NEXT: return{{$}}
define void @store_v2f64_a4(<2 x double> *%p, <2 x double> %v) {
  store <2 x double> %v, <2 x double>* %p, align 4
  ret void
}

; 16 is the default alignment for v128 so no attribute is needed.

; CHECK-LABEL: store_v2f64_a16:
; CHECK-NEXT: .param i32, v128{{$}}
; CHECK-NEXT: v128.store 0($0), $1{{$}}
; CHECK-NEXT: return{{$}}
define void @store_v2f64_a16(<2 x double> *%p, <2 x double> %v) {
  store <2 x double> %v, <2 x double>* %p, align 16
  ret void
}

; 32 is greater than the default alignment so it is ignored.

; CHECK-LABEL: store_v2f64_a32:
; CHECK-NEXT: .param i32, v128{{$}}
; CHECK-NEXT: v128.store 0($0), $1{{$}}
; CHECK-NEXT: return{{$}}
define void @store_v2f64_a32(<2 x double> *%p, <2 x double> %v) {
  store <2 x double> %v, <2 x double>* %p, align 32
  ret void
}
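The remaining hunks update the existing SIMD offset test: each CHECK line that previously expected an explicit :p2align=0 attribute (the operand was left at 0 before this change) now expects the instruction without the attribute, because the operand is now set to the default 16-byte alignment and the printer omits it.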
@@ -14,7 +14,7 @@ target triple = "wasm32-unknown-unknown"
 ; NO-SIMD128-NOT: v128
 ; SIMD128-NEXT: .param i32{{$}}
 ; SIMD128-NEXT: .result v128{{$}}
-; SIMD128-NEXT: v128.load $push[[R:[0-9]+]]=, 0($0):p2align=0{{$}}
+; SIMD128-NEXT: v128.load $push[[R:[0-9]+]]=, 0($0){{$}}
 ; SIMD128-NEXT: return $pop[[R]]{{$}}
 define <16 x i8> @load_v16i8(<16 x i8>* %p) {
   %v = load <16 x i8>, <16 x i8>* %p

@@ -25,7 +25,7 @@ define <16 x i8> @load_v16i8(<16 x i8>* %p) {
 ; NO-SIMD128-NOT: v128
 ; SIMD128-NEXT: .param i32{{$}}
 ; SIMD128-NEXT: .result v128{{$}}
-; SIMD128-NEXT: v128.load $push[[R:[0-9]+]]=, 16($0):p2align=0{{$}}
+; SIMD128-NEXT: v128.load $push[[R:[0-9]+]]=, 16($0){{$}}
 ; SIMD128-NEXT: return $pop[[R]]{{$}}
 define <16 x i8> @load_v16i8_with_folded_offset(<16 x i8>* %p) {
   %q = ptrtoint <16 x i8>* %p to i32

@@ -39,7 +39,7 @@ define <16 x i8> @load_v16i8_with_folded_offset(<16 x i8>* %p) {
 ; NO-SIMD128-NOT: v128
 ; SIMD128-NEXT: .param i32{{$}}
 ; SIMD128-NEXT: .result v128{{$}}
-; SIMD128-NEXT: v128.load $push[[R:[0-9]+]]=, 16($0):p2align=0{{$}}
+; SIMD128-NEXT: v128.load $push[[R:[0-9]+]]=, 16($0){{$}}
 ; SIMD128-NEXT: return $pop[[R]]{{$}}
 define <16 x i8> @load_v16i8_with_folded_gep_offset(<16 x i8>* %p) {
   %s = getelementptr inbounds <16 x i8>, <16 x i8>* %p, i32 1

@@ -53,7 +53,7 @@ define <16 x i8> @load_v16i8_with_folded_gep_offset(<16 x i8>* %p) {
 ; SIMD128-NEXT: .result v128{{$}}
 ; SIMD128-NEXT: i32.const $push[[L0:[0-9]+]]=, -16{{$}}
 ; SIMD128-NEXT: i32.add $push[[L1:[0-9]+]]=, $0, $pop[[L0]]{{$}}
-; SIMD128-NEXT: v128.load $push[[R:[0-9]+]]=, 0($pop[[L1]]):p2align=0{{$}}
+; SIMD128-NEXT: v128.load $push[[R:[0-9]+]]=, 0($pop[[L1]]){{$}}
 ; SIMD128-NEXT: return $pop[[R]]{{$}}
 define <16 x i8> @load_v16i8_with_unfolded_gep_negative_offset(<16 x i8>* %p) {
   %s = getelementptr inbounds <16 x i8>, <16 x i8>* %p, i32 -1

@@ -67,7 +67,7 @@ define <16 x i8> @load_v16i8_with_unfolded_gep_negative_offset(<16 x i8>* %p) {
 ; SIMD128-NEXT: .result v128{{$}}
 ; SIMD128-NEXT: i32.const $push[[L0:[0-9]+]]=, 16{{$}}
 ; SIMD128-NEXT: i32.add $push[[L1:[0-9]+]]=, $0, $pop[[L0]]{{$}}
-; SIMD128-NEXT: v128.load $push[[R:[0-9]+]]=, 0($pop[[L1]]):p2align=0{{$}}
+; SIMD128-NEXT: v128.load $push[[R:[0-9]+]]=, 0($pop[[L1]]){{$}}
 ; SIMD128-NEXT: return $pop[[R]]{{$}}
 define <16 x i8> @load_v16i8_with_unfolded_offset(<16 x i8>* %p) {
   %q = ptrtoint <16 x i8>* %p to i32

@@ -83,7 +83,7 @@ define <16 x i8> @load_v16i8_with_unfolded_offset(<16 x i8>* %p) {
 ; SIMD128-NEXT: .result v128{{$}}
 ; SIMD128-NEXT: i32.const $push[[L0:[0-9]+]]=, 16{{$}}
 ; SIMD128-NEXT: i32.add $push[[L1:[0-9]+]]=, $0, $pop[[L0]]{{$}}
-; SIMD128-NEXT: v128.load $push[[R:[0-9]+]]=, 0($pop[[L1]]):p2align=0{{$}}
+; SIMD128-NEXT: v128.load $push[[R:[0-9]+]]=, 0($pop[[L1]]){{$}}
 ; SIMD128-NEXT: return $pop[[R]]{{$}}
 define <16 x i8> @load_v16i8_with_unfolded_gep_offset(<16 x i8>* %p) {
   %s = getelementptr <16 x i8>, <16 x i8>* %p, i32 1

@@ -95,7 +95,7 @@ define <16 x i8> @load_v16i8_with_unfolded_gep_offset(<16 x i8>* %p) {
 ; NO-SIMD128-NOT: v128
 ; SIMD128-NEXT: .result v128{{$}}
 ; SIMD128-NEXT: i32.const $push[[L0:[0-9]+]]=, 0{{$}}
-; SIMD128-NEXT: v128.load $push[[R:[0-9]+]]=, 32($pop[[L0]]):p2align=0{{$}}
+; SIMD128-NEXT: v128.load $push[[R:[0-9]+]]=, 32($pop[[L0]]){{$}}
 ; SIMD128-NEXT: return $pop[[R]]{{$}}
 define <16 x i8> @load_v16i8_from_numeric_address() {
   %s = inttoptr i32 32 to <16 x i8>*

@@ -107,7 +107,7 @@ define <16 x i8> @load_v16i8_from_numeric_address() {
 ; NO-SIMD128-NOT: v128
 ; SIMD128-NEXT: .result v128{{$}}
 ; SIMD128-NEXT: i32.const $push[[L0:[0-9]+]]=, 0{{$}}
-; SIMD128-NEXT: v128.load $push[[R:[0-9]+]]=, gv_v16i8($pop[[L0]]):p2align=0{{$}}
+; SIMD128-NEXT: v128.load $push[[R:[0-9]+]]=, gv_v16i8($pop[[L0]]){{$}}
 ; SIMD128-NEXT: return $pop[[R]]{{$}}
 @gv_v16i8 = global <16 x i8> <i8 42, i8 42, i8 42, i8 42, i8 42, i8 42, i8 42, i8 42, i8 42, i8 42, i8 42, i8 42, i8 42, i8 42, i8 42, i8 42>
 define <16 x i8> @load_v16i8_from_global_address() {

@@ -118,7 +118,7 @@ define <16 x i8> @load_v16i8_from_global_address() {
 ; CHECK-LABEL: store_v16i8:
 ; NO-SIMD128-NOT: v128
 ; SIMD128-NEXT: .param v128, i32{{$}}
-; SIMD128-NEXT: v128.store 0($1):p2align=0, $0{{$}}
+; SIMD128-NEXT: v128.store 0($1), $0{{$}}
 define void @store_v16i8(<16 x i8> %v, <16 x i8>* %p) {
   store <16 x i8> %v , <16 x i8>* %p
   ret void

@@ -127,7 +127,7 @@ define void @store_v16i8(<16 x i8> %v, <16 x i8>* %p) {
 ; CHECK-LABEL: store_v16i8_with_folded_offset:
 ; NO-SIMD128-NOT: v128
 ; SIMD128-NEXT: .param v128, i32{{$}}
-; SIMD128-NEXT: v128.store 16($1):p2align=0, $0{{$}}
+; SIMD128-NEXT: v128.store 16($1), $0{{$}}
 define void @store_v16i8_with_folded_offset(<16 x i8> %v, <16 x i8>* %p) {
   %q = ptrtoint <16 x i8>* %p to i32
   %r = add nuw i32 %q, 16

@@ -139,7 +139,7 @@ define void @store_v16i8_with_folded_offset(<16 x i8> %v, <16 x i8>* %p) {
 ; CHECK-LABEL: store_v16i8_with_folded_gep_offset:
 ; NO-SIMD128-NOT: v128
 ; SIMD128-NEXT: .param v128, i32{{$}}
-; SIMD128-NEXT: v128.store 16($1):p2align=0, $0{{$}}
+; SIMD128-NEXT: v128.store 16($1), $0{{$}}
 define void @store_v16i8_with_folded_gep_offset(<16 x i8> %v, <16 x i8>* %p) {
   %s = getelementptr inbounds <16 x i8>, <16 x i8>* %p, i32 1
   store <16 x i8> %v , <16 x i8>* %s

@@ -151,7 +151,7 @@ define void @store_v16i8_with_folded_gep_offset(<16 x i8> %v, <16 x i8>* %p) {
 ; SIMD128-NEXT: .param v128, i32{{$}}
 ; SIMD128-NEXT: i32.const $push[[L0:[0-9]+]]=, -16{{$}}
 ; SIMD128-NEXT: i32.add $push[[R:[0-9]+]]=, $1, $pop[[L0]]{{$}}
-; SIMD128-NEXT: v128.store 0($pop[[R]]):p2align=0, $0{{$}}
+; SIMD128-NEXT: v128.store 0($pop[[R]]), $0{{$}}
 define void @store_v16i8_with_unfolded_gep_negative_offset(<16 x i8> %v, <16 x i8>* %p) {
   %s = getelementptr inbounds <16 x i8>, <16 x i8>* %p, i32 -1
   store <16 x i8> %v , <16 x i8>* %s

@@ -163,7 +163,7 @@ define void @store_v16i8_with_unfolded_gep_negative_offset(<16 x i8> %v, <16 x i
 ; SIMD128-NEXT: .param v128, i32{{$}}
 ; SIMD128-NEXT: i32.const $push[[L0:[0-9]+]]=, -16{{$}}
 ; SIMD128-NEXT: i32.add $push[[R:[0-9]+]]=, $1, $pop[[L0]]{{$}}
-; SIMD128-NEXT: v128.store 0($pop[[R]]):p2align=0, $0{{$}}
+; SIMD128-NEXT: v128.store 0($pop[[R]]), $0{{$}}
 define void @store_v16i8_with_unfolded_offset(<16 x i8> %v, <16 x i8>* %p) {
   %s = getelementptr inbounds <16 x i8>, <16 x i8>* %p, i32 -1
   store <16 x i8> %v , <16 x i8>* %s

@@ -175,7 +175,7 @@ define void @store_v16i8_with_unfolded_offset(<16 x i8> %v, <16 x i8>* %p) {
 ; SIMD128-NEXT: .param v128, i32{{$}}
 ; SIMD128-NEXT: i32.const $push[[L0:[0-9]+]]=, 16{{$}}
 ; SIMD128-NEXT: i32.add $push[[R:[0-9]+]]=, $1, $pop[[L0]]{{$}}
-; SIMD128-NEXT: v128.store 0($pop[[R]]):p2align=0, $0{{$}}
+; SIMD128-NEXT: v128.store 0($pop[[R]]), $0{{$}}
 define void @store_v16i8_with_unfolded_gep_offset(<16 x i8> %v, <16 x i8>* %p) {
   %s = getelementptr <16 x i8>, <16 x i8>* %p, i32 1
   store <16 x i8> %v , <16 x i8>* %s

@@ -186,7 +186,7 @@ define void @store_v16i8_with_unfolded_gep_offset(<16 x i8> %v, <16 x i8>* %p) {
 ; NO-SIMD128-NOT: v128
 ; SIMD128-NEXT: .param v128{{$}}
 ; SIMD128-NEXT: i32.const $push[[R:[0-9]+]]=, 0{{$}}
-; SIMD128-NEXT: v128.store 32($pop[[R]]):p2align=0, $0{{$}}
+; SIMD128-NEXT: v128.store 32($pop[[R]]), $0{{$}}
 define void @store_v16i8_to_numeric_address(<16 x i8> %v) {
   %s = inttoptr i32 32 to <16 x i8>*
   store <16 x i8> %v , <16 x i8>* %s

@@ -197,7 +197,7 @@ define void @store_v16i8_to_numeric_address(<16 x i8> %v) {
 ; NO-SIMD128-NOT: v128
 ; SIMD128-NEXT: .param v128{{$}}
 ; SIMD128-NEXT: i32.const $push[[R:[0-9]+]]=, 0{{$}}
-; SIMD128-NEXT: v128.store gv_v16i8($pop[[R]]):p2align=0, $0{{$}}
+; SIMD128-NEXT: v128.store gv_v16i8($pop[[R]]), $0{{$}}
 define void @store_v16i8_to_global_address(<16 x i8> %v) {
   store <16 x i8> %v , <16 x i8>* @gv_v16i8
   ret void
@@ -210,7 +210,7 @@ define void @store_v16i8_to_global_address(<16 x i8> %v) {
 ; NO-SIMD128-NOT: v128
 ; SIMD128-NEXT: .param i32{{$}}
 ; SIMD128-NEXT: .result v128{{$}}
-; SIMD128-NEXT: v128.load $push[[R:[0-9]+]]=, 0($0):p2align=0{{$}}
+; SIMD128-NEXT: v128.load $push[[R:[0-9]+]]=, 0($0){{$}}
 ; SIMD128-NEXT: return $pop[[R]]{{$}}
 define <8 x i16> @load_v8i16(<8 x i16>* %p) {
   %v = load <8 x i16>, <8 x i16>* %p

@@ -221,7 +221,7 @@ define <8 x i16> @load_v8i16(<8 x i16>* %p) {
 ; NO-SIMD128-NOT: v128
 ; SIMD128-NEXT: .param i32{{$}}
 ; SIMD128-NEXT: .result v128{{$}}
-; SIMD128-NEXT: v128.load $push[[R:[0-9]+]]=, 16($0):p2align=0{{$}}
+; SIMD128-NEXT: v128.load $push[[R:[0-9]+]]=, 16($0){{$}}
 ; SIMD128-NEXT: return $pop[[R]]{{$}}
 define <8 x i16> @load_v8i16_with_folded_offset(<8 x i16>* %p) {
   %q = ptrtoint <8 x i16>* %p to i32

@@ -235,7 +235,7 @@ define <8 x i16> @load_v8i16_with_folded_offset(<8 x i16>* %p) {
 ; NO-SIMD128-NOT: v128
 ; SIMD128-NEXT: .param i32{{$}}
 ; SIMD128-NEXT: .result v128{{$}}
-; SIMD128-NEXT: v128.load $push[[R:[0-9]+]]=, 16($0):p2align=0{{$}}
+; SIMD128-NEXT: v128.load $push[[R:[0-9]+]]=, 16($0){{$}}
 ; SIMD128-NEXT: return $pop[[R]]{{$}}
 define <8 x i16> @load_v8i16_with_folded_gep_offset(<8 x i16>* %p) {
   %s = getelementptr inbounds <8 x i16>, <8 x i16>* %p, i32 1

@@ -249,7 +249,7 @@ define <8 x i16> @load_v8i16_with_folded_gep_offset(<8 x i16>* %p) {
 ; SIMD128-NEXT: .result v128{{$}}
 ; SIMD128-NEXT: i32.const $push[[L0:[0-9]+]]=, -16{{$}}
 ; SIMD128-NEXT: i32.add $push[[L1:[0-9]+]]=, $0, $pop[[L0]]{{$}}
-; SIMD128-NEXT: v128.load $push[[R:[0-9]+]]=, 0($pop[[L1]]):p2align=0{{$}}
+; SIMD128-NEXT: v128.load $push[[R:[0-9]+]]=, 0($pop[[L1]]){{$}}
 ; SIMD128-NEXT: return $pop[[R]]{{$}}
 define <8 x i16> @load_v8i16_with_unfolded_gep_negative_offset(<8 x i16>* %p) {
   %s = getelementptr inbounds <8 x i16>, <8 x i16>* %p, i32 -1

@@ -263,7 +263,7 @@ define <8 x i16> @load_v8i16_with_unfolded_gep_negative_offset(<8 x i16>* %p) {
 ; SIMD128-NEXT: .result v128{{$}}
 ; SIMD128-NEXT: i32.const $push[[L0:[0-9]+]]=, 16{{$}}
 ; SIMD128-NEXT: i32.add $push[[L1:[0-9]+]]=, $0, $pop[[L0]]{{$}}
-; SIMD128-NEXT: v128.load $push[[L0:[0-9]+]]=, 0($pop[[L1]]):p2align=0{{$}}
+; SIMD128-NEXT: v128.load $push[[L0:[0-9]+]]=, 0($pop[[L1]]){{$}}
 ; SIMD128-NEXT: return $pop[[L0]]{{$}}
 define <8 x i16> @load_v8i16_with_unfolded_offset(<8 x i16>* %p) {
   %q = ptrtoint <8 x i16>* %p to i32

@@ -279,7 +279,7 @@ define <8 x i16> @load_v8i16_with_unfolded_offset(<8 x i16>* %p) {
 ; SIMD128-NEXT: .result v128{{$}}
 ; SIMD128-NEXT: i32.const $push[[L0:[0-9]+]]=, 16{{$}}
 ; SIMD128-NEXT: i32.add $push[[L1:[0-9]+]]=, $0, $pop[[L0]]{{$}}
-; SIMD128-NEXT: v128.load $push[[R:[0-9]+]]=, 0($pop[[L1]]):p2align=0{{$}}
+; SIMD128-NEXT: v128.load $push[[R:[0-9]+]]=, 0($pop[[L1]]){{$}}
 ; SIMD128-NEXT: return $pop[[R]]{{$}}
 define <8 x i16> @load_v8i16_with_unfolded_gep_offset(<8 x i16>* %p) {
   %s = getelementptr <8 x i16>, <8 x i16>* %p, i32 1

@@ -291,7 +291,7 @@ define <8 x i16> @load_v8i16_with_unfolded_gep_offset(<8 x i16>* %p) {
 ; NO-SIMD128-NOT: v128
 ; SIMD128-NEXT: .result v128{{$}}
 ; SIMD128-NEXT: i32.const $push[[L0:[0-9]+]]=, 0{{$}}
-; SIMD128-NEXT: v128.load $push[[R:[0-9]+]]=, 32($pop[[L0]]):p2align=0{{$}}
+; SIMD128-NEXT: v128.load $push[[R:[0-9]+]]=, 32($pop[[L0]]){{$}}
 ; SIMD128-NEXT: return $pop[[R]]{{$}}
 define <8 x i16> @load_v8i16_from_numeric_address() {
   %s = inttoptr i32 32 to <8 x i16>*

@@ -303,7 +303,7 @@ define <8 x i16> @load_v8i16_from_numeric_address() {
 ; NO-SIMD128-NOT: v128
 ; SIMD128-NEXT: .result v128{{$}}
 ; SIMD128-NEXT: i32.const $push[[L0:[0-9]+]]=, 0{{$}}
-; SIMD128-NEXT: v128.load $push[[R:[0-9]+]]=, gv_v8i16($pop[[L0]]):p2align=0{{$}}
+; SIMD128-NEXT: v128.load $push[[R:[0-9]+]]=, gv_v8i16($pop[[L0]]){{$}}
 ; SIMD128-NEXT: return $pop[[R]]{{$}}
 @gv_v8i16 = global <8 x i16> <i16 42, i16 42, i16 42, i16 42, i16 42, i16 42, i16 42, i16 42>
 define <8 x i16> @load_v8i16_from_global_address() {

@@ -314,7 +314,7 @@ define <8 x i16> @load_v8i16_from_global_address() {
 ; CHECK-LABEL: store_v8i16:
 ; NO-SIMD128-NOT: v128
 ; SIMD128-NEXT: .param v128, i32{{$}}
-; SIMD128-NEXT: v128.store 0($1):p2align=0, $0{{$}}
+; SIMD128-NEXT: v128.store 0($1), $0{{$}}
 define void @store_v8i16(<8 x i16> %v, <8 x i16>* %p) {
   store <8 x i16> %v , <8 x i16>* %p
   ret void

@@ -323,7 +323,7 @@ define void @store_v8i16(<8 x i16> %v, <8 x i16>* %p) {
 ; CHECK-LABEL: store_v8i16_with_folded_offset:
 ; NO-SIMD128-NOT: v128
 ; SIMD128-NEXT: .param v128, i32{{$}}
-; SIMD128-NEXT: v128.store 16($1):p2align=0, $0{{$}}
+; SIMD128-NEXT: v128.store 16($1), $0{{$}}
 define void @store_v8i16_with_folded_offset(<8 x i16> %v, <8 x i16>* %p) {
   %q = ptrtoint <8 x i16>* %p to i32
   %r = add nuw i32 %q, 16

@@ -335,7 +335,7 @@ define void @store_v8i16_with_folded_offset(<8 x i16> %v, <8 x i16>* %p) {
 ; CHECK-LABEL: store_v8i16_with_folded_gep_offset:
 ; NO-SIMD128-NOT: v128
 ; SIMD128-NEXT: .param v128, i32{{$}}
-; SIMD128-NEXT: v128.store 16($1):p2align=0, $0{{$}}
+; SIMD128-NEXT: v128.store 16($1), $0{{$}}
 define void @store_v8i16_with_folded_gep_offset(<8 x i16> %v, <8 x i16>* %p) {
   %s = getelementptr inbounds <8 x i16>, <8 x i16>* %p, i32 1
   store <8 x i16> %v , <8 x i16>* %s

@@ -347,7 +347,7 @@ define void @store_v8i16_with_folded_gep_offset(<8 x i16> %v, <8 x i16>* %p) {
 ; SIMD128-NEXT: .param v128, i32{{$}}
 ; SIMD128-NEXT: i32.const $push[[L0:[0-9]+]]=, -16{{$}}
 ; SIMD128-NEXT: i32.add $push[[R:[0-9]+]]=, $1, $pop[[L0]]{{$}}
-; SIMD128-NEXT: v128.store 0($pop[[R]]):p2align=0, $0{{$}}
+; SIMD128-NEXT: v128.store 0($pop[[R]]), $0{{$}}
 define void @store_v8i16_with_unfolded_gep_negative_offset(<8 x i16> %v, <8 x i16>* %p) {
   %s = getelementptr inbounds <8 x i16>, <8 x i16>* %p, i32 -1
   store <8 x i16> %v , <8 x i16>* %s

@@ -359,7 +359,7 @@ define void @store_v8i16_with_unfolded_gep_negative_offset(<8 x i16> %v, <8 x i1
 ; SIMD128-NEXT: .param v128, i32{{$}}
 ; SIMD128-NEXT: i32.const $push[[L0:[0-9]+]]=, -16{{$}}
 ; SIMD128-NEXT: i32.add $push[[R:[0-9]+]]=, $1, $pop[[L0]]{{$}}
-; SIMD128-NEXT: v128.store 0($pop[[R]]):p2align=0, $0{{$}}
+; SIMD128-NEXT: v128.store 0($pop[[R]]), $0{{$}}
 define void @store_v8i16_with_unfolded_offset(<8 x i16> %v, <8 x i16>* %p) {
   %s = getelementptr inbounds <8 x i16>, <8 x i16>* %p, i32 -1
   store <8 x i16> %v , <8 x i16>* %s

@@ -371,7 +371,7 @@ define void @store_v8i16_with_unfolded_offset(<8 x i16> %v, <8 x i16>* %p) {
 ; SIMD128-NEXT: .param v128, i32{{$}}
 ; SIMD128-NEXT: i32.const $push[[L0:[0-9]+]]=, 16{{$}}
 ; SIMD128-NEXT: i32.add $push[[R:[0-9]+]]=, $1, $pop[[L0]]{{$}}
-; SIMD128-NEXT: v128.store 0($pop[[R]]):p2align=0, $0{{$}}
+; SIMD128-NEXT: v128.store 0($pop[[R]]), $0{{$}}
 define void @store_v8i16_with_unfolded_gep_offset(<8 x i16> %v, <8 x i16>* %p) {
   %s = getelementptr <8 x i16>, <8 x i16>* %p, i32 1
   store <8 x i16> %v , <8 x i16>* %s

@@ -382,7 +382,7 @@ define void @store_v8i16_with_unfolded_gep_offset(<8 x i16> %v, <8 x i16>* %p) {
 ; NO-SIMD128-NOT: v128
 ; SIMD128-NEXT: .param v128{{$}}
 ; SIMD128-NEXT: i32.const $push[[L0:[0-9]+]]=, 0{{$}}
-; SIMD128-NEXT: v128.store 32($pop[[L0]]):p2align=0, $0{{$}}
+; SIMD128-NEXT: v128.store 32($pop[[L0]]), $0{{$}}
 define void @store_v8i16_to_numeric_address(<8 x i16> %v) {
   %s = inttoptr i32 32 to <8 x i16>*
   store <8 x i16> %v , <8 x i16>* %s

@@ -393,7 +393,7 @@ define void @store_v8i16_to_numeric_address(<8 x i16> %v) {
 ; NO-SIMD128-NOT: v128
 ; SIMD128-NEXT: .param v128{{$}}
 ; SIMD128-NEXT: i32.const $push[[R:[0-9]+]]=, 0{{$}}
-; SIMD128-NEXT: v128.store gv_v8i16($pop[[R]]):p2align=0, $0{{$}}
+; SIMD128-NEXT: v128.store gv_v8i16($pop[[R]]), $0{{$}}
 define void @store_v8i16_to_global_address(<8 x i16> %v) {
   store <8 x i16> %v , <8 x i16>* @gv_v8i16
   ret void
@@ -406,7 +406,7 @@ define void @store_v8i16_to_global_address(<8 x i16> %v) {
 ; NO-SIMD128-NOT: v128
 ; SIMD128-NEXT: .param i32{{$}}
 ; SIMD128-NEXT: .result v128{{$}}
-; SIMD128-NEXT: v128.load $push[[R:[0-9]+]]=, 0($0):p2align=0{{$}}
+; SIMD128-NEXT: v128.load $push[[R:[0-9]+]]=, 0($0){{$}}
 ; SIMD128-NEXT: return $pop[[R]]{{$}}
 define <4 x i32> @load_v4i32(<4 x i32>* %p) {
   %v = load <4 x i32>, <4 x i32>* %p

@@ -417,7 +417,7 @@ define <4 x i32> @load_v4i32(<4 x i32>* %p) {
 ; NO-SIMD128-NOT: v128
 ; SIMD128-NEXT: .param i32{{$}}
 ; SIMD128-NEXT: .result v128{{$}}
-; SIMD128-NEXT: v128.load $push[[R:[0-9]+]]=, 16($0):p2align=0{{$}}
+; SIMD128-NEXT: v128.load $push[[R:[0-9]+]]=, 16($0){{$}}
 ; SIMD128-NEXT: return $pop[[R]]{{$}}
 define <4 x i32> @load_v4i32_with_folded_offset(<4 x i32>* %p) {
   %q = ptrtoint <4 x i32>* %p to i32

@@ -431,7 +431,7 @@ define <4 x i32> @load_v4i32_with_folded_offset(<4 x i32>* %p) {
 ; NO-SIMD128-NOT: v128
 ; SIMD128-NEXT: .param i32{{$}}
 ; SIMD128-NEXT: .result v128{{$}}
-; SIMD128-NEXT: v128.load $push[[R:[0-9]+]]=, 16($0):p2align=0{{$}}
+; SIMD128-NEXT: v128.load $push[[R:[0-9]+]]=, 16($0){{$}}
 ; SIMD128-NEXT: return $pop[[R]]{{$}}
 define <4 x i32> @load_v4i32_with_folded_gep_offset(<4 x i32>* %p) {
   %s = getelementptr inbounds <4 x i32>, <4 x i32>* %p, i32 1

@@ -445,7 +445,7 @@ define <4 x i32> @load_v4i32_with_folded_gep_offset(<4 x i32>* %p) {
 ; SIMD128-NEXT: .result v128{{$}}
 ; SIMD128-NEXT: i32.const $push[[L0:[0-9]+]]=, -16{{$}}
 ; SIMD128-NEXT: i32.add $push[[L1:[0-9]+]]=, $0, $pop[[L0]]{{$}}
-; SIMD128-NEXT: v128.load $push[[R:[0-9]+]]=, 0($pop[[L1]]):p2align=0{{$}}
+; SIMD128-NEXT: v128.load $push[[R:[0-9]+]]=, 0($pop[[L1]]){{$}}
 ; SIMD128-NEXT: return $pop[[R]]{{$}}
 define <4 x i32> @load_v4i32_with_unfolded_gep_negative_offset(<4 x i32>* %p) {
   %s = getelementptr inbounds <4 x i32>, <4 x i32>* %p, i32 -1

@@ -459,7 +459,7 @@ define <4 x i32> @load_v4i32_with_unfolded_gep_negative_offset(<4 x i32>* %p) {
 ; SIMD128-NEXT: .result v128{{$}}
 ; SIMD128-NEXT: i32.const $push[[L0:[0-9]+]]=, 16{{$}}
 ; SIMD128-NEXT: i32.add $push[[L1:[0-9]+]]=, $0, $pop[[L0]]{{$}}
-; SIMD128-NEXT: v128.load $push[[R:[0-9]+]]=, 0($pop[[L1]]):p2align=0{{$}}
+; SIMD128-NEXT: v128.load $push[[R:[0-9]+]]=, 0($pop[[L1]]){{$}}
 ; SIMD128-NEXT: return $pop[[R]]{{$}}
 define <4 x i32> @load_v4i32_with_unfolded_offset(<4 x i32>* %p) {
   %q = ptrtoint <4 x i32>* %p to i32

@@ -475,7 +475,7 @@ define <4 x i32> @load_v4i32_with_unfolded_offset(<4 x i32>* %p) {
 ; SIMD128-NEXT: .result v128{{$}}
 ; SIMD128-NEXT: i32.const $push[[L0:[0-9]+]]=, 16{{$}}
 ; SIMD128-NEXT: i32.add $push[[L1:[0-9]+]]=, $0, $pop[[L0]]{{$}}
-; SIMD128-NEXT: v128.load $push[[R:[0-9]+]]=, 0($pop[[L1]]):p2align=0{{$}}
+; SIMD128-NEXT: v128.load $push[[R:[0-9]+]]=, 0($pop[[L1]]){{$}}
 ; SIMD128-NEXT: return $pop[[R]]{{$}}
 define <4 x i32> @load_v4i32_with_unfolded_gep_offset(<4 x i32>* %p) {
   %s = getelementptr <4 x i32>, <4 x i32>* %p, i32 1

@@ -487,7 +487,7 @@ define <4 x i32> @load_v4i32_with_unfolded_gep_offset(<4 x i32>* %p) {
 ; NO-SIMD128-NOT: v128
 ; SIMD128-NEXT: .result v128{{$}}
 ; SIMD128-NEXT: i32.const $push[[L0:[0-9]+]]=, 0{{$}}
-; SIMD128-NEXT: v128.load $push[[R:[0-9]+]]=, 32($pop[[L0]]):p2align=0{{$}}
+; SIMD128-NEXT: v128.load $push[[R:[0-9]+]]=, 32($pop[[L0]]){{$}}
 ; SIMD128-NEXT: return $pop[[R]]{{$}}
 define <4 x i32> @load_v4i32_from_numeric_address() {
   %s = inttoptr i32 32 to <4 x i32>*

@@ -499,7 +499,7 @@ define <4 x i32> @load_v4i32_from_numeric_address() {
 ; NO-SIMD128-NOT: v128
 ; SIMD128-NEXT: .result v128{{$}}
 ; SIMD128-NEXT: i32.const $push[[L0:[0-9]+]]=, 0{{$}}
-; SIMD128-NEXT: v128.load $push[[R:[0-9]+]]=, gv_v4i32($pop[[L0]]):p2align=0{{$}}
+; SIMD128-NEXT: v128.load $push[[R:[0-9]+]]=, gv_v4i32($pop[[L0]]){{$}}
 ; SIMD128-NEXT: return $pop[[R]]{{$}}
 @gv_v4i32 = global <4 x i32> <i32 42, i32 42, i32 42, i32 42>
 define <4 x i32> @load_v4i32_from_global_address() {

@@ -510,7 +510,7 @@ define <4 x i32> @load_v4i32_from_global_address() {
 ; CHECK-LABEL: store_v4i32:
 ; NO-SIMD128-NOT: v128
 ; SIMD128-NEXT: .param v128, i32{{$}}
-; SIMD128-NEXT: v128.store 0($1):p2align=0, $0{{$}}
+; SIMD128-NEXT: v128.store 0($1), $0{{$}}
 define void @store_v4i32(<4 x i32> %v, <4 x i32>* %p) {
   store <4 x i32> %v , <4 x i32>* %p
   ret void

@@ -519,7 +519,7 @@ define void @store_v4i32(<4 x i32> %v, <4 x i32>* %p) {
 ; CHECK-LABEL: store_v4i32_with_folded_offset:
 ; NO-SIMD128-NOT: v128
 ; SIMD128-NEXT: .param v128, i32{{$}}
-; SIMD128-NEXT: v128.store 16($1):p2align=0, $0{{$}}
+; SIMD128-NEXT: v128.store 16($1), $0{{$}}
 define void @store_v4i32_with_folded_offset(<4 x i32> %v, <4 x i32>* %p) {
   %q = ptrtoint <4 x i32>* %p to i32
   %r = add nuw i32 %q, 16

@@ -531,7 +531,7 @@ define void @store_v4i32_with_folded_offset(<4 x i32> %v, <4 x i32>* %p) {
 ; CHECK-LABEL: store_v4i32_with_folded_gep_offset:
 ; NO-SIMD128-NOT: v128
 ; SIMD128-NEXT: .param v128, i32{{$}}
-; SIMD128-NEXT: v128.store 16($1):p2align=0, $0{{$}}
+; SIMD128-NEXT: v128.store 16($1), $0{{$}}
 define void @store_v4i32_with_folded_gep_offset(<4 x i32> %v, <4 x i32>* %p) {
   %s = getelementptr inbounds <4 x i32>, <4 x i32>* %p, i32 1
   store <4 x i32> %v , <4 x i32>* %s

@@ -543,7 +543,7 @@ define void @store_v4i32_with_folded_gep_offset(<4 x i32> %v, <4 x i32>* %p) {
 ; SIMD128-NEXT: .param v128, i32{{$}}
 ; SIMD128-NEXT: i32.const $push[[L0:[0-9]+]]=, -16{{$}}
 ; SIMD128-NEXT: i32.add $push[[R:[0-9]+]]=, $1, $pop[[L0]]{{$}}
-; SIMD128-NEXT: v128.store 0($pop[[R]]):p2align=0, $0{{$}}
+; SIMD128-NEXT: v128.store 0($pop[[R]]), $0{{$}}
 define void @store_v4i32_with_unfolded_gep_negative_offset(<4 x i32> %v, <4 x i32>* %p) {
   %s = getelementptr inbounds <4 x i32>, <4 x i32>* %p, i32 -1
   store <4 x i32> %v , <4 x i32>* %s

@@ -555,7 +555,7 @@ define void @store_v4i32_with_unfolded_gep_negative_offset(<4 x i32> %v, <4 x i3
 ; SIMD128-NEXT: .param v128, i32{{$}}
 ; SIMD128-NEXT: i32.const $push[[L0:[0-9]+]]=, -16{{$}}
 ; SIMD128-NEXT: i32.add $push[[R:[0-9]+]]=, $1, $pop[[L0]]{{$}}
-; SIMD128-NEXT: v128.store 0($pop[[R]]):p2align=0, $0{{$}}
+; SIMD128-NEXT: v128.store 0($pop[[R]]), $0{{$}}
 define void @store_v4i32_with_unfolded_offset(<4 x i32> %v, <4 x i32>* %p) {
   %s = getelementptr inbounds <4 x i32>, <4 x i32>* %p, i32 -1
   store <4 x i32> %v , <4 x i32>* %s

@@ -567,7 +567,7 @@ define void @store_v4i32_with_unfolded_offset(<4 x i32> %v, <4 x i32>* %p) {
 ; SIMD128-NEXT: .param v128, i32{{$}}
 ; SIMD128-NEXT: i32.const $push[[L0:[0-9]+]]=, 16{{$}}
 ; SIMD128-NEXT: i32.add $push[[R:[0-9]+]]=, $1, $pop[[L0]]{{$}}
-; SIMD128-NEXT: v128.store 0($pop[[R]]):p2align=0, $0{{$}}
+; SIMD128-NEXT: v128.store 0($pop[[R]]), $0{{$}}
 define void @store_v4i32_with_unfolded_gep_offset(<4 x i32> %v, <4 x i32>* %p) {
   %s = getelementptr <4 x i32>, <4 x i32>* %p, i32 1
   store <4 x i32> %v , <4 x i32>* %s

@@ -578,7 +578,7 @@ define void @store_v4i32_with_unfolded_gep_offset(<4 x i32> %v, <4 x i32>* %p) {
 ; NO-SIMD128-NOT: v128
 ; SIMD128-NEXT: .param v128{{$}}
 ; SIMD128-NEXT: i32.const $push[[L0:[0-9]+]]=, 0{{$}}
-; SIMD128-NEXT: v128.store 32($pop[[L0]]):p2align=0, $0{{$}}
+; SIMD128-NEXT: v128.store 32($pop[[L0]]), $0{{$}}
 define void @store_v4i32_to_numeric_address(<4 x i32> %v) {
   %s = inttoptr i32 32 to <4 x i32>*
   store <4 x i32> %v , <4 x i32>* %s

@@ -589,7 +589,7 @@ define void @store_v4i32_to_numeric_address(<4 x i32> %v) {
 ; NO-SIMD128-NOT: v128
 ; SIMD128-NEXT: .param v128{{$}}
 ; SIMD128-NEXT: i32.const $push[[R:[0-9]+]]=, 0{{$}}
-; SIMD128-NEXT: v128.store gv_v4i32($pop[[R]]):p2align=0, $0{{$}}
+; SIMD128-NEXT: v128.store gv_v4i32($pop[[R]]), $0{{$}}
 define void @store_v4i32_to_global_address(<4 x i32> %v) {
   store <4 x i32> %v , <4 x i32>* @gv_v4i32
   ret void
@@ -603,7 +603,7 @@ define void @store_v4i32_to_global_address(<4 x i32> %v) {
 ; SIMD128-VM-NOT: v128
 ; SIMD128-NEXT: .param i32{{$}}
 ; SIMD128-NEXT: .result v128{{$}}
-; SIMD128-NEXT: v128.load $push[[R:[0-9]+]]=, 0($0):p2align=0{{$}}
+; SIMD128-NEXT: v128.load $push[[R:[0-9]+]]=, 0($0){{$}}
 ; SIMD128-NEXT: return $pop[[R]]{{$}}
 define <2 x i64> @load_v2i64(<2 x i64>* %p) {
   %v = load <2 x i64>, <2 x i64>* %p

@@ -615,7 +615,7 @@ define <2 x i64> @load_v2i64(<2 x i64>* %p) {
 ; SIMD128-VM-NOT: v128
 ; SIMD128-NEXT: .param i32{{$}}
 ; SIMD128-NEXT: .result v128{{$}}
-; SIMD128-NEXT: v128.load $push[[R:[0-9]+]]=, 16($0):p2align=0{{$}}
+; SIMD128-NEXT: v128.load $push[[R:[0-9]+]]=, 16($0){{$}}
 ; SIMD128-NEXT: return $pop[[R]]{{$}}
 define <2 x i64> @load_v2i64_with_folded_offset(<2 x i64>* %p) {
   %q = ptrtoint <2 x i64>* %p to i32

@@ -630,7 +630,7 @@ define <2 x i64> @load_v2i64_with_folded_offset(<2 x i64>* %p) {
 ; SIMD128-VM-NOT: v128
 ; SIMD128-NEXT: .param i32{{$}}
 ; SIMD128-NEXT: .result v128{{$}}
-; SIMD128-NEXT: v128.load $push[[R:[0-9]+]]=, 16($0):p2align=0{{$}}
+; SIMD128-NEXT: v128.load $push[[R:[0-9]+]]=, 16($0){{$}}
 ; SIMD128-NEXT: return $pop[[R]]{{$}}
 define <2 x i64> @load_v2i64_with_folded_gep_offset(<2 x i64>* %p) {
   %s = getelementptr inbounds <2 x i64>, <2 x i64>* %p, i32 1

@@ -645,7 +645,7 @@ define <2 x i64> @load_v2i64_with_folded_gep_offset(<2 x i64>* %p) {
 ; SIMD128-NEXT: .result v128{{$}}
 ; SIMD128-NEXT: i32.const $push[[L0:[0-9]+]]=, -16{{$}}
 ; SIMD128-NEXT: i32.add $push[[L1:[0-9]+]]=, $0, $pop[[L0]]{{$}}
-; SIMD128-NEXT: v128.load $push[[R:[0-9]+]]=, 0($pop[[L1]]):p2align=0{{$}}
+; SIMD128-NEXT: v128.load $push[[R:[0-9]+]]=, 0($pop[[L1]]){{$}}
 ; SIMD128-NEXT: return $pop[[R]]{{$}}
 define <2 x i64> @load_v2i64_with_unfolded_gep_negative_offset(<2 x i64>* %p) {
   %s = getelementptr inbounds <2 x i64>, <2 x i64>* %p, i32 -1

@@ -660,7 +660,7 @@ define <2 x i64> @load_v2i64_with_unfolded_gep_negative_offset(<2 x i64>* %p) {
 ; SIMD128-NEXT: .result v128{{$}}
 ; SIMD128-NEXT: i32.const $push[[L0:[0-9]+]]=, 16{{$}}
 ; SIMD128-NEXT: i32.add $push[[L1:[0-9]+]]=, $0, $pop[[L0]]{{$}}
-; SIMD128-NEXT: v128.load $push[[R:[0-9]+]]=, 0($pop[[L1]]):p2align=0{{$}}
+; SIMD128-NEXT: v128.load $push[[R:[0-9]+]]=, 0($pop[[L1]]){{$}}
 ; SIMD128-NEXT: return $pop[[R]]{{$}}
 define <2 x i64> @load_v2i64_with_unfolded_offset(<2 x i64>* %p) {
   %q = ptrtoint <2 x i64>* %p to i32

@@ -677,7 +677,7 @@ define <2 x i64> @load_v2i64_with_unfolded_offset(<2 x i64>* %p) {
 ; SIMD128-NEXT: .result v128{{$}}
 ; SIMD128-NEXT: i32.const $push[[L0:[0-9]+]]=, 16{{$}}
 ; SIMD128-NEXT: i32.add $push[[L1:[0-9]+]]=, $0, $pop[[L0]]{{$}}
-; SIMD128-NEXT: v128.load $push[[R:[0-9]+]]=, 0($pop[[L1]]):p2align=0{{$}}
+; SIMD128-NEXT: v128.load $push[[R:[0-9]+]]=, 0($pop[[L1]]){{$}}
 ; SIMD128-NEXT: return $pop[[R]]{{$}}
 define <2 x i64> @load_v2i64_with_unfolded_gep_offset(<2 x i64>* %p) {
   %s = getelementptr <2 x i64>, <2 x i64>* %p, i32 1

@@ -690,7 +690,7 @@ define <2 x i64> @load_v2i64_with_unfolded_gep_offset(<2 x i64>* %p) {
 ; SIMD128-VM-NOT: v128
 ; SIMD128-NEXT: .result v128{{$}}
 ; SIMD128-NEXT: i32.const $push[[L0:[0-9]+]]=, 0{{$}}
-; SIMD128-NEXT: v128.load $push[[R:[0-9]+]]=, 32($pop[[L0]]):p2align=0{{$}}
+; SIMD128-NEXT: v128.load $push[[R:[0-9]+]]=, 32($pop[[L0]]){{$}}
 ; SIMD128-NEXT: return $pop[[R]]{{$}}
 define <2 x i64> @load_v2i64_from_numeric_address() {
   %s = inttoptr i32 32 to <2 x i64>*

@@ -703,7 +703,7 @@ define <2 x i64> @load_v2i64_from_numeric_address() {
 ; SIMD128-VM-NOT: v128
 ; SIMD128-NEXT: .result v128{{$}}
 ; SIMD128-NEXT: i32.const $push[[L0:[0-9]+]]=, 0{{$}}
-; SIMD128-NEXT: v128.load $push[[R:[0-9]+]]=, gv_v2i64($pop[[L0]]):p2align=0{{$}}
+; SIMD128-NEXT: v128.load $push[[R:[0-9]+]]=, gv_v2i64($pop[[L0]]){{$}}
 ; SIMD128-NEXT: return $pop[[R]]{{$}}
 @gv_v2i64 = global <2 x i64> <i64 42, i64 42>
 define <2 x i64> @load_v2i64_from_global_address() {

@@ -715,7 +715,7 @@ define <2 x i64> @load_v2i64_from_global_address() {
 ; NO-SIMD128-NOT: v128
 ; SIMD128-VM-NOT: v128
 ; SIMD128-NEXT: .param v128, i32{{$}}
-; SIMD128-NEXT: v128.store 0($1):p2align=0, $0{{$}}
+; SIMD128-NEXT: v128.store 0($1), $0{{$}}
 define void @store_v2i64(<2 x i64> %v, <2 x i64>* %p) {
   store <2 x i64> %v , <2 x i64>* %p
   ret void

@@ -725,7 +725,7 @@ define void @store_v2i64(<2 x i64> %v, <2 x i64>* %p) {
 ; NO-SIMD128-NOT: v128
 ; SIMD128-VM-NOT: v128
 ; SIMD128-NEXT: .param v128, i32{{$}}
-; SIMD128-NEXT: v128.store 16($1):p2align=0, $0{{$}}
+; SIMD128-NEXT: v128.store 16($1), $0{{$}}
 define void @store_v2i64_with_folded_offset(<2 x i64> %v, <2 x i64>* %p) {
   %q = ptrtoint <2 x i64>* %p to i32
   %r = add nuw i32 %q, 16

@@ -738,7 +738,7 @@ define void @store_v2i64_with_folded_offset(<2 x i64> %v, <2 x i64>* %p) {
 ; NO-SIMD128-NOT: v128
 ; SIMD128-VM-NOT: v128
 ; SIMD128-NEXT: .param v128, i32{{$}}
-; SIMD128-NEXT: v128.store 16($1):p2align=0, $0{{$}}
+; SIMD128-NEXT: v128.store 16($1), $0{{$}}
 define void @store_v2i64_with_folded_gep_offset(<2 x i64> %v, <2 x i64>* %p) {
   %s = getelementptr inbounds <2 x i64>, <2 x i64>* %p, i32 1
   store <2 x i64> %v , <2 x i64>* %s

@@ -751,7 +751,7 @@ define void @store_v2i64_with_folded_gep_offset(<2 x i64> %v, <2 x i64>* %p) {
 ; SIMD128-NEXT: .param v128, i32{{$}}
 ; SIMD128-NEXT: i32.const $push[[L0:[0-9]+]]=, -16{{$}}
 ; SIMD128-NEXT: i32.add $push[[R:[0-9]+]]=, $1, $pop[[L0]]{{$}}
-; SIMD128-NEXT: v128.store 0($pop[[R]]):p2align=0, $0{{$}}
+; SIMD128-NEXT: v128.store 0($pop[[R]]), $0{{$}}
 define void @store_v2i64_with_unfolded_gep_negative_offset(<2 x i64> %v, <2 x i64>* %p) {
   %s = getelementptr inbounds <2 x i64>, <2 x i64>* %p, i32 -1
   store <2 x i64> %v , <2 x i64>* %s

@@ -764,7 +764,7 @@ define void @store_v2i64_with_unfolded_gep_negative_offset(<2 x i64> %v, <2 x i6
 ; SIMD128-NEXT: .param v128, i32{{$}}
 ; SIMD128-NEXT: i32.const $push[[L0:[0-9]+]]=, -16{{$}}
 ; SIMD128-NEXT: i32.add $push[[R:[0-9]+]]=, $1, $pop[[L0]]{{$}}
-; SIMD128-NEXT: v128.store 0($pop[[R]]):p2align=0, $0{{$}}
+; SIMD128-NEXT: v128.store 0($pop[[R]]), $0{{$}}
 define void @store_v2i64_with_unfolded_offset(<2 x i64> %v, <2 x i64>* %p) {
   %s = getelementptr inbounds <2 x i64>, <2 x i64>* %p, i32 -1
   store <2 x i64> %v , <2 x i64>* %s

@@ -777,7 +777,7 @@ define void @store_v2i64_with_unfolded_offset(<2 x i64> %v, <2 x i64>* %p) {
 ; SIMD128-NEXT: .param v128, i32{{$}}
 ; SIMD128-NEXT: i32.const $push[[L0:[0-9]+]]=, 16{{$}}
 ; SIMD128-NEXT: i32.add $push[[R:[0-9]+]]=, $1, $pop[[L0]]{{$}}
-; SIMD128-NEXT: v128.store 0($pop[[R]]):p2align=0, $0{{$}}
+; SIMD128-NEXT: v128.store 0($pop[[R]]), $0{{$}}
 define void @store_v2i64_with_unfolded_gep_offset(<2 x i64> %v, <2 x i64>* %p) {
   %s = getelementptr <2 x i64>, <2 x i64>* %p, i32 1
   store <2 x i64> %v , <2 x i64>* %s

@@ -789,7 +789,7 @@ define void @store_v2i64_with_unfolded_gep_offset(<2 x i64> %v, <2 x i64>* %p) {
 ; SIMD128-VM-NOT: v128
 ; SIMD128-NEXT: .param v128{{$}}
 ; SIMD128-NEXT: i32.const $push[[L0:[0-9]+]]=, 0{{$}}
-; SIMD128-NEXT: v128.store 32($pop[[L0]]):p2align=0, $0{{$}}
+; SIMD128-NEXT: v128.store 32($pop[[L0]]), $0{{$}}
 define void @store_v2i64_to_numeric_address(<2 x i64> %v) {
   %s = inttoptr i32 32 to <2 x i64>*
   store <2 x i64> %v , <2 x i64>* %s

@@ -801,7 +801,7 @@ define void @store_v2i64_to_numeric_address(<2 x i64> %v) {
 ; SIMD128-VM-NOT: v128
 ; SIMD128-NEXT: .param v128{{$}}
 ; SIMD128-NEXT: i32.const $push[[R:[0-9]+]]=, 0{{$}}
-; SIMD128-NEXT: v128.store gv_v2i64($pop[[R]]):p2align=0, $0{{$}}
+; SIMD128-NEXT: v128.store gv_v2i64($pop[[R]]), $0{{$}}
 define void @store_v2i64_to_global_address(<2 x i64> %v) {
   store <2 x i64> %v , <2 x i64>* @gv_v2i64
   ret void
@@ -814,7 +814,7 @@ define void @store_v2i64_to_global_address(<2 x i64> %v) {
 ; NO-SIMD128-NOT: v128
 ; SIMD128-NEXT: .param i32{{$}}
 ; SIMD128-NEXT: .result v128{{$}}
-; SIMD128-NEXT: v128.load $push[[R:[0-9]+]]=, 0($0):p2align=0{{$}}
+; SIMD128-NEXT: v128.load $push[[R:[0-9]+]]=, 0($0){{$}}
 ; SIMD128-NEXT: return $pop[[R]]{{$}}
 define <4 x float> @load_v4f32(<4 x float>* %p) {
 %v = load <4 x float>, <4 x float>* %p
@@ -825,7 +825,7 @@ define <4 x float> @load_v4f32(<4 x float>* %p) {
 ; NO-SIMD128-NOT: v128
 ; SIMD128-NEXT: .param i32{{$}}
 ; SIMD128-NEXT: .result v128{{$}}
-; SIMD128-NEXT: v128.load $push[[R:[0-9]+]]=, 16($0):p2align=0{{$}}
+; SIMD128-NEXT: v128.load $push[[R:[0-9]+]]=, 16($0){{$}}
 ; SIMD128-NEXT: return $pop[[R]]{{$}}
 define <4 x float> @load_v4f32_with_folded_offset(<4 x float>* %p) {
 %q = ptrtoint <4 x float>* %p to i32
@@ -839,7 +839,7 @@ define <4 x float> @load_v4f32_with_folded_offset(<4 x float>* %p) {
 ; NO-SIMD128-NOT: v128
 ; SIMD128-NEXT: .param i32{{$}}
 ; SIMD128-NEXT: .result v128{{$}}
-; SIMD128-NEXT: v128.load $push[[R:[0-9]+]]=, 16($0):p2align=0{{$}}
+; SIMD128-NEXT: v128.load $push[[R:[0-9]+]]=, 16($0){{$}}
 ; SIMD128-NEXT: return $pop[[R]]{{$}}
 define <4 x float> @load_v4f32_with_folded_gep_offset(<4 x float>* %p) {
 %s = getelementptr inbounds <4 x float>, <4 x float>* %p, i32 1
@@ -853,7 +853,7 @@ define <4 x float> @load_v4f32_with_folded_gep_offset(<4 x float>* %p) {
 ; SIMD128-NEXT: .result v128{{$}}
 ; SIMD128-NEXT: i32.const $push[[L0:[0-9]+]]=, -16{{$}}
 ; SIMD128-NEXT: i32.add $push[[L1:[0-9]+]]=, $0, $pop[[L0]]{{$}}
-; SIMD128-NEXT: v128.load $push[[R:[0-9]+]]=, 0($pop[[L1]]):p2align=0{{$}}
+; SIMD128-NEXT: v128.load $push[[R:[0-9]+]]=, 0($pop[[L1]]){{$}}
 ; SIMD128-NEXT: return $pop[[R]]{{$}}
 define <4 x float> @load_v4f32_with_unfolded_gep_negative_offset(<4 x float>* %p) {
 %s = getelementptr inbounds <4 x float>, <4 x float>* %p, i32 -1
@@ -867,7 +867,7 @@ define <4 x float> @load_v4f32_with_unfolded_gep_negative_offset(<4 x float>* %p
 ; SIMD128-NEXT: .result v128{{$}}
 ; SIMD128-NEXT: i32.const $push[[L0:[0-9]+]]=, 16{{$}}
 ; SIMD128-NEXT: i32.add $push[[L1:[0-9]+]]=, $0, $pop[[L0]]{{$}}
-; SIMD128-NEXT: v128.load $push[[R:[0-9]+]]=, 0($pop[[L1]]):p2align=0{{$}}
+; SIMD128-NEXT: v128.load $push[[R:[0-9]+]]=, 0($pop[[L1]]){{$}}
 ; SIMD128-NEXT: return $pop[[R]]{{$}}
 define <4 x float> @load_v4f32_with_unfolded_offset(<4 x float>* %p) {
 %q = ptrtoint <4 x float>* %p to i32
@@ -883,7 +883,7 @@ define <4 x float> @load_v4f32_with_unfolded_offset(<4 x float>* %p) {
 ; SIMD128-NEXT: .result v128{{$}}
 ; SIMD128-NEXT: i32.const $push[[L0:[0-9]+]]=, 16{{$}}
 ; SIMD128-NEXT: i32.add $push[[L1:[0-9]+]]=, $0, $pop[[L0]]{{$}}
-; SIMD128-NEXT: v128.load $push[[R:[0-9]+]]=, 0($pop[[L1]]):p2align=0{{$}}
+; SIMD128-NEXT: v128.load $push[[R:[0-9]+]]=, 0($pop[[L1]]){{$}}
 ; SIMD128-NEXT: return $pop[[R]]{{$}}
 define <4 x float> @load_v4f32_with_unfolded_gep_offset(<4 x float>* %p) {
 %s = getelementptr <4 x float>, <4 x float>* %p, i32 1
@@ -895,7 +895,7 @@ define <4 x float> @load_v4f32_with_unfolded_gep_offset(<4 x float>* %p) {
 ; NO-SIMD128-NOT: v128
 ; SIMD128-NEXT: .result v128{{$}}
 ; SIMD128-NEXT: i32.const $push[[L0:[0-9]+]]=, 0{{$}}
-; SIMD128-NEXT: v128.load $push[[R:[0-9]+]]=, 32($pop[[L0]]):p2align=0{{$}}
+; SIMD128-NEXT: v128.load $push[[R:[0-9]+]]=, 32($pop[[L0]]){{$}}
 ; SIMD128-NEXT: return $pop[[R]]{{$}}
 define <4 x float> @load_v4f32_from_numeric_address() {
 %s = inttoptr i32 32 to <4 x float>*
@@ -907,7 +907,7 @@ define <4 x float> @load_v4f32_from_numeric_address() {
 ; NO-SIMD128-NOT: v128
 ; SIMD128-NEXT: .result v128{{$}}
 ; SIMD128-NEXT: i32.const $push[[L0:[0-9]+]]=, 0{{$}}
-; SIMD128-NEXT: v128.load $push[[R:[0-9]+]]=, gv_v4f32($pop[[L0]]):p2align=0{{$}}
+; SIMD128-NEXT: v128.load $push[[R:[0-9]+]]=, gv_v4f32($pop[[L0]]){{$}}
 ; SIMD128-NEXT: return $pop[[R]]{{$}}
 @gv_v4f32 = global <4 x float> <float 42., float 42., float 42., float 42.>
 define <4 x float> @load_v4f32_from_global_address() {
@@ -918,7 +918,7 @@ define <4 x float> @load_v4f32_from_global_address() {
 ; CHECK-LABEL: store_v4f32:
 ; NO-SIMD128-NOT: v128
 ; SIMD128-NEXT: .param v128, i32{{$}}
-; SIMD128-NEXT: v128.store 0($1):p2align=0, $0{{$}}
+; SIMD128-NEXT: v128.store 0($1), $0{{$}}
 define void @store_v4f32(<4 x float> %v, <4 x float>* %p) {
 store <4 x float> %v , <4 x float>* %p
 ret void
@@ -927,7 +927,7 @@ define void @store_v4f32(<4 x float> %v, <4 x float>* %p) {
 ; CHECK-LABEL: store_v4f32_with_folded_offset:
 ; NO-SIMD128-NOT: v128
 ; SIMD128-NEXT: .param v128, i32{{$}}
-; SIMD128-NEXT: v128.store 16($1):p2align=0, $0{{$}}
+; SIMD128-NEXT: v128.store 16($1), $0{{$}}
 define void @store_v4f32_with_folded_offset(<4 x float> %v, <4 x float>* %p) {
 %q = ptrtoint <4 x float>* %p to i32
 %r = add nuw i32 %q, 16
@@ -939,7 +939,7 @@ define void @store_v4f32_with_folded_offset(<4 x float> %v, <4 x float>* %p) {
 ; CHECK-LABEL: store_v4f32_with_folded_gep_offset:
 ; NO-SIMD128-NOT: v128
 ; SIMD128-NEXT: .param v128, i32{{$}}
-; SIMD128-NEXT: v128.store 16($1):p2align=0, $0{{$}}
+; SIMD128-NEXT: v128.store 16($1), $0{{$}}
 define void @store_v4f32_with_folded_gep_offset(<4 x float> %v, <4 x float>* %p) {
 %s = getelementptr inbounds <4 x float>, <4 x float>* %p, i32 1
 store <4 x float> %v , <4 x float>* %s
@@ -951,7 +951,7 @@ define void @store_v4f32_with_folded_gep_offset(<4 x float> %v, <4 x float>* %p)
 ; SIMD128-NEXT: .param v128, i32{{$}}
 ; SIMD128-NEXT: i32.const $push[[L0:[0-9]+]]=, -16{{$}}
 ; SIMD128-NEXT: i32.add $push[[R:[0-9]+]]=, $1, $pop[[L0]]{{$}}
-; SIMD128-NEXT: v128.store 0($pop[[R]]):p2align=0, $0{{$}}
+; SIMD128-NEXT: v128.store 0($pop[[R]]), $0{{$}}
 define void @store_v4f32_with_unfolded_gep_negative_offset(<4 x float> %v, <4 x float>* %p) {
 %s = getelementptr inbounds <4 x float>, <4 x float>* %p, i32 -1
 store <4 x float> %v , <4 x float>* %s
@@ -963,7 +963,7 @@ define void @store_v4f32_with_unfolded_gep_negative_offset(<4 x float> %v, <4 x
 ; SIMD128-NEXT: .param v128, i32{{$}}
 ; SIMD128-NEXT: i32.const $push[[L0:[0-9]+]]=, -16{{$}}
 ; SIMD128-NEXT: i32.add $push[[R:[0-9]+]]=, $1, $pop[[L0]]{{$}}
-; SIMD128-NEXT: v128.store 0($pop[[R]]):p2align=0, $0{{$}}
+; SIMD128-NEXT: v128.store 0($pop[[R]]), $0{{$}}
 define void @store_v4f32_with_unfolded_offset(<4 x float> %v, <4 x float>* %p) {
 %s = getelementptr inbounds <4 x float>, <4 x float>* %p, i32 -1
 store <4 x float> %v , <4 x float>* %s
@@ -975,7 +975,7 @@ define void @store_v4f32_with_unfolded_offset(<4 x float> %v, <4 x float>* %p) {
 ; SIMD128-NEXT: .param v128, i32{{$}}
 ; SIMD128-NEXT: i32.const $push[[L0:[0-9]+]]=, 16{{$}}
 ; SIMD128-NEXT: i32.add $push[[R:[0-9]+]]=, $1, $pop[[L0]]{{$}}
-; SIMD128-NEXT: v128.store 0($pop[[R]]):p2align=0, $0{{$}}
+; SIMD128-NEXT: v128.store 0($pop[[R]]), $0{{$}}
 define void @store_v4f32_with_unfolded_gep_offset(<4 x float> %v, <4 x float>* %p) {
 %s = getelementptr <4 x float>, <4 x float>* %p, i32 1
 store <4 x float> %v , <4 x float>* %s
@@ -986,7 +986,7 @@ define void @store_v4f32_with_unfolded_gep_offset(<4 x float> %v, <4 x float>* %
 ; NO-SIMD128-NOT: v128
 ; SIMD128-NEXT: .param v128{{$}}
 ; SIMD128-NEXT: i32.const $push[[L0:[0-9]+]]=, 0{{$}}
-; SIMD128-NEXT: v128.store 32($pop[[L0]]):p2align=0, $0{{$}}
+; SIMD128-NEXT: v128.store 32($pop[[L0]]), $0{{$}}
 define void @store_v4f32_to_numeric_address(<4 x float> %v) {
 %s = inttoptr i32 32 to <4 x float>*
 store <4 x float> %v , <4 x float>* %s
@@ -997,7 +997,7 @@ define void @store_v4f32_to_numeric_address(<4 x float> %v) {
 ; NO-SIMD128-NOT: v128
 ; SIMD128-NEXT: .param v128{{$}}
 ; SIMD128-NEXT: i32.const $push[[R:[0-9]+]]=, 0{{$}}
-; SIMD128-NEXT: v128.store gv_v4f32($pop[[R]]):p2align=0, $0{{$}}
+; SIMD128-NEXT: v128.store gv_v4f32($pop[[R]]), $0{{$}}
 define void @store_v4f32_to_global_address(<4 x float> %v) {
 store <4 x float> %v , <4 x float>* @gv_v4f32
 ret void
@@ -1011,7 +1011,7 @@ define void @store_v4f32_to_global_address(<4 x float> %v) {
 ; SIMD128-VM-NOT: v128
 ; SIMD128-NEXT: .param i32{{$}}
 ; SIMD128-NEXT: .result v128{{$}}
-; SIMD128-NEXT: v128.load $push[[R:[0-9]+]]=, 0($0):p2align=0{{$}}
+; SIMD128-NEXT: v128.load $push[[R:[0-9]+]]=, 0($0){{$}}
 ; SIMD128-NEXT: return $pop[[R]]{{$}}
 define <2 x double> @load_v2f64(<2 x double>* %p) {
 %v = load <2 x double>, <2 x double>* %p
@@ -1023,7 +1023,7 @@ define <2 x double> @load_v2f64(<2 x double>* %p) {
 ; SIMD128-VM-NOT: v128
 ; SIMD128-NEXT: .param i32{{$}}
 ; SIMD128-NEXT: .result v128{{$}}
-; SIMD128-NEXT: v128.load $push[[R:[0-9]+]]=, 16($0):p2align=0{{$}}
+; SIMD128-NEXT: v128.load $push[[R:[0-9]+]]=, 16($0){{$}}
 ; SIMD128-NEXT: return $pop[[R]]{{$}}
 define <2 x double> @load_v2f64_with_folded_offset(<2 x double>* %p) {
 %q = ptrtoint <2 x double>* %p to i32
@@ -1038,7 +1038,7 @@ define <2 x double> @load_v2f64_with_folded_offset(<2 x double>* %p) {
 ; SIMD128-VM-NOT: v128
 ; SIMD128-NEXT: .param i32{{$}}
 ; SIMD128-NEXT: .result v128{{$}}
-; SIMD128-NEXT: v128.load $push[[R:[0-9]+]]=, 16($0):p2align=0{{$}}
+; SIMD128-NEXT: v128.load $push[[R:[0-9]+]]=, 16($0){{$}}
 ; SIMD128-NEXT: return $pop[[R]]{{$}}
 define <2 x double> @load_v2f64_with_folded_gep_offset(<2 x double>* %p) {
 %s = getelementptr inbounds <2 x double>, <2 x double>* %p, i32 1
@@ -1053,7 +1053,7 @@ define <2 x double> @load_v2f64_with_folded_gep_offset(<2 x double>* %p) {
 ; SIMD128-NEXT: .result v128{{$}}
 ; SIMD128-NEXT: i32.const $push[[L0:[0-9]+]]=, -16{{$}}
 ; SIMD128-NEXT: i32.add $push[[L1:[0-9]+]]=, $0, $pop[[L0]]{{$}}
-; SIMD128-NEXT: v128.load $push[[R:[0-9]+]]=, 0($pop[[L1]]):p2align=0{{$}}
+; SIMD128-NEXT: v128.load $push[[R:[0-9]+]]=, 0($pop[[L1]]){{$}}
 ; SIMD128-NEXT: return $pop[[R]]{{$}}
 define <2 x double> @load_v2f64_with_unfolded_gep_negative_offset(<2 x double>* %p) {
 %s = getelementptr inbounds <2 x double>, <2 x double>* %p, i32 -1
@@ -1068,7 +1068,7 @@ define <2 x double> @load_v2f64_with_unfolded_gep_negative_offset(<2 x double>*
 ; SIMD128-NEXT: .result v128{{$}}
 ; SIMD128-NEXT: i32.const $push[[L0:[0-9]+]]=, 16{{$}}
 ; SIMD128-NEXT: i32.add $push[[L1:[0-9]+]]=, $0, $pop[[L0]]{{$}}
-; SIMD128-NEXT: v128.load $push[[R:[0-9]+]]=, 0($pop[[L1]]):p2align=0{{$}}
+; SIMD128-NEXT: v128.load $push[[R:[0-9]+]]=, 0($pop[[L1]]){{$}}
 ; SIMD128-NEXT: return $pop[[R]]{{$}}
 define <2 x double> @load_v2f64_with_unfolded_offset(<2 x double>* %p) {
 %q = ptrtoint <2 x double>* %p to i32
@@ -1085,7 +1085,7 @@ define <2 x double> @load_v2f64_with_unfolded_offset(<2 x double>* %p) {
 ; SIMD128-NEXT: .result v128{{$}}
 ; SIMD128-NEXT: i32.const $push[[L0:[0-9]+]]=, 16{{$}}
 ; SIMD128-NEXT: i32.add $push[[L1:[0-9]+]]=, $0, $pop[[L0]]{{$}}
-; SIMD128-NEXT: v128.load $push[[R:[0-9]+]]=, 0($pop[[L1]]):p2align=0{{$}}
+; SIMD128-NEXT: v128.load $push[[R:[0-9]+]]=, 0($pop[[L1]]){{$}}
 ; SIMD128-NEXT: return $pop[[R]]{{$}}
 define <2 x double> @load_v2f64_with_unfolded_gep_offset(<2 x double>* %p) {
 %s = getelementptr <2 x double>, <2 x double>* %p, i32 1
@@ -1098,7 +1098,7 @@ define <2 x double> @load_v2f64_with_unfolded_gep_offset(<2 x double>* %p) {
 ; SIMD128-VM-NOT: v128
 ; SIMD128-NEXT: .result v128{{$}}
 ; SIMD128-NEXT: i32.const $push[[L0:[0-9]+]]=, 0{{$}}
-; SIMD128-NEXT: v128.load $push[[R:[0-9]+]]=, 32($pop[[L0]]):p2align=0{{$}}
+; SIMD128-NEXT: v128.load $push[[R:[0-9]+]]=, 32($pop[[L0]]){{$}}
 ; SIMD128-NEXT: return $pop[[R]]{{$}}
 define <2 x double> @load_v2f64_from_numeric_address() {
 %s = inttoptr i32 32 to <2 x double>*
@@ -1111,7 +1111,7 @@ define <2 x double> @load_v2f64_from_numeric_address() {
 ; SIMD128-VM-NOT: v128
 ; SIMD128-NEXT: .result v128{{$}}
 ; SIMD128-NEXT: i32.const $push[[L0:[0-9]+]]=, 0{{$}}
-; SIMD128-NEXT: v128.load $push[[R:[0-9]+]]=, gv_v2f64($pop[[L0]]):p2align=0{{$}}
+; SIMD128-NEXT: v128.load $push[[R:[0-9]+]]=, gv_v2f64($pop[[L0]]){{$}}
 ; SIMD128-NEXT: return $pop[[R]]{{$}}
 @gv_v2f64 = global <2 x double> <double 42., double 42.>
 define <2 x double> @load_v2f64_from_global_address() {
@@ -1123,7 +1123,7 @@ define <2 x double> @load_v2f64_from_global_address() {
 ; NO-SIMD128-NOT: v128
 ; SIMD128-VM-NOT: v128
 ; SIMD128-NEXT: .param v128, i32{{$}}
-; SIMD128-NEXT: v128.store 0($1):p2align=0, $0{{$}}
+; SIMD128-NEXT: v128.store 0($1), $0{{$}}
 define void @store_v2f64(<2 x double> %v, <2 x double>* %p) {
 store <2 x double> %v , <2 x double>* %p
 ret void
@@ -1133,7 +1133,7 @@ define void @store_v2f64(<2 x double> %v, <2 x double>* %p) {
 ; NO-SIMD128-NOT: v128
 ; SIMD128-VM-NOT: v128
 ; SIMD128-NEXT: .param v128, i32{{$}}
-; SIMD128-NEXT: v128.store 16($1):p2align=0, $0{{$}}
+; SIMD128-NEXT: v128.store 16($1), $0{{$}}
 define void @store_v2f64_with_folded_offset(<2 x double> %v, <2 x double>* %p) {
 %q = ptrtoint <2 x double>* %p to i32
 %r = add nuw i32 %q, 16
@@ -1146,7 +1146,7 @@ define void @store_v2f64_with_folded_offset(<2 x double> %v, <2 x double>* %p) {
 ; NO-SIMD128-NOT: v128
 ; SIMD128-VM-NOT: v128
 ; SIMD128-NEXT: .param v128, i32{{$}}
-; SIMD128-NEXT: v128.store 16($1):p2align=0, $0{{$}}
+; SIMD128-NEXT: v128.store 16($1), $0{{$}}
 define void @store_v2f64_with_folded_gep_offset(<2 x double> %v, <2 x double>* %p) {
 %s = getelementptr inbounds <2 x double>, <2 x double>* %p, i32 1
 store <2 x double> %v , <2 x double>* %s
@@ -1159,7 +1159,7 @@ define void @store_v2f64_with_folded_gep_offset(<2 x double> %v, <2 x double>* %
 ; SIMD128-NEXT: .param v128, i32{{$}}
 ; SIMD128-NEXT: i32.const $push[[L0:[0-9]+]]=, -16{{$}}
 ; SIMD128-NEXT: i32.add $push[[R:[0-9]+]]=, $1, $pop[[L0]]{{$}}
-; SIMD128-NEXT: v128.store 0($pop[[R]]):p2align=0, $0{{$}}
+; SIMD128-NEXT: v128.store 0($pop[[R]]), $0{{$}}
 define void @store_v2f64_with_unfolded_gep_negative_offset(<2 x double> %v, <2 x double>* %p) {
 %s = getelementptr inbounds <2 x double>, <2 x double>* %p, i32 -1
 store <2 x double> %v , <2 x double>* %s
@@ -1172,7 +1172,7 @@ define void @store_v2f64_with_unfolded_gep_negative_offset(<2 x double> %v, <2 x
 ; SIMD128-NEXT: .param v128, i32{{$}}
 ; SIMD128-NEXT: i32.const $push[[L0:[0-9]+]]=, -16{{$}}
 ; SIMD128-NEXT: i32.add $push[[R:[0-9]+]]=, $1, $pop[[L0]]{{$}}
-; SIMD128-NEXT: v128.store 0($pop[[R]]):p2align=0, $0{{$}}
+; SIMD128-NEXT: v128.store 0($pop[[R]]), $0{{$}}
 define void @store_v2f64_with_unfolded_offset(<2 x double> %v, <2 x double>* %p) {
 %s = getelementptr inbounds <2 x double>, <2 x double>* %p, i32 -1
 store <2 x double> %v , <2 x double>* %s
@@ -1185,7 +1185,7 @@ define void @store_v2f64_with_unfolded_offset(<2 x double> %v, <2 x double>* %p)
 ; SIMD128-NEXT: .param v128, i32{{$}}
 ; SIMD128-NEXT: i32.const $push[[L0:[0-9]+]]=, 16{{$}}
 ; SIMD128-NEXT: i32.add $push[[R:[0-9]+]]=, $1, $pop[[L0]]{{$}}
-; SIMD128-NEXT: v128.store 0($pop[[R]]):p2align=0, $0{{$}}
+; SIMD128-NEXT: v128.store 0($pop[[R]]), $0{{$}}
 define void @store_v2f64_with_unfolded_gep_offset(<2 x double> %v, <2 x double>* %p) {
 %s = getelementptr <2 x double>, <2 x double>* %p, i32 1
 store <2 x double> %v , <2 x double>* %s
@@ -1197,7 +1197,7 @@ define void @store_v2f64_with_unfolded_gep_offset(<2 x double> %v, <2 x double>*
 ; SIMD128-VM-NOT: v128
 ; SIMD128-NEXT: .param v128{{$}}
 ; SIMD128-NEXT: i32.const $push[[L0:[0-9]+]]=, 0{{$}}
-; SIMD128-NEXT: v128.store 32($pop[[L0]]):p2align=0, $0{{$}}
+; SIMD128-NEXT: v128.store 32($pop[[L0]]), $0{{$}}
 define void @store_v2f64_to_numeric_address(<2 x double> %v) {
 %s = inttoptr i32 32 to <2 x double>*
 store <2 x double> %v , <2 x double>* %s
@@ -1209,7 +1209,7 @@ define void @store_v2f64_to_numeric_address(<2 x double> %v) {
 ; SIMD128-VM-NOT: v128
 ; SIMD128-NEXT: .param v128{{$}}
 ; SIMD128-NEXT: i32.const $push[[R:[0-9]+]]=, 0{{$}}
-; SIMD128-NEXT: v128.store gv_v2f64($pop[[R]]):p2align=0, $0{{$}}
+; SIMD128-NEXT: v128.store gv_v2f64($pop[[R]]), $0{{$}}
 define void @store_v2f64_to_global_address(<2 x double> %v) {
 store <2 x double> %v , <2 x double>* @gv_v2f64
 ret void