forked from OSchip/llvm-project
[AArch64] Regenerate some test checks. NFC
This regenerates some of the tests that already had very-close-to-updated check lines, in order to make them more maintainable.
This commit is contained in:
parent 187d9f8cd9
commit d338e535ec
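The updated check lines below were produced with LLVM's utils/update_llc_test_checks.py (see the NOTE lines in the tests). As a rough sketch of how one of these files would be regenerated — the build directory and test path here are illustrative, not taken from this commit — the script is run over the test and rewrites its CHECK/CHECK-NEXT blocks in place, invoking llc according to the test's own RUN lines:

    # Paths below are examples; point --llc-binary at a locally built llc.
    python3 llvm/utils/update_llc_test_checks.py --llc-binary=build/bin/llc \
        llvm/test/CodeGen/AArch64/arm64-indexed-memory.ll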
@@ -1,10 +1,12 @@
; RUN: llc < %s -mtriple=arm64-eabi -aarch64-redzone | FileCheck %s
; RUN: llc < %s -mtriple=arm64_32-apple-ios -aarch64-redzone | FileCheck %s
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=arm64-apple-ios -aarch64-redzone | FileCheck %s --check-prefixes=CHECK,CHECK64
; RUN: llc < %s -mtriple=arm64_32-apple-ios -aarch64-redzone | FileCheck %s --check-prefixes=CHECK,CHECK32

define i64* @store64(i64* %ptr, i64 %index, i64 %spacing) {
; CHECK-LABEL: store64:
; CHECK: str x{{[0-9+]}}, [x{{[0-9+]}}], #8
; CHECK: ret
; CHECK: ; %bb.0:
; CHECK-NEXT: str x2, [x0], #8
; CHECK-NEXT: ret
%incdec.ptr = getelementptr inbounds i64, i64* %ptr, i64 1
store i64 %spacing, i64* %ptr, align 4
ret i64* %incdec.ptr
@@ -12,9 +14,11 @@ define i64* @store64(i64* %ptr, i64 %index, i64 %spacing) {

define i64* @store64idxpos256(i64* %ptr, i64 %index, i64 %spacing) {
; CHECK-LABEL: store64idxpos256:
; CHECK: add x{{[0-9+]}}, x{{[0-9+]}}, #256
; CHECK: str x{{[0-9+]}}, [x{{[0-9+]}}]
; CHECK: ret
; CHECK: ; %bb.0:
; CHECK-NEXT: add x8, x0, #256
; CHECK-NEXT: str x2, [x0]
; CHECK-NEXT: mov x0, x8
; CHECK-NEXT: ret
%incdec.ptr = getelementptr inbounds i64, i64* %ptr, i64 32
store i64 %spacing, i64* %ptr, align 4
ret i64* %incdec.ptr
@@ -22,8 +26,9 @@ define i64* @store64idxpos256(i64* %ptr, i64 %index, i64 %spacing) {

define i64* @store64idxneg256(i64* %ptr, i64 %index, i64 %spacing) {
; CHECK-LABEL: store64idxneg256:
; CHECK: str x{{[0-9+]}}, [x{{[0-9+]}}], #-256
; CHECK: ret
; CHECK: ; %bb.0:
; CHECK-NEXT: str x2, [x0], #-256
; CHECK-NEXT: ret
%incdec.ptr = getelementptr inbounds i64, i64* %ptr, i64 -32
store i64 %spacing, i64* %ptr, align 4
ret i64* %incdec.ptr
@@ -31,8 +36,9 @@ define i64* @store64idxneg256(i64* %ptr, i64 %index, i64 %spacing) {

define i32* @store32(i32* %ptr, i32 %index, i32 %spacing) {
; CHECK-LABEL: store32:
; CHECK: str w{{[0-9+]}}, [x{{[0-9+]}}], #4
; CHECK: ret
; CHECK: ; %bb.0:
; CHECK-NEXT: str w2, [x0], #4
; CHECK-NEXT: ret
%incdec.ptr = getelementptr inbounds i32, i32* %ptr, i64 1
store i32 %spacing, i32* %ptr, align 4
ret i32* %incdec.ptr
@@ -40,9 +46,11 @@ define i32* @store32(i32* %ptr, i32 %index, i32 %spacing) {

define i32* @store32idxpos256(i32* %ptr, i32 %index, i32 %spacing) {
; CHECK-LABEL: store32idxpos256:
; CHECK: add x{{[0-9+]}}, x{{[0-9+]}}, #256
; CHECK: str w{{[0-9+]}}, [x{{[0-9+]}}]
; CHECK: ret
; CHECK: ; %bb.0:
; CHECK-NEXT: add x8, x0, #256
; CHECK-NEXT: str w2, [x0]
; CHECK-NEXT: mov x0, x8
; CHECK-NEXT: ret
%incdec.ptr = getelementptr inbounds i32, i32* %ptr, i64 64
store i32 %spacing, i32* %ptr, align 4
ret i32* %incdec.ptr
@@ -50,8 +58,9 @@ define i32* @store32idxpos256(i32* %ptr, i32 %index, i32 %spacing) {

define i32* @store32idxneg256(i32* %ptr, i32 %index, i32 %spacing) {
; CHECK-LABEL: store32idxneg256:
; CHECK: str w{{[0-9+]}}, [x{{[0-9+]}}], #-256
; CHECK: ret
; CHECK: ; %bb.0:
; CHECK-NEXT: str w2, [x0], #-256
; CHECK-NEXT: ret
%incdec.ptr = getelementptr inbounds i32, i32* %ptr, i64 -64
store i32 %spacing, i32* %ptr, align 4
ret i32* %incdec.ptr
@@ -59,8 +68,9 @@ define i32* @store32idxneg256(i32* %ptr, i32 %index, i32 %spacing) {

define i16* @store16(i16* %ptr, i16 %index, i16 %spacing) {
; CHECK-LABEL: store16:
; CHECK: strh w{{[0-9+]}}, [x{{[0-9+]}}], #2
; CHECK: ret
; CHECK: ; %bb.0:
; CHECK-NEXT: strh w2, [x0], #2
; CHECK-NEXT: ret
%incdec.ptr = getelementptr inbounds i16, i16* %ptr, i64 1
store i16 %spacing, i16* %ptr, align 4
ret i16* %incdec.ptr
@@ -68,9 +78,11 @@ define i16* @store16(i16* %ptr, i16 %index, i16 %spacing) {

define i16* @store16idxpos256(i16* %ptr, i16 %index, i16 %spacing) {
; CHECK-LABEL: store16idxpos256:
; CHECK: add x{{[0-9+]}}, x{{[0-9+]}}, #256
; CHECK: strh w{{[0-9+]}}, [x{{[0-9+]}}]
; CHECK: ret
; CHECK: ; %bb.0:
; CHECK-NEXT: add x8, x0, #256
; CHECK-NEXT: strh w2, [x0]
; CHECK-NEXT: mov x0, x8
; CHECK-NEXT: ret
%incdec.ptr = getelementptr inbounds i16, i16* %ptr, i64 128
store i16 %spacing, i16* %ptr, align 4
ret i16* %incdec.ptr
@@ -78,8 +90,9 @@ define i16* @store16idxpos256(i16* %ptr, i16 %index, i16 %spacing) {

define i16* @store16idxneg256(i16* %ptr, i16 %index, i16 %spacing) {
; CHECK-LABEL: store16idxneg256:
; CHECK: strh w{{[0-9+]}}, [x{{[0-9+]}}], #-256
; CHECK: ret
; CHECK: ; %bb.0:
; CHECK-NEXT: strh w2, [x0], #-256
; CHECK-NEXT: ret
%incdec.ptr = getelementptr inbounds i16, i16* %ptr, i64 -128
store i16 %spacing, i16* %ptr, align 4
ret i16* %incdec.ptr
@@ -87,8 +100,9 @@ define i16* @store16idxneg256(i16* %ptr, i16 %index, i16 %spacing) {

define i8* @store8(i8* %ptr, i8 %index, i8 %spacing) {
; CHECK-LABEL: store8:
; CHECK: strb w{{[0-9+]}}, [x{{[0-9+]}}], #1
; CHECK: ret
; CHECK: ; %bb.0:
; CHECK-NEXT: strb w2, [x0], #1
; CHECK-NEXT: ret
%incdec.ptr = getelementptr inbounds i8, i8* %ptr, i64 1
store i8 %spacing, i8* %ptr, align 4
ret i8* %incdec.ptr
@@ -96,9 +110,11 @@ define i8* @store8(i8* %ptr, i8 %index, i8 %spacing) {

define i8* @store8idxpos256(i8* %ptr, i8 %index, i8 %spacing) {
; CHECK-LABEL: store8idxpos256:
; CHECK: add x{{[0-9+]}}, x{{[0-9+]}}, #256
; CHECK: strb w{{[0-9+]}}, [x{{[0-9+]}}]
; CHECK: ret
; CHECK: ; %bb.0:
; CHECK-NEXT: add x8, x0, #256
; CHECK-NEXT: strb w2, [x0]
; CHECK-NEXT: mov x0, x8
; CHECK-NEXT: ret
%incdec.ptr = getelementptr inbounds i8, i8* %ptr, i64 256
store i8 %spacing, i8* %ptr, align 4
ret i8* %incdec.ptr
@@ -106,8 +122,9 @@ define i8* @store8idxpos256(i8* %ptr, i8 %index, i8 %spacing) {

define i8* @store8idxneg256(i8* %ptr, i8 %index, i8 %spacing) {
; CHECK-LABEL: store8idxneg256:
; CHECK: strb w{{[0-9+]}}, [x{{[0-9+]}}], #-256
; CHECK: ret
; CHECK: ; %bb.0:
; CHECK-NEXT: strb w2, [x0], #-256
; CHECK-NEXT: ret
%incdec.ptr = getelementptr inbounds i8, i8* %ptr, i64 -256
store i8 %spacing, i8* %ptr, align 4
ret i8* %incdec.ptr
@@ -115,8 +132,9 @@ define i8* @store8idxneg256(i8* %ptr, i8 %index, i8 %spacing) {

define i32* @truncst64to32(i32* %ptr, i32 %index, i64 %spacing) {
; CHECK-LABEL: truncst64to32:
; CHECK: str w{{[0-9+]}}, [x{{[0-9+]}}], #4
; CHECK: ret
; CHECK: ; %bb.0:
; CHECK-NEXT: str w2, [x0], #4
; CHECK-NEXT: ret
%incdec.ptr = getelementptr inbounds i32, i32* %ptr, i64 1
%trunc = trunc i64 %spacing to i32
store i32 %trunc, i32* %ptr, align 4
@@ -125,8 +143,9 @@ define i32* @truncst64to32(i32* %ptr, i32 %index, i64 %spacing) {

define i16* @truncst64to16(i16* %ptr, i16 %index, i64 %spacing) {
; CHECK-LABEL: truncst64to16:
; CHECK: strh w{{[0-9+]}}, [x{{[0-9+]}}], #2
; CHECK: ret
; CHECK: ; %bb.0:
; CHECK-NEXT: strh w2, [x0], #2
; CHECK-NEXT: ret
%incdec.ptr = getelementptr inbounds i16, i16* %ptr, i64 1
%trunc = trunc i64 %spacing to i16
store i16 %trunc, i16* %ptr, align 4
@@ -135,8 +154,9 @@ define i16* @truncst64to16(i16* %ptr, i16 %index, i64 %spacing) {

define i8* @truncst64to8(i8* %ptr, i8 %index, i64 %spacing) {
; CHECK-LABEL: truncst64to8:
; CHECK: strb w{{[0-9+]}}, [x{{[0-9+]}}], #1
; CHECK: ret
; CHECK: ; %bb.0:
; CHECK-NEXT: strb w2, [x0], #1
; CHECK-NEXT: ret
%incdec.ptr = getelementptr inbounds i8, i8* %ptr, i64 1
%trunc = trunc i64 %spacing to i8
store i8 %trunc, i8* %ptr, align 4
@@ -146,8 +166,9 @@ define i8* @truncst64to8(i8* %ptr, i8 %index, i64 %spacing) {

define half* @storef16(half* %ptr, half %index, half %spacing) nounwind {
; CHECK-LABEL: storef16:
; CHECK: str h{{[0-9+]}}, [x{{[0-9+]}}], #2
; CHECK: ret
; CHECK: ; %bb.0:
; CHECK-NEXT: str h1, [x0], #2
; CHECK-NEXT: ret
%incdec.ptr = getelementptr inbounds half, half* %ptr, i64 1
store half %spacing, half* %ptr, align 2
ret half* %incdec.ptr
@@ -155,8 +176,9 @@ define half* @storef16(half* %ptr, half %index, half %spacing) nounwind {

define float* @storef32(float* %ptr, float %index, float %spacing) {
; CHECK-LABEL: storef32:
; CHECK: str s{{[0-9+]}}, [x{{[0-9+]}}], #4
; CHECK: ret
; CHECK: ; %bb.0:
; CHECK-NEXT: str s1, [x0], #4
; CHECK-NEXT: ret
%incdec.ptr = getelementptr inbounds float, float* %ptr, i64 1
store float %spacing, float* %ptr, align 4
ret float* %incdec.ptr
@@ -164,8 +186,9 @@ define float* @storef32(float* %ptr, float %index, float %spacing) {

define double* @storef64(double* %ptr, double %index, double %spacing) {
; CHECK-LABEL: storef64:
; CHECK: str d{{[0-9+]}}, [x{{[0-9+]}}], #8
; CHECK: ret
; CHECK: ; %bb.0:
; CHECK-NEXT: str d1, [x0], #8
; CHECK-NEXT: ret
%incdec.ptr = getelementptr inbounds double, double* %ptr, i64 1
store double %spacing, double* %ptr, align 4
ret double* %incdec.ptr
@@ -174,8 +197,9 @@ define double* @storef64(double* %ptr, double %index, double %spacing) {

define double* @pref64(double* %ptr, double %spacing) {
; CHECK-LABEL: pref64:
; CHECK: str d0, [x0, #32]!
; CHECK-NEXT: ret
; CHECK: ; %bb.0:
; CHECK-NEXT: str d0, [x0, #32]!
; CHECK-NEXT: ret
%incdec.ptr = getelementptr inbounds double, double* %ptr, i64 4
store double %spacing, double* %incdec.ptr, align 4
ret double *%incdec.ptr
@@ -183,8 +207,9 @@ define double* @pref64(double* %ptr, double %spacing) {

define float* @pref32(float* %ptr, float %spacing) {
; CHECK-LABEL: pref32:
; CHECK: str s0, [x0, #12]!
; CHECK-NEXT: ret
; CHECK: ; %bb.0:
; CHECK-NEXT: str s0, [x0, #12]!
; CHECK-NEXT: ret
%incdec.ptr = getelementptr inbounds float, float* %ptr, i64 3
store float %spacing, float* %incdec.ptr, align 4
ret float *%incdec.ptr
@@ -192,8 +217,9 @@ define float* @pref32(float* %ptr, float %spacing) {

define half* @pref16(half* %ptr, half %spacing) nounwind {
; CHECK-LABEL: pref16:
; CHECK: str h0, [x0, #6]!
; CHECK-NEXT: ret
; CHECK: ; %bb.0:
; CHECK-NEXT: str h0, [x0, #6]!
; CHECK-NEXT: ret
%incdec.ptr = getelementptr inbounds half, half* %ptr, i64 3
store half %spacing, half* %incdec.ptr, align 2
ret half *%incdec.ptr
@@ -201,8 +227,9 @@ define half* @pref16(half* %ptr, half %spacing) nounwind {

define i64* @pre64(i64* %ptr, i64 %spacing) {
; CHECK-LABEL: pre64:
; CHECK: str x1, [x0, #16]!
; CHECK-NEXT: ret
; CHECK: ; %bb.0:
; CHECK-NEXT: str x1, [x0, #16]!
; CHECK-NEXT: ret
%incdec.ptr = getelementptr inbounds i64, i64* %ptr, i64 2
store i64 %spacing, i64* %incdec.ptr, align 4
ret i64 *%incdec.ptr
@@ -210,10 +237,11 @@ define i64* @pre64(i64* %ptr, i64 %spacing) {

define i64* @pre64idxpos256(i64* %ptr, i64 %spacing) {
; CHECK-LABEL: pre64idxpos256:
; CHECK: add x8, x0, #256
; CHECK-NEXT: str x1, [x0, #256]
; CHECK-NEXT: mov x0, x8
; CHECK-NEXT: ret
; CHECK: ; %bb.0:
; CHECK-NEXT: add x8, x0, #256
; CHECK-NEXT: str x1, [x0, #256]
; CHECK-NEXT: mov x0, x8
; CHECK-NEXT: ret
%incdec.ptr = getelementptr inbounds i64, i64* %ptr, i64 32
store i64 %spacing, i64* %incdec.ptr, align 4
ret i64 *%incdec.ptr
@@ -221,8 +249,9 @@ define i64* @pre64idxpos256(i64* %ptr, i64 %spacing) {

define i64* @pre64idxneg256(i64* %ptr, i64 %spacing) {
; CHECK-LABEL: pre64idxneg256:
; CHECK: str x1, [x0, #-256]!
; CHECK-NEXT: ret
; CHECK: ; %bb.0:
; CHECK-NEXT: str x1, [x0, #-256]!
; CHECK-NEXT: ret
%incdec.ptr = getelementptr inbounds i64, i64* %ptr, i64 -32
store i64 %spacing, i64* %incdec.ptr, align 4
ret i64 *%incdec.ptr
@@ -230,8 +259,9 @@ define i64* @pre64idxneg256(i64* %ptr, i64 %spacing) {

define i32* @pre32(i32* %ptr, i32 %spacing) {
; CHECK-LABEL: pre32:
; CHECK: str w1, [x0, #8]!
; CHECK-NEXT: ret
; CHECK: ; %bb.0:
; CHECK-NEXT: str w1, [x0, #8]!
; CHECK-NEXT: ret
%incdec.ptr = getelementptr inbounds i32, i32* %ptr, i64 2
store i32 %spacing, i32* %incdec.ptr, align 4
ret i32 *%incdec.ptr
@@ -239,10 +269,11 @@ define i32* @pre32(i32* %ptr, i32 %spacing) {

define i32* @pre32idxpos256(i32* %ptr, i32 %spacing) {
; CHECK-LABEL: pre32idxpos256:
; CHECK: add x8, x0, #256
; CHECK-NEXT: str w1, [x0, #256]
; CHECK-NEXT: mov x0, x8
; CHECK-NEXT: ret
; CHECK: ; %bb.0:
; CHECK-NEXT: add x8, x0, #256
; CHECK-NEXT: str w1, [x0, #256]
; CHECK-NEXT: mov x0, x8
; CHECK-NEXT: ret
%incdec.ptr = getelementptr inbounds i32, i32* %ptr, i64 64
store i32 %spacing, i32* %incdec.ptr, align 4
ret i32 *%incdec.ptr
@@ -250,8 +281,9 @@ define i32* @pre32idxpos256(i32* %ptr, i32 %spacing) {

define i32* @pre32idxneg256(i32* %ptr, i32 %spacing) {
; CHECK-LABEL: pre32idxneg256:
; CHECK: str w1, [x0, #-256]!
; CHECK-NEXT: ret
; CHECK: ; %bb.0:
; CHECK-NEXT: str w1, [x0, #-256]!
; CHECK-NEXT: ret
%incdec.ptr = getelementptr inbounds i32, i32* %ptr, i64 -64
store i32 %spacing, i32* %incdec.ptr, align 4
ret i32 *%incdec.ptr
@@ -259,8 +291,9 @@ define i32* @pre32idxneg256(i32* %ptr, i32 %spacing) {

define i16* @pre16(i16* %ptr, i16 %spacing) {
; CHECK-LABEL: pre16:
; CHECK: strh w1, [x0, #4]!
; CHECK-NEXT: ret
; CHECK: ; %bb.0:
; CHECK-NEXT: strh w1, [x0, #4]!
; CHECK-NEXT: ret
%incdec.ptr = getelementptr inbounds i16, i16* %ptr, i64 2
store i16 %spacing, i16* %incdec.ptr, align 4
ret i16 *%incdec.ptr
@@ -268,10 +301,11 @@ define i16* @pre16(i16* %ptr, i16 %spacing) {

define i16* @pre16idxpos256(i16* %ptr, i16 %spacing) {
; CHECK-LABEL: pre16idxpos256:
; CHECK: add x8, x0, #256
; CHECK-NEXT: strh w1, [x0, #256]
; CHECK-NEXT: mov x0, x8
; CHECK-NEXT: ret
; CHECK: ; %bb.0:
; CHECK-NEXT: add x8, x0, #256
; CHECK-NEXT: strh w1, [x0, #256]
; CHECK-NEXT: mov x0, x8
; CHECK-NEXT: ret
%incdec.ptr = getelementptr inbounds i16, i16* %ptr, i64 128
store i16 %spacing, i16* %incdec.ptr, align 4
ret i16 *%incdec.ptr
@@ -279,8 +313,9 @@ define i16* @pre16idxpos256(i16* %ptr, i16 %spacing) {

define i16* @pre16idxneg256(i16* %ptr, i16 %spacing) {
; CHECK-LABEL: pre16idxneg256:
; CHECK: strh w1, [x0, #-256]!
; CHECK-NEXT: ret
; CHECK: ; %bb.0:
; CHECK-NEXT: strh w1, [x0, #-256]!
; CHECK-NEXT: ret
%incdec.ptr = getelementptr inbounds i16, i16* %ptr, i64 -128
store i16 %spacing, i16* %incdec.ptr, align 4
ret i16 *%incdec.ptr
@@ -288,8 +323,9 @@ define i16* @pre16idxneg256(i16* %ptr, i16 %spacing) {

define i8* @pre8(i8* %ptr, i8 %spacing) {
; CHECK-LABEL: pre8:
; CHECK: strb w1, [x0, #2]!
; CHECK-NEXT: ret
; CHECK: ; %bb.0:
; CHECK-NEXT: strb w1, [x0, #2]!
; CHECK-NEXT: ret
%incdec.ptr = getelementptr inbounds i8, i8* %ptr, i64 2
store i8 %spacing, i8* %incdec.ptr, align 4
ret i8 *%incdec.ptr
@@ -297,10 +333,11 @@ define i8* @pre8(i8* %ptr, i8 %spacing) {

define i8* @pre8idxpos256(i8* %ptr, i8 %spacing) {
; CHECK-LABEL: pre8idxpos256:
; CHECK: add x8, x0, #256
; CHECK-NEXT: strb w1, [x0, #256]
; CHECK-NEXT: mov x0, x8
; CHECK-NEXT: ret
; CHECK: ; %bb.0:
; CHECK-NEXT: add x8, x0, #256
; CHECK-NEXT: strb w1, [x0, #256]
; CHECK-NEXT: mov x0, x8
; CHECK-NEXT: ret
%incdec.ptr = getelementptr inbounds i8, i8* %ptr, i64 256
store i8 %spacing, i8* %incdec.ptr, align 4
ret i8 *%incdec.ptr
@@ -308,8 +345,9 @@ define i8* @pre8idxpos256(i8* %ptr, i8 %spacing) {

define i8* @pre8idxneg256(i8* %ptr, i8 %spacing) {
; CHECK-LABEL: pre8idxneg256:
; CHECK: strb w1, [x0, #-256]!
; CHECK-NEXT: ret
; CHECK: ; %bb.0:
; CHECK-NEXT: strb w1, [x0, #-256]!
; CHECK-NEXT: ret
%incdec.ptr = getelementptr inbounds i8, i8* %ptr, i64 -256
store i8 %spacing, i8* %incdec.ptr, align 4
ret i8 *%incdec.ptr
@@ -317,8 +355,9 @@ define i8* @pre8idxneg256(i8* %ptr, i8 %spacing) {

define i32* @pretrunc64to32(i32* %ptr, i64 %spacing) {
; CHECK-LABEL: pretrunc64to32:
; CHECK: str w1, [x0, #8]!
; CHECK-NEXT: ret
; CHECK: ; %bb.0:
; CHECK-NEXT: str w1, [x0, #8]!
; CHECK-NEXT: ret
%incdec.ptr = getelementptr inbounds i32, i32* %ptr, i64 2
%trunc = trunc i64 %spacing to i32
store i32 %trunc, i32* %incdec.ptr, align 4
@@ -327,8 +366,9 @@ define i32* @pretrunc64to32(i32* %ptr, i64 %spacing) {

define i16* @pretrunc64to16(i16* %ptr, i64 %spacing) {
; CHECK-LABEL: pretrunc64to16:
; CHECK: strh w1, [x0, #4]!
; CHECK-NEXT: ret
; CHECK: ; %bb.0:
; CHECK-NEXT: strh w1, [x0, #4]!
; CHECK-NEXT: ret
%incdec.ptr = getelementptr inbounds i16, i16* %ptr, i64 2
%trunc = trunc i64 %spacing to i16
store i16 %trunc, i16* %incdec.ptr, align 4
@@ -337,8 +377,9 @@ define i16* @pretrunc64to16(i16* %ptr, i64 %spacing) {

define i8* @pretrunc64to8(i8* %ptr, i64 %spacing) {
; CHECK-LABEL: pretrunc64to8:
; CHECK: strb w1, [x0, #2]!
; CHECK-NEXT: ret
; CHECK: ; %bb.0:
; CHECK-NEXT: strb w1, [x0, #2]!
; CHECK-NEXT: ret
%incdec.ptr = getelementptr inbounds i8, i8* %ptr, i64 2
%trunc = trunc i64 %spacing to i8
store i8 %trunc, i8* %incdec.ptr, align 4
@@ -350,9 +391,10 @@ define i8* @pretrunc64to8(i8* %ptr, i64 %spacing) {
;-----
define double* @preidxf64(double* %src, double* %out) {
; CHECK-LABEL: preidxf64:
; CHECK: ldr d0, [x0, #8]!
; CHECK: str d0, [x1]
; CHECK: ret
; CHECK: ; %bb.0:
; CHECK-NEXT: ldr d0, [x0, #8]!
; CHECK-NEXT: str d0, [x1]
; CHECK-NEXT: ret
%ptr = getelementptr inbounds double, double* %src, i64 1
%tmp = load double, double* %ptr, align 4
store double %tmp, double* %out, align 4
@@ -361,9 +403,10 @@ define double* @preidxf64(double* %src, double* %out) {

define float* @preidxf32(float* %src, float* %out) {
; CHECK-LABEL: preidxf32:
; CHECK: ldr s0, [x0, #4]!
; CHECK: str s0, [x1]
; CHECK: ret
; CHECK: ; %bb.0:
; CHECK-NEXT: ldr s0, [x0, #4]!
; CHECK-NEXT: str s0, [x1]
; CHECK-NEXT: ret
%ptr = getelementptr inbounds float, float* %src, i64 1
%tmp = load float, float* %ptr, align 4
store float %tmp, float* %out, align 4
@@ -372,9 +415,10 @@ define float* @preidxf32(float* %src, float* %out) {

define half* @preidxf16(half* %src, half* %out) {
; CHECK-LABEL: preidxf16:
; CHECK: ldr h0, [x0, #2]!
; CHECK: str h0, [x1]
; CHECK: ret
; CHECK: ; %bb.0:
; CHECK-NEXT: ldr h0, [x0, #2]!
; CHECK-NEXT: str h0, [x1]
; CHECK-NEXT: ret
%ptr = getelementptr inbounds half, half* %src, i64 1
%tmp = load half, half* %ptr, align 2
store half %tmp, half* %out, align 2
@@ -383,9 +427,10 @@ define half* @preidxf16(half* %src, half* %out) {

define i64* @preidx64(i64* %src, i64* %out) {
; CHECK-LABEL: preidx64:
; CHECK: ldr x[[REG:[0-9]+]], [x0, #8]!
; CHECK: str x[[REG]], [x1]
; CHECK: ret
; CHECK: ; %bb.0:
; CHECK-NEXT: ldr x8, [x0, #8]!
; CHECK-NEXT: str x8, [x1]
; CHECK-NEXT: ret
%ptr = getelementptr inbounds i64, i64* %src, i64 1
%tmp = load i64, i64* %ptr, align 4
store i64 %tmp, i64* %out, align 4
@@ -393,9 +438,11 @@ define i64* @preidx64(i64* %src, i64* %out) {
}

define i32* @preidx32(i32* %src, i32* %out) {
; CHECK: ldr w[[REG:[0-9]+]], [x0, #4]!
; CHECK: str w[[REG]], [x1]
; CHECK: ret
; CHECK-LABEL: preidx32:
; CHECK: ; %bb.0:
; CHECK-NEXT: ldr w8, [x0, #4]!
; CHECK-NEXT: str w8, [x1]
; CHECK-NEXT: ret
%ptr = getelementptr inbounds i32, i32* %src, i64 1
%tmp = load i32, i32* %ptr, align 4
store i32 %tmp, i32* %out, align 4
@@ -403,9 +450,11 @@ define i32* @preidx32(i32* %src, i32* %out) {
}

define i16* @preidx16zext32(i16* %src, i32* %out) {
; CHECK: ldrh w[[REG:[0-9]+]], [x0, #2]!
; CHECK: str w[[REG]], [x1]
; CHECK: ret
; CHECK-LABEL: preidx16zext32:
; CHECK: ; %bb.0:
; CHECK-NEXT: ldrh w8, [x0, #2]!
; CHECK-NEXT: str w8, [x1]
; CHECK-NEXT: ret
%ptr = getelementptr inbounds i16, i16* %src, i64 1
%tmp = load i16, i16* %ptr, align 4
%ext = zext i16 %tmp to i32
@@ -414,9 +463,11 @@ define i16* @preidx16zext32(i16* %src, i32* %out) {
}

define i16* @preidx16zext64(i16* %src, i64* %out) {
; CHECK: ldrh w[[REG:[0-9]+]], [x0, #2]!
; CHECK: str x[[REG]], [x1]
; CHECK: ret
; CHECK-LABEL: preidx16zext64:
; CHECK: ; %bb.0:
; CHECK-NEXT: ldrh w8, [x0, #2]!
; CHECK-NEXT: str x8, [x1]
; CHECK-NEXT: ret
%ptr = getelementptr inbounds i16, i16* %src, i64 1
%tmp = load i16, i16* %ptr, align 4
%ext = zext i16 %tmp to i64
@@ -425,9 +476,11 @@ define i16* @preidx16zext64(i16* %src, i64* %out) {
}

define i8* @preidx8zext32(i8* %src, i32* %out) {
; CHECK: ldrb w[[REG:[0-9]+]], [x0, #1]!
; CHECK: str w[[REG]], [x1]
; CHECK: ret
; CHECK-LABEL: preidx8zext32:
; CHECK: ; %bb.0:
; CHECK-NEXT: ldrb w8, [x0, #1]!
; CHECK-NEXT: str w8, [x1]
; CHECK-NEXT: ret
%ptr = getelementptr inbounds i8, i8* %src, i64 1
%tmp = load i8, i8* %ptr, align 4
%ext = zext i8 %tmp to i32
@@ -436,9 +489,11 @@ define i8* @preidx8zext32(i8* %src, i32* %out) {
}

define i8* @preidx8zext64(i8* %src, i64* %out) {
; CHECK: ldrb w[[REG:[0-9]+]], [x0, #1]!
; CHECK: str x[[REG]], [x1]
; CHECK: ret
; CHECK-LABEL: preidx8zext64:
; CHECK: ; %bb.0:
; CHECK-NEXT: ldrb w8, [x0, #1]!
; CHECK-NEXT: str x8, [x1]
; CHECK-NEXT: ret
%ptr = getelementptr inbounds i8, i8* %src, i64 1
%tmp = load i8, i8* %ptr, align 4
%ext = zext i8 %tmp to i64
@@ -447,9 +502,11 @@ define i8* @preidx8zext64(i8* %src, i64* %out) {
}

define i32* @preidx32sext64(i32* %src, i64* %out) {
; CHECK: ldrsw x[[REG:[0-9]+]], [x0, #4]!
; CHECK: str x[[REG]], [x1]
; CHECK: ret
; CHECK-LABEL: preidx32sext64:
; CHECK: ; %bb.0:
; CHECK-NEXT: ldrsw x8, [x0, #4]!
; CHECK-NEXT: str x8, [x1]
; CHECK-NEXT: ret
%ptr = getelementptr inbounds i32, i32* %src, i64 1
%tmp = load i32, i32* %ptr, align 4
%ext = sext i32 %tmp to i64
@@ -458,9 +515,11 @@ define i32* @preidx32sext64(i32* %src, i64* %out) {
}

define i16* @preidx16sext32(i16* %src, i32* %out) {
; CHECK: ldrsh w[[REG:[0-9]+]], [x0, #2]!
; CHECK: str w[[REG]], [x1]
; CHECK: ret
; CHECK-LABEL: preidx16sext32:
; CHECK: ; %bb.0:
; CHECK-NEXT: ldrsh w8, [x0, #2]!
; CHECK-NEXT: str w8, [x1]
; CHECK-NEXT: ret
%ptr = getelementptr inbounds i16, i16* %src, i64 1
%tmp = load i16, i16* %ptr, align 4
%ext = sext i16 %tmp to i32
@@ -469,9 +528,11 @@ define i16* @preidx16sext32(i16* %src, i32* %out) {
}

define i16* @preidx16sext64(i16* %src, i64* %out) {
; CHECK: ldrsh x[[REG:[0-9]+]], [x0, #2]!
; CHECK: str x[[REG]], [x1]
; CHECK: ret
; CHECK-LABEL: preidx16sext64:
; CHECK: ; %bb.0:
; CHECK-NEXT: ldrsh x8, [x0, #2]!
; CHECK-NEXT: str x8, [x1]
; CHECK-NEXT: ret
%ptr = getelementptr inbounds i16, i16* %src, i64 1
%tmp = load i16, i16* %ptr, align 4
%ext = sext i16 %tmp to i64
@@ -480,9 +541,11 @@ define i16* @preidx16sext64(i16* %src, i64* %out) {
}

define i8* @preidx8sext32(i8* %src, i32* %out) {
; CHECK: ldrsb w[[REG:[0-9]+]], [x0, #1]!
; CHECK: str w[[REG]], [x1]
; CHECK: ret
; CHECK-LABEL: preidx8sext32:
; CHECK: ; %bb.0:
; CHECK-NEXT: ldrsb w8, [x0, #1]!
; CHECK-NEXT: str w8, [x1]
; CHECK-NEXT: ret
%ptr = getelementptr inbounds i8, i8* %src, i64 1
%tmp = load i8, i8* %ptr, align 4
%ext = sext i8 %tmp to i32
@@ -491,9 +554,11 @@ define i8* @preidx8sext32(i8* %src, i32* %out) {
}

define i8* @preidx8sext64(i8* %src, i64* %out) {
; CHECK: ldrsb x[[REG:[0-9]+]], [x0, #1]!
; CHECK: str x[[REG]], [x1]
; CHECK: ret
; CHECK-LABEL: preidx8sext64:
; CHECK: ; %bb.0:
; CHECK-NEXT: ldrsb x8, [x0, #1]!
; CHECK-NEXT: str x8, [x1]
; CHECK-NEXT: ret
%ptr = getelementptr inbounds i8, i8* %src, i64 1
%tmp = load i8, i8* %ptr, align 4
%ext = sext i8 %tmp to i64
@@ -504,8 +569,19 @@ define i8* @preidx8sext64(i8* %src, i64* %out) {
; This test checks if illegal post-index is generated

define i64* @postidx_clobber(i64* %addr) nounwind noinline ssp {
; CHECK-LABEL: postidx_clobber:
; CHECK-NOT: str x0, [x0], #8
; CHECK64-LABEL: postidx_clobber:
; CHECK64: ; %bb.0:
; CHECK64-NEXT: mov x8, x0
; CHECK64-NEXT: str x0, [x8], #8
; CHECK64-NEXT: mov x0, x8
; CHECK64-NEXT: ret
;
; CHECK32-LABEL: postidx_clobber:
; CHECK32: ; %bb.0:
; CHECK32-NEXT: add w8, w0, #8
; CHECK32-NEXT: str w0, [x0]
; CHECK32-NEXT: mov x0, x8
; CHECK32-NEXT: ret
; ret
%paddr = bitcast i64* %addr to i64**
store i64* %addr, i64** %paddr
File diff suppressed because it is too large
@@ -1,27 +1,40 @@
; RUN: llc < %s -mtriple=arm64-apple-ios -aarch64-neon-syntax=apple -no-integrated-as -disable-post-ra | FileCheck %s
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=arm64-apple-ios -aarch64-neon-syntax=apple -no-integrated-as | FileCheck %s

; rdar://9167275

define i32 @t1() nounwind ssp {
entry:
; CHECK-LABEL: t1:
; CHECK: mov {{w[0-9]+}}, 7
; CHECK: ; %bb.0: ; %entry
; CHECK-NEXT: ; InlineAsm Start
; CHECK-NEXT: mov w0, 7
; CHECK-NEXT: ; InlineAsm End
; CHECK-NEXT: ret
entry:
%0 = tail call i32 asm "mov ${0:w}, 7", "=r"() nounwind
ret i32 %0
}

define i64 @t2() nounwind ssp {
entry:
; CHECK-LABEL: t2:
; CHECK: mov {{x[0-9]+}}, 7
; CHECK: ; %bb.0: ; %entry
; CHECK-NEXT: ; InlineAsm Start
; CHECK-NEXT: mov x0, 7
; CHECK-NEXT: ; InlineAsm End
; CHECK-NEXT: ret
entry:
%0 = tail call i64 asm "mov $0, 7", "=r"() nounwind
ret i64 %0
}

define i64 @t3() nounwind ssp {
entry:
; CHECK-LABEL: t3:
; CHECK: mov {{w[0-9]+}}, 7
; CHECK: ; %bb.0: ; %entry
; CHECK-NEXT: ; InlineAsm Start
; CHECK-NEXT: mov w0, 7
; CHECK-NEXT: ; InlineAsm End
; CHECK-NEXT: ret
entry:
%0 = tail call i64 asm "mov ${0:w}, 7", "=r"() nounwind
ret i64 %0
}
@@ -29,9 +42,14 @@ entry:
; rdar://9281206

define void @t4(i64 %op) nounwind {
entry:
; CHECK-LABEL: t4:
; CHECK: mov x0, {{x[0-9]+}}; svc #0
; CHECK: ; %bb.0: ; %entry
; CHECK-NEXT: mov x8, x0
; CHECK-NEXT: ; InlineAsm Start
; CHECK-NEXT: mov x0, x8; svc #0;
; CHECK-NEXT: ; InlineAsm End
; CHECK-NEXT: ret
entry:
%0 = tail call i64 asm sideeffect "mov x0, $1; svc #0;", "=r,r,r,~{x0}"(i64 %op, i64 undef) nounwind
ret void
}
@@ -39,9 +57,13 @@ entry:
; rdar://9394290

define float @t5(float %x) nounwind {
entry:
; CHECK-LABEL: t5:
; CHECK: fadd {{s[0-9]+}}, {{s[0-9]+}}, {{s[0-9]+}}
; CHECK: ; %bb.0: ; %entry
; CHECK-NEXT: ; InlineAsm Start
; CHECK-NEXT: fadd s0, s0, s0
; CHECK-NEXT: ; InlineAsm End
; CHECK-NEXT: ret
entry:
%0 = tail call float asm "fadd ${0:s}, ${0:s}, ${0:s}", "=w,0"(float %x) nounwind
ret float %0
}
@@ -49,19 +71,32 @@ entry:
; rdar://9553599

define zeroext i8 @t6(i8* %src) nounwind {
entry:
; CHECK-LABEL: t6:
; CHECK: ldtrb {{w[0-9]+}}, [{{x[0-9]+}}]
; CHECK: ; %bb.0: ; %entry
; CHECK-NEXT: ; InlineAsm Start
; CHECK-NEXT: ldtrb w8, [x0]
; CHECK-NEXT: ; InlineAsm End
; CHECK-NEXT: and w0, w8, #0xff
; CHECK-NEXT: ret
entry:
%0 = tail call i8 asm "ldtrb ${0:w}, [$1]", "=r,r"(i8* %src) nounwind
ret i8 %0
}

define void @t7(i8* %f, i32 %g) nounwind {
; CHECK-LABEL: t7:
; CHECK: ; %bb.0: ; %entry
; CHECK-NEXT: sub sp, sp, #16
; CHECK-NEXT: str x0, [sp, #8]
; CHECK-NEXT: add x8, sp, #8
; CHECK-NEXT: ; InlineAsm Start
; CHECK-NEXT: str w1, [x8]
; CHECK-NEXT: ; InlineAsm End
; CHECK-NEXT: add sp, sp, #16
; CHECK-NEXT: ret
entry:
%f.addr = alloca i8*, align 8
store i8* %f, i8** %f.addr, align 8
; CHECK-LABEL: t7:
; CHECK: str {{w[0-9]+}}, [{{x[0-9]+}}]
call void asm "str ${1:w}, $0", "=*Q,r"(i8** %f.addr, i32 %g) nounwind
ret void
}
@@ -70,144 +105,261 @@ entry:
; ARM64TargetLowering::getRegForInlineAsmConstraint() should recognize 'v'
; registers.
define void @t8() nounwind ssp {
entry:
; CHECK-LABEL: t8:
; CHECK: stp {{d[0-9]+}}, {{d[0-9]+}}, [sp, #-16]
; CHECK: ; %bb.0: ; %entry
; CHECK-NEXT: stp d9, d8, [sp, #-16]! ; 16-byte Folded Spill
; CHECK-NEXT: ; InlineAsm Start
; CHECK-NEXT: nop
; CHECK-NEXT: ; InlineAsm End
; CHECK-NEXT: ldp d9, d8, [sp], #16 ; 16-byte Folded Reload
; CHECK-NEXT: ret
entry:
tail call void asm sideeffect "nop", "~{v8}"() nounwind
ret void
}

define i32 @constraint_I(i32 %i, i32 %j) nounwind {
; CHECK-LABEL: constraint_I:
; CHECK: ; %bb.0: ; %entry
; CHECK-NEXT: ; InlineAsm Start
; CHECK-NEXT: add w8, w0, 16773120
; CHECK-NEXT: ; InlineAsm End
; CHECK-NEXT: ; InlineAsm Start
; CHECK-NEXT: add w0, w0, 4096
; CHECK-NEXT: ; InlineAsm End
; CHECK-NEXT: ret
entry:
; CHECK-LABEL: constraint_I:
%0 = tail call i32 asm sideeffect "add ${0:w}, ${1:w}, $2", "=r,r,I"(i32 %i, i32 16773120) nounwind
; CHECK: add {{w[0-9]+}}, {{w[0-9]+}}, 16773120
%1 = tail call i32 asm sideeffect "add ${0:w}, ${1:w}, $2", "=r,r,I"(i32 %i, i32 4096) nounwind
; CHECK: add {{w[0-9]+}}, {{w[0-9]+}}, 4096
ret i32 %1
}

define i32 @constraint_J(i32 %i, i32 %j, i64 %k) nounwind {
; CHECK-LABEL: constraint_J:
; CHECK: ; %bb.0: ; %entry
; CHECK-NEXT: ; InlineAsm Start
; CHECK-NEXT: sub w8, w0, -16773120
; CHECK-NEXT: ; InlineAsm End
; CHECK-NEXT: ; InlineAsm Start
; CHECK-NEXT: sub w0, w0, -1
; CHECK-NEXT: ; InlineAsm End
; CHECK-NEXT: ; InlineAsm Start
; CHECK-NEXT: sub x8, x2, -1
; CHECK-NEXT: ; InlineAsm End
; CHECK-NEXT: ; InlineAsm Start
; CHECK-NEXT: sub x8, x2, -1
; CHECK-NEXT: ; InlineAsm End
; CHECK-NEXT: ret
entry:
; CHECK-LABEL: constraint_J:
%0 = tail call i32 asm sideeffect "sub ${0:w}, ${1:w}, $2", "=r,r,J"(i32 %i, i32 -16773120) nounwind
; CHECK: sub {{w[0-9]+}}, {{w[0-9]+}}, -16773120
%1 = tail call i32 asm sideeffect "sub ${0:w}, ${1:w}, $2", "=r,r,J"(i32 %i, i32 -1) nounwind
; CHECK: sub {{w[0-9]+}}, {{w[0-9]+}}, -1
%2 = tail call i64 asm sideeffect "sub ${0:x}, ${1:x}, $2", "=r,r,J"(i64 %k, i32 -1) nounwind
; CHECK: sub {{x[0-9]+}}, {{x[0-9]+}}, -1
%3 = tail call i64 asm sideeffect "sub ${0:x}, ${1:x}, $2", "=r,r,J"(i64 %k, i64 -1) nounwind
; CHECK: sub {{x[0-9]+}}, {{x[0-9]+}}, -1
ret i32 %1
}

define i32 @constraint_KL(i32 %i, i32 %j) nounwind {
; CHECK-LABEL: constraint_KL:
; CHECK: ; %bb.0: ; %entry
; CHECK-NEXT: ; InlineAsm Start
; CHECK-NEXT: eor w8, w0, 255
; CHECK-NEXT: ; InlineAsm End
; CHECK-NEXT: ; InlineAsm Start
; CHECK-NEXT: eor w0, w0, 16711680
; CHECK-NEXT: ; InlineAsm End
; CHECK-NEXT: ret
entry:
; CHECK-LABEL: constraint_KL:
%0 = tail call i32 asm sideeffect "eor ${0:w}, ${1:w}, $2", "=r,r,K"(i32 %i, i32 255) nounwind
; CHECK: eor {{w[0-9]+}}, {{w[0-9]+}}, 255
%1 = tail call i32 asm sideeffect "eor ${0:w}, ${1:w}, $2", "=r,r,L"(i32 %i, i64 16711680) nounwind
; CHECK: eor {{w[0-9]+}}, {{w[0-9]+}}, 16711680
ret i32 %1
}

define i32 @constraint_MN(i32 %i, i32 %j) nounwind {
; CHECK-LABEL: constraint_MN:
; CHECK: ; %bb.0: ; %entry
; CHECK-NEXT: ; InlineAsm Start
; CHECK-NEXT: movk w8, 65535
; CHECK-NEXT: ; InlineAsm End
; CHECK-NEXT: ; InlineAsm Start
; CHECK-NEXT: movz w0, 0
; CHECK-NEXT: ; InlineAsm End
; CHECK-NEXT: ret
entry:
; CHECK-LABEL: constraint_MN:
%0 = tail call i32 asm sideeffect "movk ${0:w}, $1", "=r,M"(i32 65535) nounwind
; CHECK: movk {{w[0-9]+}}, 65535
%1 = tail call i32 asm sideeffect "movz ${0:w}, $1", "=r,N"(i64 0) nounwind
; CHECK: movz {{w[0-9]+}}, 0
ret i32 %1
}

define void @t9() nounwind {
; CHECK-LABEL: t9:
; CHECK: ; %bb.0: ; %entry
; CHECK-NEXT: sub sp, sp, #16
; CHECK-NEXT: ldr q0, [sp], #16
; CHECK-NEXT: ; InlineAsm Start
; CHECK-NEXT: mov.2d v4, v0
; CHECK-EMPTY:
; CHECK-NEXT: ; InlineAsm End
; CHECK-NEXT: ret
entry:
; CHECK-LABEL: t9:
%data = alloca <2 x double>, align 16
%0 = load <2 x double>, <2 x double>* %data, align 16
call void asm sideeffect "mov.2d v4, $0\0A", "w,~{v4}"(<2 x double> %0) nounwind
; CHECK: mov.2d v4, {{v[0-9]+}}
ret void
}

define void @t10() nounwind {
; CHECK-LABEL: t10:
; CHECK: ; %bb.0: ; %entry
; CHECK-NEXT: sub sp, sp, #16
; CHECK-NEXT: ldr d0, [sp, #8]
; CHECK-NEXT: mov x8, sp
; CHECK-NEXT: ; InlineAsm Start
; CHECK-NEXT: ldr z0, [x8]
; CHECK-EMPTY:
; CHECK-NEXT: ; InlineAsm End
; CHECK-NEXT: ; InlineAsm Start
; CHECK-NEXT: ldr q0, [x8]
; CHECK-EMPTY:
; CHECK-NEXT: ; InlineAsm End
; CHECK-NEXT: ; InlineAsm Start
; CHECK-NEXT: ldr d0, [x8]
; CHECK-EMPTY:
; CHECK-NEXT: ; InlineAsm End
; CHECK-NEXT: ; InlineAsm Start
; CHECK-NEXT: ldr s0, [x8]
; CHECK-EMPTY:
; CHECK-NEXT: ; InlineAsm End
; CHECK-NEXT: ; InlineAsm Start
; CHECK-NEXT: ldr h0, [x8]
; CHECK-EMPTY:
; CHECK-NEXT: ; InlineAsm End
; CHECK-NEXT: ; InlineAsm Start
; CHECK-NEXT: ldr b0, [x8]
; CHECK-EMPTY:
; CHECK-NEXT: ; InlineAsm End
; CHECK-NEXT: add sp, sp, #16
; CHECK-NEXT: ret
entry:
; CHECK-LABEL: t10:
%data = alloca <2 x float>, align 8
%a = alloca [2 x float], align 4
%arraydecay = getelementptr inbounds [2 x float], [2 x float]* %a, i32 0, i32 0
%0 = load <2 x float>, <2 x float>* %data, align 8
call void asm sideeffect "ldr ${1:z}, [$0]\0A", "r,w"(float* %arraydecay, <2 x float> %0) nounwind
; CHECK: ldr {{z[0-9]+}}, [{{x[0-9]+}}]
call void asm sideeffect "ldr ${1:q}, [$0]\0A", "r,w"(float* %arraydecay, <2 x float> %0) nounwind
; CHECK: ldr {{q[0-9]+}}, [{{x[0-9]+}}]
call void asm sideeffect "ldr ${1:d}, [$0]\0A", "r,w"(float* %arraydecay, <2 x float> %0) nounwind
; CHECK: ldr {{d[0-9]+}}, [{{x[0-9]+}}]
call void asm sideeffect "ldr ${1:s}, [$0]\0A", "r,w"(float* %arraydecay, <2 x float> %0) nounwind
; CHECK: ldr {{s[0-9]+}}, [{{x[0-9]+}}]
call void asm sideeffect "ldr ${1:h}, [$0]\0A", "r,w"(float* %arraydecay, <2 x float> %0) nounwind
; CHECK: ldr {{h[0-9]+}}, [{{x[0-9]+}}]
call void asm sideeffect "ldr ${1:b}, [$0]\0A", "r,w"(float* %arraydecay, <2 x float> %0) nounwind
; CHECK: ldr {{b[0-9]+}}, [{{x[0-9]+}}]
ret void
}

define void @t11() nounwind {
; CHECK-LABEL: t11:
; CHECK: ; %bb.0: ; %entry
; CHECK-NEXT: sub sp, sp, #16
; CHECK-NEXT: ldr w8, [sp, #12]
; CHECK-NEXT: ; InlineAsm Start
; CHECK-NEXT: mov xzr, x8
; CHECK-EMPTY:
; CHECK-NEXT: ; InlineAsm End
; CHECK-NEXT: ldr w8, [sp, #12]
; CHECK-NEXT: ; InlineAsm Start
; CHECK-NEXT: mov wzr, w8
; CHECK-EMPTY:
; CHECK-NEXT: ; InlineAsm End
; CHECK-NEXT: add sp, sp, #16
; CHECK-NEXT: ret
entry:
; CHECK-LABEL: t11:
%a = alloca i32, align 4
%0 = load i32, i32* %a, align 4
call void asm sideeffect "mov ${1:x}, ${0:x}\0A", "r,i"(i32 %0, i32 0) nounwind
; CHECK: mov xzr, {{x[0-9]+}}
%1 = load i32, i32* %a, align 4
call void asm sideeffect "mov ${1:w}, ${0:w}\0A", "r,i"(i32 %1, i32 0) nounwind
; CHECK: mov wzr, {{w[0-9]+}}
ret void
}

define void @t12() nounwind {
; CHECK-LABEL: t12:
; CHECK: ; %bb.0: ; %entry
; CHECK-NEXT: sub sp, sp, #16
; CHECK-NEXT: ldr q0, [sp], #16
; CHECK-NEXT: ; InlineAsm Start
; CHECK-NEXT: mov.2d v4, v0
; CHECK-EMPTY:
; CHECK-NEXT: ; InlineAsm End
; CHECK-NEXT: ret
entry:
; CHECK-LABEL: t12:
%data = alloca <4 x float>, align 16
%0 = load <4 x float>, <4 x float>* %data, align 16
call void asm sideeffect "mov.2d v4, $0\0A", "x,~{v4}"(<4 x float> %0) nounwind
; CHECK: mov.2d v4, {{v([0-9])|(1[0-5])}}
ret void
}

define void @t13() nounwind {
; CHECK-LABEL: t13:
; CHECK: ; %bb.0: ; %entry
; CHECK-NEXT: ; InlineAsm Start
; CHECK-NEXT: mov x4, 1311673391471656960
; CHECK-EMPTY:
; CHECK-NEXT: ; InlineAsm End
; CHECK-NEXT: ; InlineAsm Start
; CHECK-NEXT: mov x4, -4662
; CHECK-EMPTY:
; CHECK-NEXT: ; InlineAsm End
; CHECK-NEXT: ; InlineAsm Start
; CHECK-NEXT: mov x4, 4660
; CHECK-EMPTY:
; CHECK-NEXT: ; InlineAsm End
; CHECK-NEXT: ; InlineAsm Start
; CHECK-NEXT: mov x4, -71777214294589696
; CHECK-EMPTY:
; CHECK-NEXT: ; InlineAsm End
; CHECK-NEXT: ret
entry:
; CHECK-LABEL: t13:
tail call void asm sideeffect "mov x4, $0\0A", "N"(i64 1311673391471656960) nounwind
; CHECK: mov x4, 1311673391471656960
tail call void asm sideeffect "mov x4, $0\0A", "N"(i64 -4662) nounwind
; CHECK: mov x4, -4662
tail call void asm sideeffect "mov x4, $0\0A", "N"(i64 4660) nounwind
; CHECK: mov x4, 4660
call void asm sideeffect "mov x4, $0\0A", "N"(i64 -71777214294589696) nounwind
; CHECK: mov x4, -71777214294589696
ret void
}

define void @t14() nounwind {
; CHECK-LABEL: t14:
; CHECK: ; %bb.0: ; %entry
; CHECK-NEXT: ; InlineAsm Start
; CHECK-NEXT: mov w4, 305397760
; CHECK-EMPTY:
; CHECK-NEXT: ; InlineAsm End
; CHECK-NEXT: ; InlineAsm Start
; CHECK-NEXT: mov w4, 4294962634
; CHECK-EMPTY:
; CHECK-NEXT: ; InlineAsm End
; CHECK-NEXT: ; InlineAsm Start
; CHECK-NEXT: mov w4, 4660
; CHECK-EMPTY:
; CHECK-NEXT: ; InlineAsm End
; CHECK-NEXT: ; InlineAsm Start
; CHECK-NEXT: mov w4, 4278255360
; CHECK-EMPTY:
; CHECK-NEXT: ; InlineAsm End
; CHECK-NEXT: ret
entry:
; CHECK-LABEL: t14:
tail call void asm sideeffect "mov w4, $0\0A", "M"(i32 305397760) nounwind
; CHECK: mov w4, 305397760
tail call void asm sideeffect "mov w4, $0\0A", "M"(i32 -4662) nounwind
; CHECK: mov w4, 4294962634
tail call void asm sideeffect "mov w4, $0\0A", "M"(i32 4660) nounwind
; CHECK: mov w4, 4660
call void asm sideeffect "mov w4, $0\0A", "M"(i32 -16711936) nounwind
; CHECK: mov w4, 4278255360
ret void
}

define void @t15() nounwind {
; CHECK-LABEL: t15:
; CHECK: ; %bb.0: ; %entry
; CHECK-NEXT: ; InlineAsm Start
; CHECK-NEXT: fmov x8, d8
; CHECK-NEXT: ; InlineAsm End
; CHECK-NEXT: ret
entry:
%0 = tail call double asm sideeffect "fmov $0, d8", "=r"() nounwind
; CHECK: fmov {{x[0-9]+}}, d8
ret void
}

@@ -215,81 +367,134 @@ entry:

define void @test_zero_reg(i32* %addr) {
; CHECK-LABEL: test_zero_reg:
; CHECK: ; %bb.0:
; CHECK-NEXT: ; InlineAsm Start
; CHECK-NEXT: USE(xzr)
; CHECK-NEXT: ; InlineAsm End
; CHECK-NEXT: ; InlineAsm Start
; CHECK-NEXT: USE(wzr)
; CHECK-NEXT: ; InlineAsm End
; CHECK-NEXT: mov w8, #1
; CHECK-NEXT: ; InlineAsm Start
; CHECK-NEXT: USE(w8)
; CHECK-NEXT: ; InlineAsm End
; CHECK-NEXT: ; InlineAsm Start
; CHECK-NEXT: USE(xzr), USE(xzr)
; CHECK-NEXT: ; InlineAsm End
; CHECK-NEXT: ; InlineAsm Start
; CHECK-NEXT: USE(xzr), USE(wzr)
; CHECK-NEXT: ; InlineAsm End
; CHECK-NEXT: ret

tail call void asm sideeffect "USE($0)", "z"(i32 0) nounwind
; CHECK: USE(xzr)

tail call void asm sideeffect "USE(${0:w})", "zr"(i32 0)
; CHECK: USE(wzr)

tail call void asm sideeffect "USE(${0:w})", "zr"(i32 1)
; CHECK: mov [[VAL1:w[0-9]+]], #1
; CHECK: USE([[VAL1]])

tail call void asm sideeffect "USE($0), USE($1)", "z,z"(i32 0, i32 0) nounwind
; CHECK: USE(xzr), USE(xzr)

tail call void asm sideeffect "USE($0), USE(${1:w})", "z,z"(i32 0, i32 0) nounwind
; CHECK: USE(xzr), USE(wzr)

ret void
}

define <2 x float> @test_vreg_64bit(<2 x float> %in) nounwind {
; CHECK-LABEL: test_vreg_64bit:
; CHECK-LABEL: test_vreg_64bit:
; CHECK: ; %bb.0:
; CHECK-NEXT: stp d15, d14, [sp, #-16]! ; 16-byte Folded Spill
; CHECK-NEXT: ; InlineAsm Start
; CHECK-NEXT: fadd v14.2s, v0.2s, v0.2s
; CHECK-NEXT: ; InlineAsm End
; CHECK-NEXT: fmov d0, d14
; CHECK-NEXT: ldp d15, d14, [sp], #16 ; 16-byte Folded Reload
; CHECK-NEXT: ret
%1 = tail call <2 x float> asm sideeffect "fadd ${0}.2s, ${1}.2s, ${1}.2s", "={v14},w"(<2 x float> %in) nounwind
; CHECK: fadd v14.2s, v0.2s, v0.2s
ret <2 x float> %1
}

define <4 x float> @test_vreg_128bit(<4 x float> %in) nounwind {
; CHECK-LABEL: test_vreg_128bit:
; CHECK-LABEL: test_vreg_128bit:
; CHECK: ; %bb.0:
; CHECK-NEXT: stp d15, d14, [sp, #-16]! ; 16-byte Folded Spill
; CHECK-NEXT: ; InlineAsm Start
; CHECK-NEXT: fadd v14.4s, v0.4s, v0.4s
; CHECK-NEXT: ; InlineAsm End
; CHECK-NEXT: mov.16b v0, v14
; CHECK-NEXT: ldp d15, d14, [sp], #16 ; 16-byte Folded Reload
; CHECK-NEXT: ret
%1 = tail call <4 x float> asm sideeffect "fadd ${0}.4s, ${1}.4s, ${1}.4s", "={v14},w"(<4 x float> %in) nounwind
; CHECK: fadd v14.4s, v0.4s, v0.4s
ret <4 x float> %1
}

define void @test_constraint_w(i32 %a) {
; CHECK: fmov [[SREG:s[0-9]+]], {{w[0-9]+}}
; CHECK: sqxtn h0, [[SREG]]
; CHECK-LABEL: test_constraint_w:
; CHECK: ; %bb.0:
; CHECK-NEXT: fmov s0, w0
; CHECK-NEXT: ; InlineAsm Start
; CHECK-NEXT: sqxtn h0, s0
; CHECK-EMPTY:
; CHECK-NEXT: ; InlineAsm End
; CHECK-NEXT: ret

tail call void asm sideeffect "sqxtn h0, ${0:s}\0A", "w"(i32 %a)
ret void
}

define void @test_inline_modifier_a(i8* %ptr) nounwind {
; CHECK-LABEL: test_inline_modifier_a:
; CHECK-LABEL: test_inline_modifier_a:
; CHECK: ; %bb.0:
; CHECK-NEXT: ; InlineAsm Start
; CHECK-NEXT: prfm pldl1keep, [x0]
; CHECK-EMPTY:
; CHECK-NEXT: ; InlineAsm End
; CHECK-NEXT: ret
tail call void asm sideeffect "prfm pldl1keep, ${0:a}\0A", "r"(i8* %ptr)
; CHECK: prfm pldl1keep, [x0]
ret void
}

; PR33134
define void @test_zero_address() {
; CHECK-LABEL: test_zero_address:
; CHECK: ; %bb.0: ; %entry
; CHECK-NEXT: mov x8, xzr
; CHECK-NEXT: ; InlineAsm Start
; CHECK-NEXT: ldr x8, [x8]
; CHECK-EMPTY:
; CHECK-NEXT: ; InlineAsm End
; CHECK-NEXT: ret
entry:
; CHECK-LABEL: test_zero_address
; CHECK: mov {{x[0-9]+}}, xzr
; CHECK: ldr {{x[0-9]+}}, {{[x[0-9]+]}}
tail call i32 asm sideeffect "ldr $0, $1 \0A", "=r,*Q"(i32* null)
ret void
}

; No '#' in lane specifier
define void @test_no_hash_in_lane_specifier() {
; CHECK-LABEL: test_no_hash_in_lane_specifier
; CHECK: fmla v2.4s, v0.4s, v1.s[1]
; CHECK: ret
; CHECK-LABEL: test_no_hash_in_lane_specifier:
; CHECK: ; %bb.0:
; CHECK-NEXT: ; InlineAsm Start
; CHECK-NEXT: fmla v2.4s, v0.4s, v1.s[1]
; CHECK-NEXT: ; InlineAsm End
; CHECK-NEXT: ret
tail call void asm sideeffect "fmla v2.4s, v0.4s, v1.s[$0]", "I"(i32 1) #1
ret void
}

define void @test_vector_too_large_r_m(<9 x float>* nocapture readonly %0) {
; CHECK-LABEL: test_vector_too_large_r_m
; CHECK: ldr [[S:s[0-9]+]], [x0, #32]
; CHECK-DAG: ldp [[Q0:q[0-9]+]], [[Q1:q[0-9]+]], [x0]
; CHECK: str [[S]], [sp, #32]
; CHECK-LABEL: test_vector_too_large_r_m:
; CHECK: ; %bb.0: ; %entry
; CHECK-NEXT: sub sp, sp, #64
; CHECK-NEXT: .cfi_def_cfa_offset 64
; CHECK-NEXT: ldr s0, [x0, #32]
; CHECK-NEXT: ldp q2, q1, [x0]
; CHECK-NEXT: mov x8, sp
; CHECK-NEXT: str s0, [sp, #32]
; CHECK-NEXT: stp q2, q1, [sp]
; CHECK-NEXT: ; InlineAsm Start
; CHECK-NEXT: ; InlineAsm End
; CHECK-NEXT: add sp, sp, #64
; CHECK-NEXT: ret
; CHECK-DAG stp [[Q0]], [[Q1]], [sp]
; CHECK: ; InlineAsm Start
;
entry:
%m.addr = alloca <9 x float>, align 16
%m = load <9 x float>, <9 x float>* %0, align 16
@@ -300,9 +505,15 @@ entry:

define void @test_o_output_constraint() {
; CHECK-LABEL: test_o_output_constraint:
; CHECK: sub sp, sp, #16
; CHECK: add x[[REG:[0-9]+]], sp, #15
; CHECK: mov [x[[REG]]], 7
; CHECK: ; %bb.0:
; CHECK-NEXT: sub sp, sp, #16
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: add x8, sp, #15
; CHECK-NEXT: ; InlineAsm Start
; CHECK-NEXT: mov [x8], 7
; CHECK-NEXT: ; InlineAsm End
; CHECK-NEXT: add sp, sp, #16
; CHECK-NEXT: ret
%b = alloca i8, align 1
call void asm "mov $0, 7", "=*o"(i8* %b)
ret void

@@ -1,8 +1,12 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=arm64-eabi -verify-machineinstrs | FileCheck %s

; CHECK-LABEL: ldp_int
; CHECK: ldp
define i32 @ldp_int(i32* %p) nounwind {
; CHECK-LABEL: ldp_int:
; CHECK: // %bb.0:
; CHECK-NEXT: ldp w8, w9, [x0]
; CHECK-NEXT: add w0, w9, w8
; CHECK-NEXT: ret
%tmp = load i32, i32* %p, align 4
%add.ptr = getelementptr inbounds i32, i32* %p, i64 1
%tmp1 = load i32, i32* %add.ptr, align 4
@@ -10,9 +14,12 @@ define i32 @ldp_int(i32* %p) nounwind {
ret i32 %add
}

; CHECK-LABEL: ldp_sext_int
; CHECK: ldpsw
define i64 @ldp_sext_int(i32* %p) nounwind {
; CHECK-LABEL: ldp_sext_int:
; CHECK: // %bb.0:
; CHECK-NEXT: ldpsw x8, x9, [x0]
; CHECK-NEXT: add x0, x9, x8
; CHECK-NEXT: ret
%tmp = load i32, i32* %p, align 4
%add.ptr = getelementptr inbounds i32, i32* %p, i64 1
%tmp1 = load i32, i32* %add.ptr, align 4
@@ -22,10 +29,14 @@ define i64 @ldp_sext_int(i32* %p) nounwind {
ret i64 %add
}

; CHECK-LABEL: ldp_half_sext_res0_int:
; CHECK: ldp w[[DST1:[0-9]+]], w[[DST2:[0-9]+]], [x0]
; CHECK: sxtw x[[DST1]], w[[DST1]]
define i64 @ldp_half_sext_res0_int(i32* %p) nounwind {
; CHECK-LABEL: ldp_half_sext_res0_int:
; CHECK: // %bb.0:
; CHECK-NEXT: ldp w8, w9, [x0]
; CHECK-NEXT: // kill: def $w8 killed $w8 def $x8
; CHECK-NEXT: sxtw x8, w8
; CHECK-NEXT: add x0, x9, x8
; CHECK-NEXT: ret
%tmp = load i32, i32* %p, align 4
%add.ptr = getelementptr inbounds i32, i32* %p, i64 1
%tmp1 = load i32, i32* %add.ptr, align 4
@@ -35,10 +46,14 @@ define i64 @ldp_half_sext_res0_int(i32* %p) nounwind {
ret i64 %add
}

; CHECK-LABEL: ldp_half_sext_res1_int:
; CHECK: ldp w[[DST1:[0-9]+]], w[[DST2:[0-9]+]], [x0]
; CHECK: sxtw x[[DST2]], w[[DST2]]
define i64 @ldp_half_sext_res1_int(i32* %p) nounwind {
; CHECK-LABEL: ldp_half_sext_res1_int:
; CHECK: // %bb.0:
; CHECK-NEXT: ldp w8, w9, [x0]
; CHECK-NEXT: // kill: def $w9 killed $w9 def $x9
; CHECK-NEXT: sxtw x9, w9
; CHECK-NEXT: add x0, x9, x8
; CHECK-NEXT: ret
%tmp = load i32, i32* %p, align 4
%add.ptr = getelementptr inbounds i32, i32* %p, i64 1
%tmp1 = load i32, i32* %add.ptr, align 4
@@ -49,9 +64,12 @@ define i64 @ldp_half_sext_res1_int(i32* %p) nounwind {
}

; CHECK-LABEL: ldp_long
; CHECK: ldp
define i64 @ldp_long(i64* %p) nounwind {
; CHECK-LABEL: ldp_long:
; CHECK: // %bb.0:
; CHECK-NEXT: ldp x8, x9, [x0]
; CHECK-NEXT: add x0, x9, x8
; CHECK-NEXT: ret
%tmp = load i64, i64* %p, align 8
%add.ptr = getelementptr inbounds i64, i64* %p, i64 1
%tmp1 = load i64, i64* %add.ptr, align 8
@@ -59,9 +77,12 @@ define i64 @ldp_long(i64* %p) nounwind {
ret i64 %add
}

; CHECK-LABEL: ldp_float
; CHECK: ldp
define float @ldp_float(float* %p) nounwind {
; CHECK-LABEL: ldp_float:
; CHECK: // %bb.0:
; CHECK-NEXT: ldp s0, s1, [x0]
; CHECK-NEXT: fadd s0, s0, s1
; CHECK-NEXT: ret
%tmp = load float, float* %p, align 4
%add.ptr = getelementptr inbounds float, float* %p, i64 1
%tmp1 = load float, float* %add.ptr, align 4
|
@ -69,9 +90,12 @@ define float @ldp_float(float* %p) nounwind {
|
|||
ret float %add
|
||||
}
|
||||
|
||||
; CHECK-LABEL: ldp_double
|
||||
; CHECK: ldp
|
||||
define double @ldp_double(double* %p) nounwind {
|
||||
; CHECK-LABEL: ldp_double:
|
||||
; CHECK: // %bb.0:
|
||||
; CHECK-NEXT: ldp d0, d1, [x0]
|
||||
; CHECK-NEXT: fadd d0, d0, d1
|
||||
; CHECK-NEXT: ret
|
||||
%tmp = load double, double* %p, align 8
|
||||
%add.ptr = getelementptr inbounds double, double* %p, i64 1
|
||||
%tmp1 = load double, double* %add.ptr, align 8
|
||||
|
@ -79,9 +103,12 @@ define double @ldp_double(double* %p) nounwind {
|
|||
ret double %add
|
||||
}
|
||||
|
||||
; CHECK-LABEL: ldp_doublex2
|
||||
; CHECK: ldp
|
||||
define <2 x double> @ldp_doublex2(<2 x double>* %p) nounwind {
|
||||
; CHECK-LABEL: ldp_doublex2:
|
||||
; CHECK: // %bb.0:
|
||||
; CHECK-NEXT: ldp q0, q1, [x0]
|
||||
; CHECK-NEXT: fadd v0.2d, v0.2d, v1.2d
|
||||
; CHECK-NEXT: ret
|
||||
%tmp = load <2 x double>, <2 x double>* %p, align 16
|
||||
%add.ptr = getelementptr inbounds <2 x double>, <2 x double>* %p, i64 1
|
||||
%tmp1 = load <2 x double>, <2 x double>* %add.ptr, align 16
|
||||
|
@ -91,10 +118,11 @@ define <2 x double> @ldp_doublex2(<2 x double>* %p) nounwind {
|
|||
|
||||
; Test the load/store optimizer---combine ldurs into a ldp, if appropriate
|
||||
define i32 @ldur_int(i32* %a) nounwind {
|
||||
; CHECK-LABEL: ldur_int
|
||||
; CHECK: ldp [[DST1:w[0-9]+]], [[DST2:w[0-9]+]], [x0, #-8]
|
||||
; CHECK-NEXT: add w{{[0-9]+}}, [[DST2]], [[DST1]]
|
||||
; CHECK-NEXT: ret
|
||||
; CHECK-LABEL: ldur_int:
|
||||
; CHECK: // %bb.0:
|
||||
; CHECK-NEXT: ldp w9, w8, [x0, #-8]
|
||||
; CHECK-NEXT: add w0, w8, w9
|
||||
; CHECK-NEXT: ret
|
||||
%p1 = getelementptr inbounds i32, i32* %a, i32 -1
|
||||
%tmp1 = load i32, i32* %p1, align 2
|
||||
%p2 = getelementptr inbounds i32, i32* %a, i32 -2
|
||||
|
@ -104,10 +132,11 @@ define i32 @ldur_int(i32* %a) nounwind {
|
|||
}
|
||||
|
||||
define i64 @ldur_sext_int(i32* %a) nounwind {
|
||||
; CHECK-LABEL: ldur_sext_int
|
||||
; CHECK: ldpsw [[DST1:x[0-9]+]], [[DST2:x[0-9]+]], [x0, #-8]
|
||||
; CHECK-NEXT: add x{{[0-9]+}}, [[DST2]], [[DST1]]
|
||||
; CHECK-NEXT: ret
|
||||
; CHECK-LABEL: ldur_sext_int:
|
||||
; CHECK: // %bb.0:
|
||||
; CHECK-NEXT: ldpsw x9, x8, [x0, #-8]
|
||||
; CHECK-NEXT: add x0, x8, x9
|
||||
; CHECK-NEXT: ret
|
||||
%p1 = getelementptr inbounds i32, i32* %a, i32 -1
|
||||
%tmp1 = load i32, i32* %p1, align 2
|
||||
%p2 = getelementptr inbounds i32, i32* %a, i32 -2
|
||||
|
@ -119,11 +148,13 @@ define i64 @ldur_sext_int(i32* %a) nounwind {
|
|||
}
|
||||
|
||||
define i64 @ldur_half_sext_int_res0(i32* %a) nounwind {
|
||||
; CHECK-LABEL: ldur_half_sext_int_res0
|
||||
; CHECK: ldp w[[DST1:[0-9]+]], w[[DST2:[0-9]+]], [x0, #-8]
|
||||
; CHECK: sxtw x[[DST1]], w[[DST1]]
|
||||
; CHECK-NEXT: add x{{[0-9]+}}, x[[DST2]], x[[DST1]]
|
||||
; CHECK-NEXT: ret
|
||||
; CHECK-LABEL: ldur_half_sext_int_res0:
|
||||
; CHECK: // %bb.0:
|
||||
; CHECK-NEXT: ldp w9, w8, [x0, #-8]
|
||||
; CHECK-NEXT: // kill: def $w9 killed $w9 def $x9
|
||||
; CHECK-NEXT: sxtw x9, w9
|
||||
; CHECK-NEXT: add x0, x8, x9
|
||||
; CHECK-NEXT: ret
|
||||
%p1 = getelementptr inbounds i32, i32* %a, i32 -1
|
||||
%tmp1 = load i32, i32* %p1, align 2
|
||||
%p2 = getelementptr inbounds i32, i32* %a, i32 -2
|
||||
|
@ -135,11 +166,13 @@ define i64 @ldur_half_sext_int_res0(i32* %a) nounwind {
|
|||
}
|
||||
|
||||
define i64 @ldur_half_sext_int_res1(i32* %a) nounwind {
|
||||
; CHECK-LABEL: ldur_half_sext_int_res1
|
||||
; CHECK: ldp w[[DST1:[0-9]+]], w[[DST2:[0-9]+]], [x0, #-8]
|
||||
; CHECK: sxtw x[[DST2]], w[[DST2]]
|
||||
; CHECK-NEXT: add x{{[0-9]+}}, x[[DST2]], x[[DST1]]
|
||||
; CHECK-NEXT: ret
|
||||
; CHECK-LABEL: ldur_half_sext_int_res1:
|
||||
; CHECK: // %bb.0:
|
||||
; CHECK-NEXT: ldp w9, w8, [x0, #-8]
|
||||
; CHECK-NEXT: // kill: def $w8 killed $w8 def $x8
|
||||
; CHECK-NEXT: sxtw x8, w8
|
||||
; CHECK-NEXT: add x0, x8, x9
|
||||
; CHECK-NEXT: ret
|
||||
%p1 = getelementptr inbounds i32, i32* %a, i32 -1
|
||||
%tmp1 = load i32, i32* %p1, align 2
|
||||
%p2 = getelementptr inbounds i32, i32* %a, i32 -2
|
||||
|
@ -152,10 +185,11 @@ define i64 @ldur_half_sext_int_res1(i32* %a) nounwind {
|
|||
|
||||
|
||||
define i64 @ldur_long(i64* %a) nounwind ssp {
; CHECK-LABEL: ldur_long
; CHECK: ldp [[DST1:x[0-9]+]], [[DST2:x[0-9]+]], [x0, #-16]
; CHECK-NEXT: add x{{[0-9]+}}, [[DST2]], [[DST1]]
; CHECK-NEXT: ret
; CHECK-LABEL: ldur_long:
; CHECK: // %bb.0:
; CHECK-NEXT: ldp x9, x8, [x0, #-16]
; CHECK-NEXT: add x0, x8, x9
; CHECK-NEXT: ret
%p1 = getelementptr inbounds i64, i64* %a, i64 -1
%tmp1 = load i64, i64* %p1, align 2
%p2 = getelementptr inbounds i64, i64* %a, i64 -2
@@ -165,10 +199,11 @@ define i64 @ldur_long(i64* %a) nounwind ssp {
}

define float @ldur_float(float* %a) {
; CHECK-LABEL: ldur_float
; CHECK: ldp [[DST1:s[0-9]+]], [[DST2:s[0-9]+]], [x0, #-8]
; CHECK-NEXT: fadd s{{[0-9]+}}, [[DST2]], [[DST1]]
; CHECK-NEXT: ret
; CHECK-LABEL: ldur_float:
; CHECK: // %bb.0:
; CHECK-NEXT: ldp s1, s0, [x0, #-8]
; CHECK-NEXT: fadd s0, s0, s1
; CHECK-NEXT: ret
%p1 = getelementptr inbounds float, float* %a, i64 -1
%tmp1 = load float, float* %p1, align 2
%p2 = getelementptr inbounds float, float* %a, i64 -2
@@ -178,10 +213,11 @@ define float @ldur_float(float* %a) {
}

define double @ldur_double(double* %a) {
; CHECK-LABEL: ldur_double
; CHECK: ldp [[DST1:d[0-9]+]], [[DST2:d[0-9]+]], [x0, #-16]
; CHECK-NEXT: fadd d{{[0-9]+}}, [[DST2]], [[DST1]]
; CHECK-NEXT: ret
; CHECK-LABEL: ldur_double:
; CHECK: // %bb.0:
; CHECK-NEXT: ldp d1, d0, [x0, #-16]
; CHECK-NEXT: fadd d0, d0, d1
; CHECK-NEXT: ret
%p1 = getelementptr inbounds double, double* %a, i64 -1
%tmp1 = load double, double* %p1, align 2
%p2 = getelementptr inbounds double, double* %a, i64 -2
@@ -191,10 +227,11 @@ define double @ldur_double(double* %a) {
}

define <2 x double> @ldur_doublex2(<2 x double>* %a) {
; CHECK-LABEL: ldur_doublex2
; CHECK: ldp q[[DST1:[0-9]+]], q[[DST2:[0-9]+]], [x0, #-32]
; CHECK-NEXT: fadd v{{[0-9]+}}.2d, v[[DST2]].2d, v[[DST1]].2d
; CHECK-NEXT: ret
; CHECK-LABEL: ldur_doublex2:
; CHECK: // %bb.0:
; CHECK-NEXT: ldp q1, q0, [x0, #-32]
; CHECK-NEXT: fadd v0.2d, v0.2d, v1.2d
; CHECK-NEXT: ret
%p1 = getelementptr inbounds <2 x double>, <2 x double>* %a, i64 -1
%tmp1 = load <2 x double>, <2 x double>* %p1, align 2
%p2 = getelementptr inbounds <2 x double>, <2 x double>* %a, i64 -2
@@ -205,11 +242,11 @@ define <2 x double> @ldur_doublex2(<2 x double>* %a) {

; Now check some boundary conditions
define i64 @pairUpBarelyIn(i64* %a) nounwind ssp {
; CHECK-LABEL: pairUpBarelyIn
; CHECK-NOT: ldur
; CHECK: ldp [[DST1:x[0-9]+]], [[DST2:x[0-9]+]], [x0, #-256]
; CHECK-NEXT: add x{{[0-9]+}}, [[DST2]], [[DST1]]
; CHECK-NEXT: ret
; CHECK-LABEL: pairUpBarelyIn:
; CHECK: // %bb.0:
; CHECK-NEXT: ldp x9, x8, [x0, #-256]
; CHECK-NEXT: add x0, x8, x9
; CHECK-NEXT: ret
%p1 = getelementptr inbounds i64, i64* %a, i64 -31
%tmp1 = load i64, i64* %p1, align 2
%p2 = getelementptr inbounds i64, i64* %a, i64 -32
@@ -219,11 +256,11 @@ define i64 @pairUpBarelyIn(i64* %a) nounwind ssp {
}

define i64 @pairUpBarelyInSext(i32* %a) nounwind ssp {
; CHECK-LABEL: pairUpBarelyInSext
; CHECK-NOT: ldur
; CHECK: ldpsw [[DST1:x[0-9]+]], [[DST2:x[0-9]+]], [x0, #-256]
; CHECK-NEXT: add x{{[0-9]+}}, [[DST2]], [[DST1]]
; CHECK-NEXT: ret
; CHECK-LABEL: pairUpBarelyInSext:
; CHECK: // %bb.0:
; CHECK-NEXT: ldpsw x9, x8, [x0, #-256]
; CHECK-NEXT: add x0, x8, x9
; CHECK-NEXT: ret
%p1 = getelementptr inbounds i32, i32* %a, i64 -63
%tmp1 = load i32, i32* %p1, align 2
%p2 = getelementptr inbounds i32, i32* %a, i64 -64
@@ -235,12 +272,13 @@ define i64 @pairUpBarelyInSext(i32* %a) nounwind ssp {
}

define i64 @pairUpBarelyInHalfSextRes0(i32* %a) nounwind ssp {
; CHECK-LABEL: pairUpBarelyInHalfSextRes0
; CHECK-NOT: ldur
; CHECK: ldp w[[DST1:[0-9]+]], w[[DST2:[0-9]+]], [x0, #-256]
; CHECK: sxtw x[[DST1]], w[[DST1]]
; CHECK-NEXT: add x{{[0-9]+}}, x[[DST2]], x[[DST1]]
; CHECK-NEXT: ret
; CHECK-LABEL: pairUpBarelyInHalfSextRes0:
; CHECK: // %bb.0:
; CHECK-NEXT: ldp w9, w8, [x0, #-256]
; CHECK-NEXT: // kill: def $w9 killed $w9 def $x9
; CHECK-NEXT: sxtw x9, w9
; CHECK-NEXT: add x0, x8, x9
; CHECK-NEXT: ret
%p1 = getelementptr inbounds i32, i32* %a, i64 -63
%tmp1 = load i32, i32* %p1, align 2
%p2 = getelementptr inbounds i32, i32* %a, i64 -64
@@ -252,12 +290,13 @@ define i64 @pairUpBarelyInHalfSextRes0(i32* %a) nounwind ssp {
}

define i64 @pairUpBarelyInHalfSextRes1(i32* %a) nounwind ssp {
; CHECK-LABEL: pairUpBarelyInHalfSextRes1
; CHECK-NOT: ldur
; CHECK: ldp w[[DST1:[0-9]+]], w[[DST2:[0-9]+]], [x0, #-256]
; CHECK: sxtw x[[DST2]], w[[DST2]]
; CHECK-NEXT: add x{{[0-9]+}}, x[[DST2]], x[[DST1]]
; CHECK-NEXT: ret
; CHECK-LABEL: pairUpBarelyInHalfSextRes1:
; CHECK: // %bb.0:
; CHECK-NEXT: ldp w9, w8, [x0, #-256]
; CHECK-NEXT: // kill: def $w8 killed $w8 def $x8
; CHECK-NEXT: sxtw x8, w8
; CHECK-NEXT: add x0, x8, x9
; CHECK-NEXT: ret
%p1 = getelementptr inbounds i32, i32* %a, i64 -63
%tmp1 = load i32, i32* %p1, align 2
%p2 = getelementptr inbounds i32, i32* %a, i64 -64
@@ -269,12 +308,15 @@ define i64 @pairUpBarelyInHalfSextRes1(i32* %a) nounwind ssp {
}

define i64 @pairUpBarelyOut(i64* %a) nounwind ssp {
; CHECK-LABEL: pairUpBarelyOut
; CHECK-NOT: ldp
; Don't be fragile about which loads or manipulations of the base register
; are used---just check that there isn't an ldp before the add
; CHECK: add
; CHECK-NEXT: ret
; CHECK-LABEL: pairUpBarelyOut:
; CHECK: // %bb.0:
; CHECK-NEXT: sub x9, x0, #264
; CHECK-NEXT: ldur x8, [x0, #-256]
; CHECK-NEXT: ldr x9, [x9]
; CHECK-NEXT: add x0, x8, x9
; CHECK-NEXT: ret
%p1 = getelementptr inbounds i64, i64* %a, i64 -32
%tmp1 = load i64, i64* %p1, align 2
%p2 = getelementptr inbounds i64, i64* %a, i64 -33
@@ -284,12 +326,15 @@ define i64 @pairUpBarelyOut(i64* %a) nounwind ssp {
}

define i64 @pairUpBarelyOutSext(i32* %a) nounwind ssp {
; CHECK-LABEL: pairUpBarelyOutSext
; CHECK-NOT: ldp
; Don't be fragile about which loads or manipulations of the base register
; are used---just check that there isn't an ldp before the add
; CHECK: add
; CHECK-NEXT: ret
; CHECK-LABEL: pairUpBarelyOutSext:
; CHECK: // %bb.0:
; CHECK-NEXT: sub x9, x0, #260
; CHECK-NEXT: ldursw x8, [x0, #-256]
; CHECK-NEXT: ldrsw x9, [x9]
; CHECK-NEXT: add x0, x8, x9
; CHECK-NEXT: ret
%p1 = getelementptr inbounds i32, i32* %a, i64 -64
%tmp1 = load i32, i32* %p1, align 2
%p2 = getelementptr inbounds i32, i32* %a, i64 -65
@@ -301,12 +346,12 @@ define i64 @pairUpBarelyOutSext(i32* %a) nounwind ssp {
}

define i64 @pairUpNotAligned(i64* %a) nounwind ssp {
; CHECK-LABEL: pairUpNotAligned
; CHECK-NOT: ldp
; CHECK: ldur
; CHECK-NEXT: ldur
; CHECK-NEXT: add
; CHECK-NEXT: ret
; CHECK-LABEL: pairUpNotAligned:
; CHECK: // %bb.0:
; CHECK-NEXT: ldur x8, [x0, #-143]
; CHECK-NEXT: ldur x9, [x0, #-135]
; CHECK-NEXT: add x0, x8, x9
; CHECK-NEXT: ret
%p1 = getelementptr inbounds i64, i64* %a, i64 -18
%bp1 = bitcast i64* %p1 to i8*
%bp1p1 = getelementptr inbounds i8, i8* %bp1, i64 1
@@ -324,12 +369,12 @@ define i64 @pairUpNotAligned(i64* %a) nounwind ssp {
}

define i64 @pairUpNotAlignedSext(i32* %a) nounwind ssp {
; CHECK-LABEL: pairUpNotAlignedSext
; CHECK-NOT: ldp
; CHECK: ldursw
; CHECK-NEXT: ldursw
; CHECK-NEXT: add
; CHECK-NEXT: ret
; CHECK-LABEL: pairUpNotAlignedSext:
; CHECK: // %bb.0:
; CHECK-NEXT: ldursw x8, [x0, #-71]
; CHECK-NEXT: ldursw x9, [x0, #-67]
; CHECK-NEXT: add x0, x8, x9
; CHECK-NEXT: ret
%p1 = getelementptr inbounds i32, i32* %a, i64 -18
%bp1 = bitcast i32* %p1 to i8*
%bp1p1 = getelementptr inbounds i8, i8* %bp1, i64 1
@@ -350,9 +395,17 @@ define i64 @pairUpNotAlignedSext(i32* %a) nounwind ssp {

declare void @use-ptr(i32*)

; CHECK-LABEL: ldp_sext_int_pre
; CHECK: ldpsw x{{[0-9]+}}, x{{[0-9]+}}, [x{{[0-9]+}}, #8]
define i64 @ldp_sext_int_pre(i32* %p) nounwind {
; CHECK-LABEL: ldp_sext_int_pre:
; CHECK: // %bb.0:
; CHECK-NEXT: stp x30, x19, [sp, #-16]! // 16-byte Folded Spill
; CHECK-NEXT: mov x19, x0
; CHECK-NEXT: add x0, x0, #8
; CHECK-NEXT: bl "use-ptr"
; CHECK-NEXT: ldpsw x8, x9, [x19, #8]
; CHECK-NEXT: add x0, x9, x8
; CHECK-NEXT: ldp x30, x19, [sp], #16 // 16-byte Folded Reload
; CHECK-NEXT: ret
%ptr = getelementptr inbounds i32, i32* %p, i64 2
call void @use-ptr(i32* %ptr)
%add.ptr = getelementptr inbounds i32, i32* %ptr, i64 0
@@ -365,9 +418,17 @@ define i64 @ldp_sext_int_pre(i32* %p) nounwind {
ret i64 %add
}

; CHECK-LABEL: ldp_sext_int_post
; CHECK: ldpsw x{{[0-9]+}}, x{{[0-9]+}}, [x0], #8
define i64 @ldp_sext_int_post(i32* %p) nounwind {
; CHECK-LABEL: ldp_sext_int_post:
; CHECK: // %bb.0:
; CHECK-NEXT: str x30, [sp, #-32]! // 8-byte Folded Spill
; CHECK-NEXT: stp x20, x19, [sp, #16] // 16-byte Folded Spill
; CHECK-NEXT: ldpsw x19, x20, [x0], #8
; CHECK-NEXT: bl "use-ptr"
; CHECK-NEXT: add x0, x20, x19
; CHECK-NEXT: ldp x20, x19, [sp, #16] // 16-byte Folded Reload
; CHECK-NEXT: ldr x30, [sp], #32 // 8-byte Folded Reload
; CHECK-NEXT: ret
%tmp = load i32, i32* %p, align 4
%add.ptr = getelementptr inbounds i32, i32* %p, i64 1
%tmp1 = load i32, i32* %add.ptr, align 4
@@ -1,61 +1,75 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=arm64-eabi | FileCheck %s

define void @bzero_4_heap(i8* nocapture %c) {
; CHECK-LABEL: bzero_4_heap:
; CHECK: str wzr, [x0]
; CHECK-NEXT: ret
; CHECK: // %bb.0:
; CHECK-NEXT: str wzr, [x0]
; CHECK-NEXT: ret
call void @llvm.memset.p0i8.i64(i8* align 4 %c, i8 0, i64 4, i1 false)
ret void
}

define void @bzero_8_heap(i8* nocapture %c) {
; CHECK-LABEL: bzero_8_heap:
; CHECK: str xzr, [x0]
; CHECK-NEXT: ret
; CHECK: // %bb.0:
; CHECK-NEXT: str xzr, [x0]
; CHECK-NEXT: ret
call void @llvm.memset.p0i8.i64(i8* align 8 %c, i8 0, i64 8, i1 false)
ret void
}

define void @bzero_12_heap(i8* nocapture %c) {
; CHECK-LABEL: bzero_12_heap:
; CHECK: str wzr, [x0, #8]
; CHECK-NEXT: str xzr, [x0]
; CHECK-NEXT: ret
; CHECK: // %bb.0:
; CHECK-NEXT: str wzr, [x0, #8]
; CHECK-NEXT: str xzr, [x0]
; CHECK-NEXT: ret
call void @llvm.memset.p0i8.i64(i8* align 8 %c, i8 0, i64 12, i1 false)
ret void
}

define void @bzero_16_heap(i8* nocapture %c) {
; CHECK-LABEL: bzero_16_heap:
; CHECK: stp xzr, xzr, [x0]
; CHECK-NEXT: ret
; CHECK: // %bb.0:
; CHECK-NEXT: stp xzr, xzr, [x0]
; CHECK-NEXT: ret
call void @llvm.memset.p0i8.i64(i8* align 8 %c, i8 0, i64 16, i1 false)
ret void
}

define void @bzero_32_heap(i8* nocapture %c) {
; CHECK-LABEL: bzero_32_heap:
; CHECK: movi v0.2d, #0000000000000000
; CHECK-NEXT: stp q0, q0, [x0]
; CHECK-NEXT: ret
; CHECK: // %bb.0:
; CHECK-NEXT: movi v0.2d, #0000000000000000
; CHECK-NEXT: stp q0, q0, [x0]
; CHECK-NEXT: ret
call void @llvm.memset.p0i8.i64(i8* align 8 %c, i8 0, i64 32, i1 false)
ret void
}

define void @bzero_64_heap(i8* nocapture %c) {
; CHECK-LABEL: bzero_64_heap:
; CHECK: movi v0.2d, #0000000000000000
; CHECK-NEXT: stp q0, q0, [x0, #32]
; CHECK-NEXT: stp q0, q0, [x0]
; CHECK-NEXT: ret
; CHECK: // %bb.0:
; CHECK-NEXT: movi v0.2d, #0000000000000000
; CHECK-NEXT: stp q0, q0, [x0, #32]
; CHECK-NEXT: stp q0, q0, [x0]
; CHECK-NEXT: ret
call void @llvm.memset.p0i8.i64(i8* align 8 %c, i8 0, i64 64, i1 false)
ret void
}

define void @bzero_4_stack() {
; CHECK-LABEL: bzero_4_stack:
; CHECK: str wzr, [sp, #12]
; CHECK-NEXT: bl something
; CHECK: // %bb.0:
; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: .cfi_offset w30, -16
; CHECK-NEXT: add x0, sp, #12
; CHECK-NEXT: str wzr, [sp, #12]
; CHECK-NEXT: bl something
; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT: ret
%buf = alloca [4 x i8], align 1
%cast = bitcast [4 x i8]* %buf to i8*
call void @llvm.memset.p0i8.i32(i8* %cast, i8 0, i32 4, i1 false)
@@ -65,8 +79,14 @@ define void @bzero_4_stack() {

define void @bzero_8_stack() {
; CHECK-LABEL: bzero_8_stack:
; CHECK: stp x30, xzr, [sp, #-16]!
; CHECK: bl something
; CHECK: // %bb.0:
; CHECK-NEXT: stp x30, xzr, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: .cfi_offset w30, -16
; CHECK-NEXT: add x0, sp, #8
; CHECK-NEXT: bl something
; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT: ret
%buf = alloca [8 x i8], align 1
%cast = bitcast [8 x i8]* %buf to i8*
call void @llvm.memset.p0i8.i32(i8* %cast, i8 0, i32 8, i1 false)
@@ -76,9 +96,18 @@ define void @bzero_8_stack() {

define void @bzero_12_stack() {
; CHECK-LABEL: bzero_12_stack:
; CHECK: str wzr, [sp, #8]
; CHECK-NEXT: str xzr, [sp]
; CHECK-NEXT: bl something
; CHECK: // %bb.0:
; CHECK-NEXT: sub sp, sp, #32
; CHECK-NEXT: str x30, [sp, #16] // 8-byte Folded Spill
; CHECK-NEXT: .cfi_def_cfa_offset 32
; CHECK-NEXT: .cfi_offset w30, -16
; CHECK-NEXT: mov x0, sp
; CHECK-NEXT: str wzr, [sp, #8]
; CHECK-NEXT: str xzr, [sp]
; CHECK-NEXT: bl something
; CHECK-NEXT: ldr x30, [sp, #16] // 8-byte Folded Reload
; CHECK-NEXT: add sp, sp, #32
; CHECK-NEXT: ret
%buf = alloca [12 x i8], align 1
%cast = bitcast [12 x i8]* %buf to i8*
call void @llvm.memset.p0i8.i32(i8* %cast, i8 0, i32 12, i1 false)
@@ -88,10 +117,17 @@ define void @bzero_12_stack() {

define void @bzero_16_stack() {
; CHECK-LABEL: bzero_16_stack:
; CHECK: stp xzr, x30, [sp, #8]
; CHECK: mov x0, sp
; CHECK: str xzr, [sp]
; CHECK-NEXT: bl something
; CHECK: // %bb.0:
; CHECK-NEXT: sub sp, sp, #32
; CHECK-NEXT: stp xzr, x30, [sp, #8] // 8-byte Folded Spill
; CHECK-NEXT: .cfi_def_cfa_offset 32
; CHECK-NEXT: .cfi_offset w30, -16
; CHECK-NEXT: mov x0, sp
; CHECK-NEXT: str xzr, [sp]
; CHECK-NEXT: bl something
; CHECK-NEXT: ldr x30, [sp, #16] // 8-byte Folded Reload
; CHECK-NEXT: add sp, sp, #32
; CHECK-NEXT: ret
%buf = alloca [16 x i8], align 1
%cast = bitcast [16 x i8]* %buf to i8*
call void @llvm.memset.p0i8.i32(i8* %cast, i8 0, i32 16, i1 false)
@@ -101,9 +137,18 @@ define void @bzero_16_stack() {

define void @bzero_20_stack() {
; CHECK-LABEL: bzero_20_stack:
; CHECK: stp xzr, xzr, [sp, #8]
; CHECK-NEXT: str wzr, [sp, #24]
; CHECK-NEXT: bl something
; CHECK: // %bb.0:
; CHECK-NEXT: sub sp, sp, #48
; CHECK-NEXT: str x30, [sp, #32] // 8-byte Folded Spill
; CHECK-NEXT: .cfi_def_cfa_offset 48
; CHECK-NEXT: .cfi_offset w30, -16
; CHECK-NEXT: add x0, sp, #8
; CHECK-NEXT: stp xzr, xzr, [sp, #8]
; CHECK-NEXT: str wzr, [sp, #24]
; CHECK-NEXT: bl something
; CHECK-NEXT: ldr x30, [sp, #32] // 8-byte Folded Reload
; CHECK-NEXT: add sp, sp, #48
; CHECK-NEXT: ret
%buf = alloca [20 x i8], align 1
%cast = bitcast [20 x i8]* %buf to i8*
call void @llvm.memset.p0i8.i32(i8* %cast, i8 0, i32 20, i1 false)
@@ -113,10 +158,19 @@ define void @bzero_20_stack() {

define void @bzero_26_stack() {
; CHECK-LABEL: bzero_26_stack:
; CHECK: stp xzr, xzr, [sp]
; CHECK-NEXT: strh wzr, [sp, #24]
; CHECK-NEXT: str xzr, [sp, #16]
; CHECK-NEXT: bl something
; CHECK: // %bb.0:
; CHECK-NEXT: sub sp, sp, #48
; CHECK-NEXT: str x30, [sp, #32] // 8-byte Folded Spill
; CHECK-NEXT: .cfi_def_cfa_offset 48
; CHECK-NEXT: .cfi_offset w30, -16
; CHECK-NEXT: mov x0, sp
; CHECK-NEXT: stp xzr, xzr, [sp]
; CHECK-NEXT: strh wzr, [sp, #24]
; CHECK-NEXT: str xzr, [sp, #16]
; CHECK-NEXT: bl something
; CHECK-NEXT: ldr x30, [sp, #32] // 8-byte Folded Reload
; CHECK-NEXT: add sp, sp, #48
; CHECK-NEXT: ret
%buf = alloca [26 x i8], align 1
%cast = bitcast [26 x i8]* %buf to i8*
call void @llvm.memset.p0i8.i32(i8* %cast, i8 0, i32 26, i1 false)
@@ -126,10 +180,18 @@ define void @bzero_26_stack() {

define void @bzero_32_stack() {
; CHECK-LABEL: bzero_32_stack:
; CHECK: movi v0.2d, #0000000000000000
; CHECK-NEXT: mov x0, sp
; CHECK-NEXT: stp q0, q0, [sp]
; CHECK-NEXT: bl something
; CHECK: // %bb.0:
; CHECK-NEXT: sub sp, sp, #48
; CHECK-NEXT: str x30, [sp, #32] // 8-byte Folded Spill
; CHECK-NEXT: .cfi_def_cfa_offset 48
; CHECK-NEXT: .cfi_offset w30, -16
; CHECK-NEXT: movi v0.2d, #0000000000000000
; CHECK-NEXT: mov x0, sp
; CHECK-NEXT: stp q0, q0, [sp]
; CHECK-NEXT: bl something
; CHECK-NEXT: ldr x30, [sp, #32] // 8-byte Folded Reload
; CHECK-NEXT: add sp, sp, #48
; CHECK-NEXT: ret
%buf = alloca [32 x i8], align 1
%cast = bitcast [32 x i8]* %buf to i8*
call void @llvm.memset.p0i8.i32(i8* %cast, i8 0, i32 32, i1 false)
@@ -139,11 +201,19 @@ define void @bzero_32_stack() {

define void @bzero_40_stack() {
; CHECK-LABEL: bzero_40_stack:
; CHECK: movi v0.2d, #0000000000000000
; CHECK-NEXT: mov x0, sp
; CHECK-NEXT: str xzr, [sp, #32]
; CHECK-NEXT: stp q0, q0, [sp]
; CHECK-NEXT: bl something
; CHECK: // %bb.0:
; CHECK-NEXT: sub sp, sp, #64
; CHECK-NEXT: str x30, [sp, #48] // 8-byte Folded Spill
; CHECK-NEXT: .cfi_def_cfa_offset 64
; CHECK-NEXT: .cfi_offset w30, -16
; CHECK-NEXT: movi v0.2d, #0000000000000000
; CHECK-NEXT: mov x0, sp
; CHECK-NEXT: str xzr, [sp, #32]
; CHECK-NEXT: stp q0, q0, [sp]
; CHECK-NEXT: bl something
; CHECK-NEXT: ldr x30, [sp, #48] // 8-byte Folded Reload
; CHECK-NEXT: add sp, sp, #64
; CHECK-NEXT: ret
%buf = alloca [40 x i8], align 1
%cast = bitcast [40 x i8]* %buf to i8*
call void @llvm.memset.p0i8.i32(i8* %cast, i8 0, i32 40, i1 false)
@@ -153,11 +223,19 @@ define void @bzero_40_stack() {

define void @bzero_64_stack() {
; CHECK-LABEL: bzero_64_stack:
; CHECK: movi v0.2d, #0000000000000000
; CHECK-NEXT: mov x0, sp
; CHECK-NEXT: stp q0, q0, [sp, #32]
; CHECK-NEXT: stp q0, q0, [sp]
; CHECK-NEXT: bl something
; CHECK: // %bb.0:
; CHECK-NEXT: sub sp, sp, #80
; CHECK-NEXT: str x30, [sp, #64] // 8-byte Folded Spill
; CHECK-NEXT: .cfi_def_cfa_offset 80
; CHECK-NEXT: .cfi_offset w30, -16
; CHECK-NEXT: movi v0.2d, #0000000000000000
; CHECK-NEXT: mov x0, sp
; CHECK-NEXT: stp q0, q0, [sp, #32]
; CHECK-NEXT: stp q0, q0, [sp]
; CHECK-NEXT: bl something
; CHECK-NEXT: ldr x30, [sp, #64] // 8-byte Folded Reload
; CHECK-NEXT: add sp, sp, #80
; CHECK-NEXT: ret
%buf = alloca [64 x i8], align 1
%cast = bitcast [64 x i8]* %buf to i8*
call void @llvm.memset.p0i8.i32(i8* %cast, i8 0, i32 64, i1 false)
@@ -167,12 +245,20 @@ define void @bzero_64_stack() {

define void @bzero_72_stack() {
; CHECK-LABEL: bzero_72_stack:
; CHECK: movi v0.2d, #0000000000000000
; CHECK-NEXT: mov x0, sp
; CHECK-NEXT: str xzr, [sp, #64]
; CHECK-NEXT: stp q0, q0, [sp, #32]
; CHECK-NEXT: stp q0, q0, [sp]
; CHECK-NEXT: bl something
; CHECK: // %bb.0:
; CHECK-NEXT: sub sp, sp, #96
; CHECK-NEXT: str x30, [sp, #80] // 8-byte Folded Spill
; CHECK-NEXT: .cfi_def_cfa_offset 96
; CHECK-NEXT: .cfi_offset w30, -16
; CHECK-NEXT: movi v0.2d, #0000000000000000
; CHECK-NEXT: mov x0, sp
; CHECK-NEXT: str xzr, [sp, #64]
; CHECK-NEXT: stp q0, q0, [sp, #32]
; CHECK-NEXT: stp q0, q0, [sp]
; CHECK-NEXT: bl something
; CHECK-NEXT: ldr x30, [sp, #80] // 8-byte Folded Reload
; CHECK-NEXT: add sp, sp, #96
; CHECK-NEXT: ret
%buf = alloca [72 x i8], align 1
%cast = bitcast [72 x i8]* %buf to i8*
call void @llvm.memset.p0i8.i32(i8* %cast, i8 0, i32 72, i1 false)
@@ -182,13 +268,21 @@ define void @bzero_72_stack() {

define void @bzero_128_stack() {
; CHECK-LABEL: bzero_128_stack:
; CHECK: movi v0.2d, #0000000000000000
; CHECK-NEXT: mov x0, sp
; CHECK-NEXT: stp q0, q0, [sp, #96]
; CHECK-NEXT: stp q0, q0, [sp, #64]
; CHECK-NEXT: stp q0, q0, [sp, #32]
; CHECK-NEXT: stp q0, q0, [sp]
; CHECK-NEXT: bl something
; CHECK: // %bb.0:
; CHECK-NEXT: sub sp, sp, #144
; CHECK-NEXT: str x30, [sp, #128] // 8-byte Folded Spill
; CHECK-NEXT: .cfi_def_cfa_offset 144
; CHECK-NEXT: .cfi_offset w30, -16
; CHECK-NEXT: movi v0.2d, #0000000000000000
; CHECK-NEXT: mov x0, sp
; CHECK-NEXT: stp q0, q0, [sp, #96]
; CHECK-NEXT: stp q0, q0, [sp, #64]
; CHECK-NEXT: stp q0, q0, [sp, #32]
; CHECK-NEXT: stp q0, q0, [sp]
; CHECK-NEXT: bl something
; CHECK-NEXT: ldr x30, [sp, #128] // 8-byte Folded Reload
; CHECK-NEXT: add sp, sp, #144
; CHECK-NEXT: ret
%buf = alloca [128 x i8], align 1
%cast = bitcast [128 x i8]* %buf to i8*
call void @llvm.memset.p0i8.i32(i8* %cast, i8 0, i32 128, i1 false)
@@ -198,17 +292,26 @@ define void @bzero_128_stack() {

define void @bzero_256_stack() {
; CHECK-LABEL: bzero_256_stack:
; CHECK: movi v0.2d, #0000000000000000
; CHECK-NEXT: mov x0, sp
; CHECK-NEXT: stp q0, q0, [sp, #224]
; CHECK-NEXT: stp q0, q0, [sp, #192]
; CHECK-NEXT: stp q0, q0, [sp, #160]
; CHECK-NEXT: stp q0, q0, [sp, #128]
; CHECK-NEXT: stp q0, q0, [sp, #96]
; CHECK-NEXT: stp q0, q0, [sp, #64]
; CHECK-NEXT: stp q0, q0, [sp, #32]
; CHECK-NEXT: stp q0, q0, [sp]
; CHECK-NEXT: bl something
; CHECK: // %bb.0:
; CHECK-NEXT: sub sp, sp, #272
; CHECK-NEXT: stp x29, x30, [sp, #256] // 16-byte Folded Spill
; CHECK-NEXT: .cfi_def_cfa_offset 272
; CHECK-NEXT: .cfi_offset w30, -8
; CHECK-NEXT: .cfi_offset w29, -16
; CHECK-NEXT: movi v0.2d, #0000000000000000
; CHECK-NEXT: mov x0, sp
; CHECK-NEXT: stp q0, q0, [sp, #224]
; CHECK-NEXT: stp q0, q0, [sp, #192]
; CHECK-NEXT: stp q0, q0, [sp, #160]
; CHECK-NEXT: stp q0, q0, [sp, #128]
; CHECK-NEXT: stp q0, q0, [sp, #96]
; CHECK-NEXT: stp q0, q0, [sp, #64]
; CHECK-NEXT: stp q0, q0, [sp, #32]
; CHECK-NEXT: stp q0, q0, [sp]
; CHECK-NEXT: bl something
; CHECK-NEXT: ldp x29, x30, [sp, #256] // 16-byte Folded Reload
; CHECK-NEXT: add sp, sp, #272
; CHECK-NEXT: ret
%buf = alloca [256 x i8], align 1
%cast = bitcast [256 x i8]* %buf to i8*
call void @llvm.memset.p0i8.i32(i8* %cast, i8 0, i32 256, i1 false)
@@ -218,10 +321,16 @@ define void @bzero_256_stack() {

define void @memset_4_stack() {
; CHECK-LABEL: memset_4_stack:
; CHECK: mov w8, #-1431655766
; CHECK-NEXT: add x0, sp, #12
; CHECK-NEXT: str w8, [sp, #12]
; CHECK-NEXT: bl something
; CHECK: // %bb.0:
; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: .cfi_offset w30, -16
; CHECK-NEXT: mov w8, #-1431655766
; CHECK-NEXT: add x0, sp, #12
; CHECK-NEXT: str w8, [sp, #12]
; CHECK-NEXT: bl something
; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT: ret
%buf = alloca [4 x i8], align 1
%cast = bitcast [4 x i8]* %buf to i8*
call void @llvm.memset.p0i8.i32(i8* %cast, i8 -86, i32 4, i1 false)
@@ -231,10 +340,15 @@ define void @memset_4_stack() {

define void @memset_8_stack() {
; CHECK-LABEL: memset_8_stack:
; CHECK: mov x8, #-6148914691236517206
; CHECK-NEXT: stp x30, x8, [sp, #-16]!
; CHECK-NEXT: add x0, sp, #8
; CHECK-NEXT: bl something
; CHECK: // %bb.0:
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: .cfi_offset w30, -16
; CHECK-NEXT: mov x8, #-6148914691236517206
; CHECK-NEXT: stp x30, x8, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT: add x0, sp, #8
; CHECK-NEXT: bl something
; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT: ret
%buf = alloca [8 x i8], align 1
%cast = bitcast [8 x i8]* %buf to i8*
call void @llvm.memset.p0i8.i32(i8* %cast, i8 -86, i32 8, i1 false)
@@ -244,11 +358,19 @@ define void @memset_8_stack() {

define void @memset_12_stack() {
; CHECK-LABEL: memset_12_stack:
; CHECK: mov x8, #-6148914691236517206
; CHECK-NEXT: mov x0, sp
; CHECK-NEXT: str x8, [sp]
; CHECK-NEXT: str w8, [sp, #8]
; CHECK-NEXT: bl something
; CHECK: // %bb.0:
; CHECK-NEXT: sub sp, sp, #32
; CHECK-NEXT: str x30, [sp, #16] // 8-byte Folded Spill
; CHECK-NEXT: .cfi_def_cfa_offset 32
; CHECK-NEXT: .cfi_offset w30, -16
; CHECK-NEXT: mov x8, #-6148914691236517206
; CHECK-NEXT: mov x0, sp
; CHECK-NEXT: str x8, [sp]
; CHECK-NEXT: str w8, [sp, #8]
; CHECK-NEXT: bl something
; CHECK-NEXT: ldr x30, [sp, #16] // 8-byte Folded Reload
; CHECK-NEXT: add sp, sp, #32
; CHECK-NEXT: ret
%buf = alloca [12 x i8], align 1
%cast = bitcast [12 x i8]* %buf to i8*
call void @llvm.memset.p0i8.i32(i8* %cast, i8 -86, i32 12, i1 false)
@@ -258,11 +380,18 @@ define void @memset_12_stack() {

define void @memset_16_stack() {
; CHECK-LABEL: memset_16_stack:
; CHECK: mov x8, #-6148914691236517206
; CHECK-NEXT: mov x0, sp
; CHECK-NEXT: stp x8, x30, [sp, #8]
; CHECK-NEXT: str x8, [sp]
; CHECK-NEXT: bl something
; CHECK: // %bb.0:
; CHECK-NEXT: sub sp, sp, #32
; CHECK-NEXT: .cfi_def_cfa_offset 32
; CHECK-NEXT: .cfi_offset w30, -16
; CHECK-NEXT: mov x8, #-6148914691236517206
; CHECK-NEXT: mov x0, sp
; CHECK-NEXT: stp x8, x30, [sp, #8] // 8-byte Folded Spill
; CHECK-NEXT: str x8, [sp]
; CHECK-NEXT: bl something
; CHECK-NEXT: ldr x30, [sp, #16] // 8-byte Folded Reload
; CHECK-NEXT: add sp, sp, #32
; CHECK-NEXT: ret
%buf = alloca [16 x i8], align 1
%cast = bitcast [16 x i8]* %buf to i8*
call void @llvm.memset.p0i8.i32(i8* %cast, i8 -86, i32 16, i1 false)
@@ -272,11 +401,19 @@ define void @memset_16_stack() {

define void @memset_20_stack() {
; CHECK-LABEL: memset_20_stack:
; CHECK: mov x8, #-6148914691236517206
; CHECK-NEXT: add x0, sp, #8
; CHECK-NEXT: stp x8, x8, [sp, #8]
; CHECK-NEXT: str w8, [sp, #24]
; CHECK-NEXT: bl something
; CHECK: // %bb.0:
; CHECK-NEXT: sub sp, sp, #48
; CHECK-NEXT: str x30, [sp, #32] // 8-byte Folded Spill
; CHECK-NEXT: .cfi_def_cfa_offset 48
; CHECK-NEXT: .cfi_offset w30, -16
; CHECK-NEXT: mov x8, #-6148914691236517206
; CHECK-NEXT: add x0, sp, #8
; CHECK-NEXT: stp x8, x8, [sp, #8]
; CHECK-NEXT: str w8, [sp, #24]
; CHECK-NEXT: bl something
; CHECK-NEXT: ldr x30, [sp, #32] // 8-byte Folded Reload
; CHECK-NEXT: add sp, sp, #48
; CHECK-NEXT: ret
%buf = alloca [20 x i8], align 1
%cast = bitcast [20 x i8]* %buf to i8*
call void @llvm.memset.p0i8.i32(i8* %cast, i8 -86, i32 20, i1 false)
@@ -286,12 +423,20 @@ define void @memset_20_stack() {

define void @memset_26_stack() {
; CHECK-LABEL: memset_26_stack:
; CHECK: mov x8, #-6148914691236517206
; CHECK-NEXT: mov x0, sp
; CHECK-NEXT: stp x8, x8, [sp, #8]
; CHECK-NEXT: str x8, [sp]
; CHECK-NEXT: strh w8, [sp, #24]
; CHECK-NEXT: bl something
; CHECK: // %bb.0:
; CHECK-NEXT: sub sp, sp, #48
; CHECK-NEXT: str x30, [sp, #32] // 8-byte Folded Spill
; CHECK-NEXT: .cfi_def_cfa_offset 48
; CHECK-NEXT: .cfi_offset w30, -16
; CHECK-NEXT: mov x8, #-6148914691236517206
; CHECK-NEXT: mov x0, sp
; CHECK-NEXT: stp x8, x8, [sp, #8]
; CHECK-NEXT: str x8, [sp]
; CHECK-NEXT: strh w8, [sp, #24]
; CHECK-NEXT: bl something
; CHECK-NEXT: ldr x30, [sp, #32] // 8-byte Folded Reload
; CHECK-NEXT: add sp, sp, #48
; CHECK-NEXT: ret
%buf = alloca [26 x i8], align 1
%cast = bitcast [26 x i8]* %buf to i8*
call void @llvm.memset.p0i8.i32(i8* %cast, i8 -86, i32 26, i1 false)
@@ -301,10 +446,18 @@ define void @memset_26_stack() {

define void @memset_32_stack() {
; CHECK-LABEL: memset_32_stack:
; CHECK: movi v0.16b, #170
; CHECK-NEXT: mov x0, sp
; CHECK-NEXT: stp q0, q0, [sp]
; CHECK-NEXT: bl something
; CHECK: // %bb.0:
; CHECK-NEXT: sub sp, sp, #48
; CHECK-NEXT: str x30, [sp, #32] // 8-byte Folded Spill
; CHECK-NEXT: .cfi_def_cfa_offset 48
; CHECK-NEXT: .cfi_offset w30, -16
; CHECK-NEXT: movi v0.16b, #170
; CHECK-NEXT: mov x0, sp
; CHECK-NEXT: stp q0, q0, [sp]
; CHECK-NEXT: bl something
; CHECK-NEXT: ldr x30, [sp, #32] // 8-byte Folded Reload
; CHECK-NEXT: add sp, sp, #48
; CHECK-NEXT: ret
%buf = alloca [32 x i8], align 1
%cast = bitcast [32 x i8]* %buf to i8*
call void @llvm.memset.p0i8.i32(i8* %cast, i8 -86, i32 32, i1 false)
@@ -314,12 +467,20 @@ define void @memset_32_stack() {

define void @memset_40_stack() {
; CHECK-LABEL: memset_40_stack:
; CHECK: mov x8, #-6148914691236517206
; CHECK-NEXT: movi v0.16b, #170
; CHECK-NEXT: mov x0, sp
; CHECK-NEXT: str x8, [sp, #32]
; CHECK-NEXT: stp q0, q0, [sp]
; CHECK-NEXT: bl something
; CHECK: // %bb.0:
; CHECK-NEXT: sub sp, sp, #64
; CHECK-NEXT: str x30, [sp, #48] // 8-byte Folded Spill
; CHECK-NEXT: .cfi_def_cfa_offset 64
; CHECK-NEXT: .cfi_offset w30, -16
; CHECK-NEXT: mov x8, #-6148914691236517206
; CHECK-NEXT: movi v0.16b, #170
; CHECK-NEXT: mov x0, sp
; CHECK-NEXT: str x8, [sp, #32]
; CHECK-NEXT: stp q0, q0, [sp]
; CHECK-NEXT: bl something
; CHECK-NEXT: ldr x30, [sp, #48] // 8-byte Folded Reload
; CHECK-NEXT: add sp, sp, #64
; CHECK-NEXT: ret
%buf = alloca [40 x i8], align 1
%cast = bitcast [40 x i8]* %buf to i8*
call void @llvm.memset.p0i8.i32(i8* %cast, i8 -86, i32 40, i1 false)
@@ -329,11 +490,19 @@ define void @memset_40_stack() {

define void @memset_64_stack() {
; CHECK-LABEL: memset_64_stack:
; CHECK: movi v0.16b, #170
; CHECK-NEXT: mov x0, sp
; CHECK-NEXT: stp q0, q0, [sp, #32]
; CHECK-NEXT: stp q0, q0, [sp]
; CHECK-NEXT: bl something
; CHECK: // %bb.0:
; CHECK-NEXT: sub sp, sp, #80
; CHECK-NEXT: str x30, [sp, #64] // 8-byte Folded Spill
; CHECK-NEXT: .cfi_def_cfa_offset 80
; CHECK-NEXT: .cfi_offset w30, -16
; CHECK-NEXT: movi v0.16b, #170
; CHECK-NEXT: mov x0, sp
; CHECK-NEXT: stp q0, q0, [sp, #32]
; CHECK-NEXT: stp q0, q0, [sp]
; CHECK-NEXT: bl something
; CHECK-NEXT: ldr x30, [sp, #64] // 8-byte Folded Reload
; CHECK-NEXT: add sp, sp, #80
; CHECK-NEXT: ret
%buf = alloca [64 x i8], align 1
%cast = bitcast [64 x i8]* %buf to i8*
call void @llvm.memset.p0i8.i32(i8* %cast, i8 -86, i32 64, i1 false)
@@ -343,13 +512,21 @@ define void @memset_64_stack() {

define void @memset_72_stack() {
; CHECK-LABEL: memset_72_stack:
; CHECK: mov x8, #-6148914691236517206
; CHECK-NEXT: movi v0.16b, #170
; CHECK-NEXT: mov x0, sp
; CHECK-NEXT: str x8, [sp, #64]
; CHECK-NEXT: stp q0, q0, [sp, #32]
; CHECK-NEXT: stp q0, q0, [sp]
; CHECK-NEXT: bl something
; CHECK: // %bb.0:
; CHECK-NEXT: sub sp, sp, #96
; CHECK-NEXT: str x30, [sp, #80] // 8-byte Folded Spill
; CHECK-NEXT: .cfi_def_cfa_offset 96
; CHECK-NEXT: .cfi_offset w30, -16
; CHECK-NEXT: mov x8, #-6148914691236517206
; CHECK-NEXT: movi v0.16b, #170
; CHECK-NEXT: mov x0, sp
; CHECK-NEXT: str x8, [sp, #64]
; CHECK-NEXT: stp q0, q0, [sp, #32]
; CHECK-NEXT: stp q0, q0, [sp]
; CHECK-NEXT: bl something
; CHECK-NEXT: ldr x30, [sp, #80] // 8-byte Folded Reload
; CHECK-NEXT: add sp, sp, #96
; CHECK-NEXT: ret
%buf = alloca [72 x i8], align 1
%cast = bitcast [72 x i8]* %buf to i8*
call void @llvm.memset.p0i8.i32(i8* %cast, i8 -86, i32 72, i1 false)
@@ -359,13 +536,21 @@ define void @memset_72_stack() {

define void @memset_128_stack() {
; CHECK-LABEL: memset_128_stack:
; CHECK: movi v0.16b, #170
; CHECK-NEXT: mov x0, sp
; CHECK-NEXT: stp q0, q0, [sp, #96]
; CHECK-NEXT: stp q0, q0, [sp, #64]
; CHECK-NEXT: stp q0, q0, [sp, #32]
; CHECK-NEXT: stp q0, q0, [sp]
; CHECK-NEXT: bl something
; CHECK: // %bb.0:
; CHECK-NEXT: sub sp, sp, #144
; CHECK-NEXT: str x30, [sp, #128] // 8-byte Folded Spill
; CHECK-NEXT: .cfi_def_cfa_offset 144
; CHECK-NEXT: .cfi_offset w30, -16
; CHECK-NEXT: movi v0.16b, #170
; CHECK-NEXT: mov x0, sp
; CHECK-NEXT: stp q0, q0, [sp, #96]
; CHECK-NEXT: stp q0, q0, [sp, #64]
; CHECK-NEXT: stp q0, q0, [sp, #32]
; CHECK-NEXT: stp q0, q0, [sp]
; CHECK-NEXT: bl something
; CHECK-NEXT: ldr x30, [sp, #128] // 8-byte Folded Reload
; CHECK-NEXT: add sp, sp, #144
; CHECK-NEXT: ret
%buf = alloca [128 x i8], align 1
%cast = bitcast [128 x i8]* %buf to i8*
call void @llvm.memset.p0i8.i32(i8* %cast, i8 -86, i32 128, i1 false)
@@ -375,17 +560,26 @@ define void @memset_128_stack() {

define void @memset_256_stack() {
; CHECK-LABEL: memset_256_stack:
; CHECK: movi v0.16b, #170
; CHECK-NEXT: mov x0, sp
; CHECK-NEXT: stp q0, q0, [sp, #224]
; CHECK-NEXT: stp q0, q0, [sp, #192]
; CHECK-NEXT: stp q0, q0, [sp, #160]
; CHECK-NEXT: stp q0, q0, [sp, #128]
; CHECK-NEXT: stp q0, q0, [sp, #96]
; CHECK-NEXT: stp q0, q0, [sp, #64]
; CHECK-NEXT: stp q0, q0, [sp, #32]
; CHECK-NEXT: stp q0, q0, [sp]
; CHECK-NEXT: bl something
; CHECK: // %bb.0:
; CHECK-NEXT: sub sp, sp, #272
; CHECK-NEXT: stp x29, x30, [sp, #256] // 16-byte Folded Spill
; CHECK-NEXT: .cfi_def_cfa_offset 272
; CHECK-NEXT: .cfi_offset w30, -8
; CHECK-NEXT: .cfi_offset w29, -16
; CHECK-NEXT: movi v0.16b, #170
; CHECK-NEXT: mov x0, sp
; CHECK-NEXT: stp q0, q0, [sp, #224]
; CHECK-NEXT: stp q0, q0, [sp, #192]
; CHECK-NEXT: stp q0, q0, [sp, #160]
; CHECK-NEXT: stp q0, q0, [sp, #128]
; CHECK-NEXT: stp q0, q0, [sp, #96]
; CHECK-NEXT: stp q0, q0, [sp, #64]
; CHECK-NEXT: stp q0, q0, [sp, #32]
; CHECK-NEXT: stp q0, q0, [sp]
; CHECK-NEXT: bl something
; CHECK-NEXT: ldp x29, x30, [sp, #256] // 16-byte Folded Reload
; CHECK-NEXT: add sp, sp, #272
; CHECK-NEXT: ret
%buf = alloca [256 x i8], align 1
%cast = bitcast [256 x i8]* %buf to i8*
call void @llvm.memset.p0i8.i32(i8* %cast, i8 -86, i32 256, i1 false)
@@ -1,11 +1,19 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=arm64-apple-ios -mattr=+strict-align < %s | FileCheck %s

; Small (16 bytes here) unaligned memcpy() should be a function call if
; strict-alignment is turned on.
define void @t0(i8* %out, i8* %in) {
; CHECK-LABEL: t0:
; CHECK: mov w2, #16
; CHECK-NEXT: bl _memcpy
; CHECK: ; %bb.0: ; %entry
; CHECK-NEXT: stp x29, x30, [sp, #-16]! ; 16-byte Folded Spill
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: .cfi_offset w30, -8
; CHECK-NEXT: .cfi_offset w29, -16
; CHECK-NEXT: mov w2, #16
; CHECK-NEXT: bl _memcpy
; CHECK-NEXT: ldp x29, x30, [sp], #16 ; 16-byte Folded Reload
; CHECK-NEXT: ret
entry:
call void @llvm.memcpy.p0i8.p0i8.i64(i8* %out, i8* %in, i64 16, i1 false)
ret void
@@ -1,11 +1,14 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=arm64_32-apple-ios %s -o - | FileCheck %s

; If %base < 96 then the sum will not wrap (in an unsigned sense), but "ldr w0,
; [x0, #-96]" would.
define i32 @test_valid_wrap(i32 %base) {
; CHECK-LABEL: test_valid_wrap:
; CHECK: sub w[[ADDR:[0-9]+]], w0, #96
; CHECK: ldr w0, [x[[ADDR]]]
; CHECK: ; %bb.0:
; CHECK-NEXT: sub w8, w0, #96
; CHECK-NEXT: ldr w0, [x8]
; CHECK-NEXT: ret

%newaddr = add nuw i32 %base, -96
%ptr = inttoptr i32 %newaddr to i32*
@@ -15,7 +18,9 @@ define i32 @test_valid_wrap(i32 %base) {

define i8 @test_valid_wrap_optimizable(i8* %base) {
; CHECK-LABEL: test_valid_wrap_optimizable:
; CHECK: ldurb w0, [x0, #-96]
; CHECK: ; %bb.0:
; CHECK-NEXT: ldurb w0, [x0, #-96]
; CHECK-NEXT: ret

%newaddr = getelementptr inbounds i8, i8* %base, i32 -96
%val = load i8, i8* %newaddr
@@ -24,7 +29,9 @@ define i8 @test_valid_wrap_optimizable(i8* %base) {

define i8 @test_valid_wrap_optimizable1(i8* %base, i32 %offset) {
; CHECK-LABEL: test_valid_wrap_optimizable1:
; CHECK: ldrb w0, [x0, w1, sxtw]
; CHECK: ; %bb.0:
; CHECK-NEXT: ldrb w0, [x0, w1, sxtw]
; CHECK-NEXT: ret

%newaddr = getelementptr inbounds i8, i8* %base, i32 %offset
%val = load i8, i8* %newaddr
@@ -34,9 +41,12 @@ define i8 @test_valid_wrap_optimizable1(i8* %base, i32 %offset) {
;
define i8 @test_valid_wrap_optimizable2(i8* %base, i32 %offset) {
; CHECK-LABEL: test_valid_wrap_optimizable2:
; CHECK: sxtw x[[OFFSET:[0-9]+]], w1
; CHECK: mov w[[BASE:[0-9]+]], #-100
; CHECK: ldrb w0, [x[[OFFSET]], x[[BASE]]]
; CHECK: ; %bb.0:
; CHECK-NEXT: ; kill: def $w1 killed $w1 def $x1
; CHECK-NEXT: sxtw x8, w1
; CHECK-NEXT: mov w9, #-100
; CHECK-NEXT: ldrb w0, [x8, x9]
; CHECK-NEXT: ret

%newaddr = getelementptr inbounds i8, i8* inttoptr(i32 -100 to i8*), i32 %offset
%val = load i8, i8* %newaddr