[AArch64] Regenerate 3 codegen test files. NFC

David Green 2022-06-16 18:23:05 +01:00
parent 47bfc365fc
commit 527b8ccde5
3 changed files with 73 additions and 41 deletions
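
For context, files carrying the "autogenerated by utils/update_llc_test_checks.py" note below are refreshed by re-running that script against a locally built llc rather than by editing the CHECK lines by hand. A minimal sketch of the invocation (paths are illustrative, not taken from this commit):

    llvm/utils/update_llc_test_checks.py --llc-binary build/bin/llc llvm/test/CodeGen/AArch64/<test>.ll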


@@ -1,3 +1,4 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -O3 < %s -aarch64-enable-atomic-cfg-tidy=0 -aarch64-enable-gep-opt=false -verify-machineinstrs | FileCheck %s
target triple = "arm64-apple-ios"
@@ -5,16 +6,20 @@ target triple = "arm64-apple-ios"
; CSE between "icmp reg reg" and "sub reg reg".
; Both can be in the same basic block or in different basic blocks.
define i8* @t1(i8* %base, i32* nocapture %offset, i32 %size) nounwind {
entry:
; CHECK-LABEL: t1:
; CHECK: subs
; CHECK-NOT: cmp
; CHECK-NOT: sub
; CHECK: b.ge
; CHECK: sub
; CHECK-NEXT: add
; CHECK-NOT: sub
; CHECK: ret
; CHECK: ; %bb.0: ; %entry
; CHECK-NEXT: ldr w9, [x1]
; CHECK-NEXT: subs w8, w9, w2
; CHECK-NEXT: b.ge LBB0_2
; CHECK-NEXT: ; %bb.1:
; CHECK-NEXT: mov x0, xzr
; CHECK-NEXT: ret
; CHECK-NEXT: LBB0_2: ; %if.end
; CHECK-NEXT: sub w9, w9, w8
; CHECK-NEXT: add x0, x0, w8, sxtw
; CHECK-NEXT: str w9, [x1]
; CHECK-NEXT: ret
entry:
%0 = load i32, i32* %offset, align 4
%cmp = icmp slt i32 %0, %size
%s = sub nsw i32 %0, %size
@@ -35,14 +40,19 @@ return:
; CSE between "icmp reg imm" and "sub reg imm".
define i8* @t2(i8* %base, i32* nocapture %offset) nounwind {
entry:
; CHECK-LABEL: t2:
; CHECK: subs
; CHECK-NOT: cmp
; CHECK-NOT: sub
; CHECK: b.lt
; CHECK-NOT: sub
; CHECK: ret
; CHECK: ; %bb.0: ; %entry
; CHECK-NEXT: ldr w8, [x1]
; CHECK-NEXT: subs w8, w8, #1
; CHECK-NEXT: b.lt LBB1_2
; CHECK-NEXT: ; %bb.1: ; %if.end
; CHECK-NEXT: add x0, x0, w8, sxtw
; CHECK-NEXT: str w8, [x1]
; CHECK-NEXT: ret
; CHECK-NEXT: LBB1_2:
; CHECK-NEXT: mov x0, xzr
; CHECK-NEXT: ret
entry:
%0 = load i32, i32* %offset, align 4
%cmp = icmp slt i32 %0, 1
br i1 %cmp, label %return, label %if.end
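
The regenerated checks for t1 and t2 above make the CSE described in the file's opening comment visible: on AArch64, cmp is an alias for a flag-setting subtraction whose result is discarded, so a compare and a subtract of the same operands can collapse into a single subs. A minimal sketch, with hypothetical register numbers:

    cmp  w9, w2        ; alias for "subs wzr, w9, w2"
    sub  w8, w9, w2    ; recomputes the same difference
    ; ...both fold into the one flag-setting instruction the checks now expect:
    subs w8, w9, w2    ; computes w9 - w2 into w8 and sets the condition flags

The immediate form in t2 folds the same way (cmp/sub against #1 becoming subs w8, w8, #1).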


@@ -1,3 +1,4 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -verify-machineinstrs -mtriple=arm64-apple-ios7.0 -frame-pointer=all -o - %s | FileCheck %s
; When generating DAG selection tables, TableGen used to only flag an
@@ -13,11 +14,27 @@ declare void @bar(i8*)
define i64 @test_chains() {
; CHECK-LABEL: test_chains:
; CHECK: ; %bb.0:
; CHECK-NEXT: sub sp, sp, #32
; CHECK-NEXT: .cfi_def_cfa_offset 32
; CHECK-NEXT: stp x29, x30, [sp, #16] ; 16-byte Folded Spill
; CHECK-NEXT: add x29, sp, #16
; CHECK-NEXT: .cfi_def_cfa w29, 16
; CHECK-NEXT: .cfi_offset w30, -8
; CHECK-NEXT: .cfi_offset w29, -16
; CHECK-NEXT: sub x0, x29, #1
; CHECK-NEXT: bl _bar
; CHECK-NEXT: ldurb w8, [x29, #-1]
; CHECK-NEXT: add x8, x8, #1
; CHECK-NEXT: and x0, x8, #0xff
; CHECK-NEXT: sturb w8, [x29, #-1]
; CHECK-NEXT: ldp x29, x30, [sp, #16] ; 16-byte Folded Reload
; CHECK-NEXT: add sp, sp, #32
; CHECK-NEXT: ret
%locvar = alloca i8
call void @bar(i8* %locvar)
; CHECK: bl {{_?bar}}
%inc.1 = load i8, i8* %locvar
%inc.2 = zext i8 %inc.1 to i64
@@ -25,13 +42,8 @@ define i64 @test_chains() {
%inc.4 = trunc i64 %inc.3 to i8
store i8 %inc.4, i8* %locvar
; CHECK: ldurb {{w[0-9]+}}, [x29, [[LOCADDR:#-?[0-9]+]]]
; CHECK: add {{x[0-9]+}}, {{x[0-9]+}}, #1
; CHECK: and x0, x[[STRVAL:[0-9]+]], #0xff
; CHECK: sturb w[[STRVAL]], [x29, [[LOCADDR]]]
%ret.1 = load i8, i8* %locvar
%ret.2 = zext i8 %ret.1 to i64
ret i64 %ret.2
; CHECK: ret
}
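
The chain dependencies exercised by test_chains keep the memory operations on %locvar ordered around the call, since @bar receives a pointer to that stack slot. A simplified view of the constraint, using the registers from the regenerated checks (a sketch, not additional CHECK lines):

    bl    _bar               ; the callee may read or write the byte at [x29, #-1]
    ldurb w8, [x29, #-1]     ; so this load must not be hoisted above the call
    ; ... w8 is incremented ...
    sturb w8, [x29, #-1]     ; and the store-back must still reach memory before ret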


@@ -1,3 +1,4 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=aarch64--linux-gnu -mattr=+sve < %s | FileCheck %s
;
@@ -6,8 +7,9 @@
define <vscale x 2 x i64> @masked_sload_nxv2i8(<vscale x 2 x i8> *%a, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: masked_sload_nxv2i8:
; CHECK: ld1sb { [[IN:z[0-9]+]].d }, [[PG:p[0-9]+]]/z, [x0]
; CHECK-NEXT: ret
; CHECK: // %bb.0:
; CHECK-NEXT: ld1sb { z0.d }, p0/z, [x0]
; CHECK-NEXT: ret
%load = call <vscale x 2 x i8> @llvm.masked.load.nxv2i8(<vscale x 2 x i8> *%a, i32 1, <vscale x 2 x i1> %mask, <vscale x 2 x i8> undef)
%ext = sext <vscale x 2 x i8> %load to <vscale x 2 x i64>
ret <vscale x 2 x i64> %ext
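
The property checked throughout this file is that a masked load followed by sign extension folds into a single sign-extending predicated load. For the nxv2i8 case above, an unfolded lowering would need something like the following pair (illustrative only, not taken from llc output):

    ld1b { z0.d }, p0/z, [x0]    // masked load of bytes, zero-extended into .d lanes
    sxtb z0.d, p0/m, z0.d        // separate sign extension of each lane

whereas the regenerated check expects the single ld1sb, which performs the load and the sign extension together.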
@@ -15,8 +17,9 @@ define <vscale x 2 x i64> @masked_sload_nxv2i8(<vscale x 2 x i8> *%a, <vscale x
define <vscale x 2 x i64> @masked_sload_nxv2i16(<vscale x 2 x i16> *%a, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: masked_sload_nxv2i16:
; CHECK: ld1sh { [[IN:z[0-9]+]].d }, [[PG:p[0-9]+]]/z, [x0]
; CHECK-NEXT: ret
; CHECK: // %bb.0:
; CHECK-NEXT: ld1sh { z0.d }, p0/z, [x0]
; CHECK-NEXT: ret
%load = call <vscale x 2 x i16> @llvm.masked.load.nxv2i16(<vscale x 2 x i16> *%a, i32 1, <vscale x 2 x i1> %mask, <vscale x 2 x i16> undef)
%ext = sext <vscale x 2 x i16> %load to <vscale x 2 x i64>
ret <vscale x 2 x i64> %ext
@@ -24,8 +27,9 @@ define <vscale x 2 x i64> @masked_sload_nxv2i16(<vscale x 2 x i16> *%a, <vscale
define <vscale x 2 x i64> @masked_sload_nxv2i32(<vscale x 2 x i32> *%a, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: masked_sload_nxv2i32:
; CHECK: ld1sw { [[IN:z[0-9]+]].d }, [[PG:p[0-9]+]]/z, [x0]
; CHECK-NEXT: ret
; CHECK: // %bb.0:
; CHECK-NEXT: ld1sw { z0.d }, p0/z, [x0]
; CHECK-NEXT: ret
%load = call <vscale x 2 x i32> @llvm.masked.load.nxv2i32(<vscale x 2 x i32> *%a, i32 1, <vscale x 2 x i1> %mask, <vscale x 2 x i32> undef)
%ext = sext <vscale x 2 x i32> %load to <vscale x 2 x i64>
ret <vscale x 2 x i64> %ext
@@ -33,8 +37,9 @@ define <vscale x 2 x i64> @masked_sload_nxv2i32(<vscale x 2 x i32> *%a, <vscale
define <vscale x 4 x i32> @masked_sload_nxv4i8(<vscale x 4 x i8> *%a, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: masked_sload_nxv4i8:
; CHECK: ld1sb { [[IN:z[0-9]+]].s }, [[PG:p[0-9]+]]/z, [x0]
; CHECK-NEXT: ret
; CHECK: // %bb.0:
; CHECK-NEXT: ld1sb { z0.s }, p0/z, [x0]
; CHECK-NEXT: ret
%load = call <vscale x 4 x i8> @llvm.masked.load.nxv4i8(<vscale x 4 x i8> *%a, i32 1, <vscale x 4 x i1> %mask, <vscale x 4 x i8> undef)
%ext = sext <vscale x 4 x i8> %load to <vscale x 4 x i32>
ret <vscale x 4 x i32> %ext
@@ -42,8 +47,9 @@ define <vscale x 4 x i32> @masked_sload_nxv4i8(<vscale x 4 x i8> *%a, <vscale x
define <vscale x 4 x i32> @masked_sload_nxv4i16(<vscale x 4 x i16> *%a, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: masked_sload_nxv4i16:
; CHECK: ld1sh { [[IN:z[0-9]+]].s }, [[PG:p[0-9]+]]/z, [x0]
; CHECK-NEXT: ret
; CHECK: // %bb.0:
; CHECK-NEXT: ld1sh { z0.s }, p0/z, [x0]
; CHECK-NEXT: ret
%load = call <vscale x 4 x i16> @llvm.masked.load.nxv4i16(<vscale x 4 x i16> *%a, i32 1, <vscale x 4 x i1> %mask, <vscale x 4 x i16> undef)
%ext = sext <vscale x 4 x i16> %load to <vscale x 4 x i32>
ret <vscale x 4 x i32> %ext
@@ -51,8 +57,9 @@ define <vscale x 4 x i32> @masked_sload_nxv4i16(<vscale x 4 x i16> *%a, <vscale
define <vscale x 8 x i16> @masked_sload_nxv8i8(<vscale x 8 x i8> *%a, <vscale x 8 x i1> %mask) {
; CHECK-LABEL: masked_sload_nxv8i8:
; CHECK: ld1sb { [[IN:z[0-9]+]].h }, [[PG:p[0-9]+]]/z, [x0]
; CHECK-NEXT: ret
; CHECK: // %bb.0:
; CHECK-NEXT: ld1sb { z0.h }, p0/z, [x0]
; CHECK-NEXT: ret
%load = call <vscale x 8 x i8> @llvm.masked.load.nxv8i8(<vscale x 8 x i8> *%a, i32 1, <vscale x 8 x i1> %mask, <vscale x 8 x i8> undef)
%ext = sext <vscale x 8 x i8> %load to <vscale x 8 x i16>
ret <vscale x 8 x i16> %ext
@@ -60,11 +67,12 @@ define <vscale x 8 x i16> @masked_sload_nxv8i8(<vscale x 8 x i8> *%a, <vscale x
define <vscale x 2 x i64> @masked_sload_passthru(<vscale x 2 x i32> *%a, <vscale x 2 x i1> %mask, <vscale x 2 x i32> %passthru) {
; CHECK-LABEL: masked_sload_passthru:
; CHECK: ld1sw { [[IN:z[0-9]+]].d }, [[PG1:p[0-9]+]]/z, [x0]
; CHECK-NEXT: ptrue [[PG2:p[0-9]+]].d
; CHECK-NEXT: sxtw z0.d, [[PG2]]/m, z0.d
; CHECK-NEXT: mov z0.d, [[PG1]]/m, [[IN]].d
; CHECK-NEXT: ret
; CHECK: // %bb.0:
; CHECK-NEXT: ld1sw { z1.d }, p0/z, [x0]
; CHECK-NEXT: ptrue p1.d
; CHECK-NEXT: sxtw z0.d, p1/m, z0.d
; CHECK-NEXT: mov z0.d, p0/m, z1.d
; CHECK-NEXT: ret
%load = call <vscale x 2 x i32> @llvm.masked.load.nxv2i32(<vscale x 2 x i32> *%a, i32 1, <vscale x 2 x i1> %mask, <vscale x 2 x i32> %passthru)
%ext = sext <vscale x 2 x i32> %load to <vscale x 2 x i64>
ret <vscale x 2 x i64> %ext
@@ -73,7 +81,8 @@ define <vscale x 2 x i64> @masked_sload_passthru(<vscale x 2 x i32> *%a, <vscale
; Return type requires splitting
define <vscale x 16 x i32> @masked_sload_nxv16i8(<vscale x 16 x i8>* %a, <vscale x 16 x i1> %mask) {
; CHECK-LABEL: masked_sload_nxv16i8:
; CHECK: ld1b { z0.b }, p0/z, [x0]
; CHECK: // %bb.0:
; CHECK-NEXT: ld1b { z0.b }, p0/z, [x0]
; CHECK-NEXT: sunpklo z1.h, z0.b
; CHECK-NEXT: sunpkhi z3.h, z0.b
; CHECK-NEXT: sunpklo z0.s, z1.h
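
On the splitting mentioned above: a <vscale x 16 x i32> return value needs four Z registers (each holds vscale x 4 x i32), so after the single ld1b the byte lanes are widened in stages with sign-extending unpacks, as in the checks:

    sunpklo z1.h, z0.b    // sign-extend the low half of the byte lanes to 16 bits
    sunpkhi z3.h, z0.b    // sign-extend the high half of the byte lanes to 16 bits
    sunpklo z0.s, z1.h    // then from 16-bit to 32-bit lanes, and so on for the rest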
@@ -89,7 +98,8 @@ define <vscale x 16 x i32> @masked_sload_nxv16i8(<vscale x 16 x i8>* %a, <vscale
; Masked load requires promotion
define <vscale x 4 x double> @masked_sload_4i8_4f32(<vscale x 4 x i8>* noalias %in, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: masked_sload_4i8_4f32:
; CHECK: ld1sb { z0.s }, p0/z, [x0]
; CHECK: // %bb.0:
; CHECK-NEXT: ld1sb { z0.s }, p0/z, [x0]
; CHECK-NEXT: ptrue p1.d
; CHECK-NEXT: sunpkhi z1.d, z0.s
; CHECK-NEXT: sunpklo z0.d, z0.s
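
The promotion noted above reflects that <vscale x 4 x i8> is not a legal SVE type on its own, so the masked load is promoted; because its result is sign-extended, it is emitted as the ld1sb into .s lanes and then unpacked to .d lanes as shown. The rest of this function's checks are cut off here; a hypothetical continuation would convert each sign-extended .d lane to double with a predicated scvtf, for example:

    scvtf z0.d, p1/m, z0.d    // signed 64-bit integer lanes -> double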