# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
# RUN: llc %s -mtriple=riscv64 -mattr=experimental-v -riscv-v-vector-bits-min=128 -run-pass=finalize-isel -o - | FileCheck %s
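
# Notes on the RUN line: -run-pass=finalize-isel runs only the finalize-isel
# pass, which expands instructions flagged usesCustomInserter or
# hasPostISelHook after instruction selection; -riscv-v-vector-bits-min=128
# lets the backend assume VLEN is at least 128 bits; -mattr=experimental-v
# enables the draft V extension.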

# This test makes sure we peek through the COPY instruction between the
# IMPLICIT_DEF and PseudoVLE64_V_M8_MASK in order to select the tail agnostic
# policy. The test passes if the second argument to PseudoVSETVLI has bit 6
# set.
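#
# For reference: in the vtype encoding defined by the RISC-V V spec, bit 6 is
# the vta (tail agnostic) bit, so "bit 6 set" means a tail agnostic rather
# than tail undisturbed configuration.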

--- |
  ; ModuleID = 'test.ll'
  source_filename = "test.ll"
  target datalayout = "e-m:e-p:64:64-i64:64-i128:128-n64-S128"
  target triple = "riscv64"

  ; Function Attrs: nounwind
  define <vscale x 8 x i64> @masked_load_nxv8i64(<vscale x 8 x i64>* %a, <vscale x 8 x i1> %mask) #0 {
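    ; llvm.masked.load reads only the lanes of %a whose mask bit is set;
    ; masked-off lanes take the corresponding element of the passthru operand,
    ; which is undef here. That undef passthru becomes the IMPLICIT_DEF the
    ; test peeks through.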
    %load = call <vscale x 8 x i64> @llvm.masked.load.nxv8i64.p0nxv8i64(<vscale x 8 x i64>* %a, i32 8, <vscale x 8 x i1> %mask, <vscale x 8 x i64> undef)
    ret <vscale x 8 x i64> %load
  }

  ; Function Attrs: argmemonly nofree nosync nounwind readonly willreturn
  declare <vscale x 8 x i64> @llvm.masked.load.nxv8i64.p0nxv8i64(<vscale x 8 x i64>*, i32 immarg, <vscale x 8 x i1>, <vscale x 8 x i64>) #1

  attributes #0 = { nounwind "target-features"="+experimental-v" }
  attributes #1 = { argmemonly nofree nosync nounwind readonly willreturn "target-features"="+experimental-v" }

...
---
name:            masked_load_nxv8i64
alignment:       4
tracksRegLiveness: true
registers:
  - { id: 0, class: gpr }
  - { id: 1, class: vr }
  - { id: 2, class: vrm8nov0 }
  - { id: 3, class: vrm8 }
  - { id: 4, class: vrm8nov0 }
liveins:
  - { reg: '$x10', virtual-reg: '%0' }
  - { reg: '$v0', virtual-reg: '%1' }
frameInfo:
  maxAlignment:    1
machineFunctionInfo: {}
body:             |
  bb.0 (%ir-block.0):
    liveins: $x10, $v0

    ; CHECK-LABEL: name: masked_load_nxv8i64
    ; CHECK: liveins: $x10, $v0
    ; CHECK: [[COPY:%[0-9]+]]:vr = COPY $v0
    ; CHECK: [[COPY1:%[0-9]+]]:gpr = COPY $x10
    ; CHECK: $v0 = COPY [[COPY]]
    ; CHECK: [[DEF:%[0-9]+]]:vrm8 = IMPLICIT_DEF
    ; CHECK: [[COPY2:%[0-9]+]]:vrm8nov0 = COPY [[DEF]]
    ; CHECK: [[PseudoVLE64_V_M8_MASK:%[0-9]+]]:vrm8nov0 = PseudoVLE64_V_M8_MASK [[COPY2]], [[COPY1]], $v0, $x0, 6 :: (load 64 from %ir.a, align 8)
    ; CHECK: $v8m8 = COPY [[PseudoVLE64_V_M8_MASK]]
    ; CHECK: PseudoRET implicit $v8m8
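    ; The masked-load pseudo's operands are (merge, base address, mask, AVL,
    ; SEW) as these pseudos were defined when this test was written: $x0 as
    ; AVL requests VLMAX, and the trailing 6 encodes SEW=64 as log2(SEW).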
    %1:vr = COPY $v0
    %0:gpr = COPY $x10
    $v0 = COPY %1
    %3:vrm8 = IMPLICIT_DEF
    %4:vrm8nov0 = COPY %3
    %2:vrm8nov0 = PseudoVLE64_V_M8_MASK %4, %0, $v0, $x0, 6 :: (load 64 from %ir.a, align 8)
    $v8m8 = COPY %2
    PseudoRET implicit $v8m8

...